LLVM  16.0.0git
ARMAsmParser.cpp
Go to the documentation of this file.
1 //===- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions -------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "ARMBaseInstrInfo.h"
10 #include "ARMFeatures.h"
14 #include "MCTargetDesc/ARMMCExpr.h"
17 #include "Utils/ARMBaseInfo.h"
18 #include "llvm/ADT/APFloat.h"
19 #include "llvm/ADT/APInt.h"
20 #include "llvm/ADT/None.h"
21 #include "llvm/ADT/STLExtras.h"
22 #include "llvm/ADT/SmallSet.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/ADT/StringMap.h"
25 #include "llvm/ADT/StringRef.h"
26 #include "llvm/ADT/StringSet.h"
27 #include "llvm/ADT/StringSwitch.h"
28 #include "llvm/ADT/Triple.h"
29 #include "llvm/ADT/Twine.h"
30 #include "llvm/MC/MCContext.h"
31 #include "llvm/MC/MCExpr.h"
32 #include "llvm/MC/MCInst.h"
33 #include "llvm/MC/MCInstrDesc.h"
34 #include "llvm/MC/MCInstrInfo.h"
41 #include "llvm/MC/MCRegisterInfo.h"
42 #include "llvm/MC/MCSection.h"
43 #include "llvm/MC/MCStreamer.h"
45 #include "llvm/MC/MCSymbol.h"
47 #include "llvm/MC/TargetRegistry.h"
49 #include "llvm/Support/ARMEHABI.h"
50 #include "llvm/Support/Casting.h"
52 #include "llvm/Support/Compiler.h"
55 #include "llvm/Support/SMLoc.h"
58 #include <algorithm>
59 #include <cassert>
60 #include <cstddef>
61 #include <cstdint>
62 #include <iterator>
63 #include <limits>
64 #include <memory>
65 #include <string>
66 #include <utility>
67 #include <vector>
68 
// Debug category used by LLVM_DEBUG output originating from this file.
#define DEBUG_TYPE "asm-parser"

using namespace llvm;

namespace llvm {
// Opcode-indexed table of ARM instruction descriptors.
// NOTE(review): presumably the tablegen-generated table defined in the
// MCTargetDesc library (ARMGenInstrInfo.inc) -- confirm against the build.
extern const MCInstrDesc ARMInsts[];
} // end namespace llvm
76 
77 namespace {
78 
79 enum class ImplicitItModeTy { Always, Never, ARMOnly, ThumbOnly };
80 
81 static cl::opt<ImplicitItModeTy> ImplicitItMode(
82  "arm-implicit-it", cl::init(ImplicitItModeTy::ARMOnly),
83  cl::desc("Allow conditional instructions outdside of an IT block"),
84  cl::values(clEnumValN(ImplicitItModeTy::Always, "always",
85  "Accept in both ISAs, emit implicit ITs in Thumb"),
86  clEnumValN(ImplicitItModeTy::Never, "never",
87  "Warn in ARM, reject in Thumb"),
88  clEnumValN(ImplicitItModeTy::ARMOnly, "arm",
89  "Accept in ARM, reject in Thumb"),
90  clEnumValN(ImplicitItModeTy::ThumbOnly, "thumb",
91  "Warn in ARM, emit implicit ITs in Thumb")));
92 
93 static cl::opt<bool> AddBuildAttributes("arm-add-build-attributes",
94  cl::init(false));
95 
96 enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
97 
// Pull out the state bit for a single slot of an IT block.
//
// Position == 0 means we are not inside an IT block at all. Position == 1
// selects the first state bit, which is always 0 (the mandatory leading
// "Then"). Position == 2 selects the second state bit, stored at bit 3 of
// Mask, and so on downwards; shifting right by (5 - Position) therefore
// lands the requested bit at bit 0 (bit 4 is the implicit always-0 bit for
// the initial Then).
static inline unsigned extractITMaskBit(unsigned Mask, unsigned Position) {
  const unsigned Shift = 5 - Position;
  return (Mask >> Shift) & 1u;
}
107 
108 class UnwindContext {
109  using Locs = SmallVector<SMLoc, 4>;
110 
111  MCAsmParser &Parser;
112  Locs FnStartLocs;
113  Locs CantUnwindLocs;
114  Locs PersonalityLocs;
115  Locs PersonalityIndexLocs;
116  Locs HandlerDataLocs;
117  int FPReg;
118 
119 public:
120  UnwindContext(MCAsmParser &P) : Parser(P), FPReg(ARM::SP) {}
121 
122  bool hasFnStart() const { return !FnStartLocs.empty(); }
123  bool cantUnwind() const { return !CantUnwindLocs.empty(); }
124  bool hasHandlerData() const { return !HandlerDataLocs.empty(); }
125 
126  bool hasPersonality() const {
127  return !(PersonalityLocs.empty() && PersonalityIndexLocs.empty());
128  }
129 
130  void recordFnStart(SMLoc L) { FnStartLocs.push_back(L); }
131  void recordCantUnwind(SMLoc L) { CantUnwindLocs.push_back(L); }
132  void recordPersonality(SMLoc L) { PersonalityLocs.push_back(L); }
133  void recordHandlerData(SMLoc L) { HandlerDataLocs.push_back(L); }
134  void recordPersonalityIndex(SMLoc L) { PersonalityIndexLocs.push_back(L); }
135 
136  void saveFPReg(int Reg) { FPReg = Reg; }
137  int getFPReg() const { return FPReg; }
138 
139  void emitFnStartLocNotes() const {
140  for (const SMLoc &Loc : FnStartLocs)
141  Parser.Note(Loc, ".fnstart was specified here");
142  }
143 
144  void emitCantUnwindLocNotes() const {
145  for (const SMLoc &Loc : CantUnwindLocs)
146  Parser.Note(Loc, ".cantunwind was specified here");
147  }
148 
149  void emitHandlerDataLocNotes() const {
150  for (const SMLoc &Loc : HandlerDataLocs)
151  Parser.Note(Loc, ".handlerdata was specified here");
152  }
153 
154  void emitPersonalityLocNotes() const {
155  for (Locs::const_iterator PI = PersonalityLocs.begin(),
156  PE = PersonalityLocs.end(),
157  PII = PersonalityIndexLocs.begin(),
158  PIE = PersonalityIndexLocs.end();
159  PI != PE || PII != PIE;) {
160  if (PI != PE && (PII == PIE || PI->getPointer() < PII->getPointer()))
161  Parser.Note(*PI++, ".personality was specified here");
162  else if (PII != PIE && (PI == PE || PII->getPointer() < PI->getPointer()))
163  Parser.Note(*PII++, ".personalityindex was specified here");
164  else
165  llvm_unreachable(".personality and .personalityindex cannot be "
166  "at the same location");
167  }
168  }
169 
170  void reset() {
171  FnStartLocs = Locs();
172  CantUnwindLocs = Locs();
173  PersonalityLocs = Locs();
174  HandlerDataLocs = Locs();
175  PersonalityIndexLocs = Locs();
176  FPReg = ARM::SP;
177  }
178 };
179 
180 // Various sets of ARM instruction mnemonics which are used by the asm parser
181 class ARMMnemonicSets {
182  StringSet<> CDE;
183  StringSet<> CDEWithVPTSuffix;
184 public:
185  ARMMnemonicSets(const MCSubtargetInfo &STI);
186 
187  /// Returns true iff a given mnemonic is a CDE instruction
188  bool isCDEInstr(StringRef Mnemonic) {
189  // Quick check before searching the set
190  if (!Mnemonic.startswith("cx") && !Mnemonic.startswith("vcx"))
191  return false;
192  return CDE.count(Mnemonic);
193  }
194 
195  /// Returns true iff a given mnemonic is a VPT-predicable CDE instruction
196  /// (possibly with a predication suffix "e" or "t")
197  bool isVPTPredicableCDEInstr(StringRef Mnemonic) {
198  if (!Mnemonic.startswith("vcx"))
199  return false;
200  return CDEWithVPTSuffix.count(Mnemonic);
201  }
202 
203  /// Returns true iff a given mnemonic is an IT-predicable CDE instruction
204  /// (possibly with a condition suffix)
205  bool isITPredicableCDEInstr(StringRef Mnemonic) {
206  if (!Mnemonic.startswith("cx"))
207  return false;
208  return Mnemonic.startswith("cx1a") || Mnemonic.startswith("cx1da") ||
209  Mnemonic.startswith("cx2a") || Mnemonic.startswith("cx2da") ||
210  Mnemonic.startswith("cx3a") || Mnemonic.startswith("cx3da");
211  }
212 
213  /// Return true iff a given mnemonic is an integer CDE instruction with
214  /// dual-register destination
215  bool isCDEDualRegInstr(StringRef Mnemonic) {
216  if (!Mnemonic.startswith("cx"))
217  return false;
218  return Mnemonic == "cx1d" || Mnemonic == "cx1da" ||
219  Mnemonic == "cx2d" || Mnemonic == "cx2da" ||
220  Mnemonic == "cx3d" || Mnemonic == "cx3da";
221  }
222 };
223 
224 ARMMnemonicSets::ARMMnemonicSets(const MCSubtargetInfo &STI) {
225  for (StringRef Mnemonic: { "cx1", "cx1a", "cx1d", "cx1da",
226  "cx2", "cx2a", "cx2d", "cx2da",
227  "cx3", "cx3a", "cx3d", "cx3da", })
228  CDE.insert(Mnemonic);
229  for (StringRef Mnemonic :
230  {"vcx1", "vcx1a", "vcx2", "vcx2a", "vcx3", "vcx3a"}) {
231  CDE.insert(Mnemonic);
232  CDEWithVPTSuffix.insert(Mnemonic);
233  CDEWithVPTSuffix.insert(std::string(Mnemonic) + "t");
234  CDEWithVPTSuffix.insert(std::string(Mnemonic) + "e");
235  }
236 }
237 
238 class ARMAsmParser : public MCTargetAsmParser {
239  const MCRegisterInfo *MRI;
240  UnwindContext UC;
241  ARMMnemonicSets MS;
242 
243  ARMTargetStreamer &getTargetStreamer() {
244  assert(getParser().getStreamer().getTargetStreamer() &&
245  "do not have a target streamer");
246  MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
247  return static_cast<ARMTargetStreamer &>(TS);
248  }
249 
250  // Map of register aliases registers via the .req directive.
251  StringMap<unsigned> RegisterReqs;
252 
253  bool NextSymbolIsThumb;
254 
255  bool useImplicitITThumb() const {
256  return ImplicitItMode == ImplicitItModeTy::Always ||
257  ImplicitItMode == ImplicitItModeTy::ThumbOnly;
258  }
259 
260  bool useImplicitITARM() const {
261  return ImplicitItMode == ImplicitItModeTy::Always ||
262  ImplicitItMode == ImplicitItModeTy::ARMOnly;
263  }
264 
265  struct {
266  ARMCC::CondCodes Cond; // Condition for IT block.
267  unsigned Mask:4; // Condition mask for instructions.
268  // Starting at first 1 (from lsb).
269  // '1' condition as indicated in IT.
270  // '0' inverse of condition (else).
271  // Count of instructions in IT block is
272  // 4 - trailingzeroes(mask)
273  // Note that this does not have the same encoding
274  // as in the IT instruction, which also depends
275  // on the low bit of the condition code.
276 
277  unsigned CurPosition; // Current position in parsing of IT
278  // block. In range [0,4], with 0 being the IT
279  // instruction itself. Initialized according to
280  // count of instructions in block. ~0U if no
281  // active IT block.
282 
283  bool IsExplicit; // true - The IT instruction was present in the
284  // input, we should not modify it.
285  // false - The IT instruction was added
286  // implicitly, we can extend it if that
287  // would be legal.
288  } ITState;
289 
290  SmallVector<MCInst, 4> PendingConditionalInsts;
291 
292  void flushPendingInstructions(MCStreamer &Out) override {
293  if (!inImplicitITBlock()) {
294  assert(PendingConditionalInsts.size() == 0);
295  return;
296  }
297 
298  // Emit the IT instruction
299  MCInst ITInst;
300  ITInst.setOpcode(ARM::t2IT);
301  ITInst.addOperand(MCOperand::createImm(ITState.Cond));
302  ITInst.addOperand(MCOperand::createImm(ITState.Mask));
303  Out.emitInstruction(ITInst, getSTI());
304 
305  // Emit the conditional instructions
306  assert(PendingConditionalInsts.size() <= 4);
307  for (const MCInst &Inst : PendingConditionalInsts) {
308  Out.emitInstruction(Inst, getSTI());
309  }
310  PendingConditionalInsts.clear();
311 
312  // Clear the IT state
313  ITState.Mask = 0;
314  ITState.CurPosition = ~0U;
315  }
316 
317  bool inITBlock() { return ITState.CurPosition != ~0U; }
318  bool inExplicitITBlock() { return inITBlock() && ITState.IsExplicit; }
319  bool inImplicitITBlock() { return inITBlock() && !ITState.IsExplicit; }
320 
321  bool lastInITBlock() {
322  return ITState.CurPosition == 4 - countTrailingZeros(ITState.Mask);
323  }
324 
325  void forwardITPosition() {
326  if (!inITBlock()) return;
327  // Move to the next instruction in the IT block, if there is one. If not,
328  // mark the block as done, except for implicit IT blocks, which we leave
329  // open until we find an instruction that can't be added to it.
330  unsigned TZ = countTrailingZeros(ITState.Mask);
331  if (++ITState.CurPosition == 5 - TZ && ITState.IsExplicit)
332  ITState.CurPosition = ~0U; // Done with the IT block after this.
333  }
334 
335  // Rewind the state of the current IT block, removing the last slot from it.
336  void rewindImplicitITPosition() {
337  assert(inImplicitITBlock());
338  assert(ITState.CurPosition > 1);
339  ITState.CurPosition--;
340  unsigned TZ = countTrailingZeros(ITState.Mask);
341  unsigned NewMask = 0;
342  NewMask |= ITState.Mask & (0xC << TZ);
343  NewMask |= 0x2 << TZ;
344  ITState.Mask = NewMask;
345  }
346 
347  // Rewind the state of the current IT block, removing the last slot from it.
348  // If we were at the first slot, this closes the IT block.
349  void discardImplicitITBlock() {
350  assert(inImplicitITBlock());
351  assert(ITState.CurPosition == 1);
352  ITState.CurPosition = ~0U;
353  }
354 
355  // Return the low-subreg of a given Q register.
356  unsigned getDRegFromQReg(unsigned QReg) const {
357  return MRI->getSubReg(QReg, ARM::dsub_0);
358  }
359 
360  // Get the condition code corresponding to the current IT block slot.
361  ARMCC::CondCodes currentITCond() {
362  unsigned MaskBit = extractITMaskBit(ITState.Mask, ITState.CurPosition);
363  return MaskBit ? ARMCC::getOppositeCondition(ITState.Cond) : ITState.Cond;
364  }
365 
366  // Invert the condition of the current IT block slot without changing any
367  // other slots in the same block.
368  void invertCurrentITCondition() {
369  if (ITState.CurPosition == 1) {
370  ITState.Cond = ARMCC::getOppositeCondition(ITState.Cond);
371  } else {
372  ITState.Mask ^= 1 << (5 - ITState.CurPosition);
373  }
374  }
375 
376  // Returns true if the current IT block is full (all 4 slots used).
377  bool isITBlockFull() {
378  return inITBlock() && (ITState.Mask & 1);
379  }
380 
381  // Extend the current implicit IT block to have one more slot with the given
382  // condition code.
383  void extendImplicitITBlock(ARMCC::CondCodes Cond) {
384  assert(inImplicitITBlock());
385  assert(!isITBlockFull());
386  assert(Cond == ITState.Cond ||
387  Cond == ARMCC::getOppositeCondition(ITState.Cond));
388  unsigned TZ = countTrailingZeros(ITState.Mask);
389  unsigned NewMask = 0;
390  // Keep any existing condition bits.
391  NewMask |= ITState.Mask & (0xE << TZ);
392  // Insert the new condition bit.
393  NewMask |= (Cond != ITState.Cond) << TZ;
394  // Move the trailing 1 down one bit.
395  NewMask |= 1 << (TZ - 1);
396  ITState.Mask = NewMask;
397  }
398 
399  // Create a new implicit IT block with a dummy condition code.
400  void startImplicitITBlock() {
401  assert(!inITBlock());
402  ITState.Cond = ARMCC::AL;
403  ITState.Mask = 8;
404  ITState.CurPosition = 1;
405  ITState.IsExplicit = false;
406  }
407 
408  // Create a new explicit IT block with the given condition and mask.
409  // The mask should be in the format used in ARMOperand and
410  // MCOperand, with a 1 implying 'e', regardless of the low bit of
411  // the condition.
412  void startExplicitITBlock(ARMCC::CondCodes Cond, unsigned Mask) {
413  assert(!inITBlock());
414  ITState.Cond = Cond;
415  ITState.Mask = Mask;
416  ITState.CurPosition = 0;
417  ITState.IsExplicit = true;
418  }
419 
420  struct {
421  unsigned Mask : 4;
422  unsigned CurPosition;
423  } VPTState;
424  bool inVPTBlock() { return VPTState.CurPosition != ~0U; }
425  void forwardVPTPosition() {
426  if (!inVPTBlock()) return;
427  unsigned TZ = countTrailingZeros(VPTState.Mask);
428  if (++VPTState.CurPosition == 5 - TZ)
429  VPTState.CurPosition = ~0U;
430  }
431 
432  void Note(SMLoc L, const Twine &Msg, SMRange Range = None) {
433  return getParser().Note(L, Msg, Range);
434  }
435 
436  bool Warning(SMLoc L, const Twine &Msg, SMRange Range = None) {
437  return getParser().Warning(L, Msg, Range);
438  }
439 
440  bool Error(SMLoc L, const Twine &Msg, SMRange Range = None) {
441  return getParser().Error(L, Msg, Range);
442  }
443 
444  bool validatetLDMRegList(const MCInst &Inst, const OperandVector &Operands,
445  unsigned ListNo, bool IsARPop = false);
446  bool validatetSTMRegList(const MCInst &Inst, const OperandVector &Operands,
447  unsigned ListNo);
448 
449  int tryParseRegister();
450  bool tryParseRegisterWithWriteBack(OperandVector &);
451  int tryParseShiftRegister(OperandVector &);
452  bool parseRegisterList(OperandVector &, bool EnforceOrder = true,
453  bool AllowRAAC = false);
454  bool parseMemory(OperandVector &);
455  bool parseOperand(OperandVector &, StringRef Mnemonic);
456  bool parseImmExpr(int64_t &Out);
457  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
458  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
459  unsigned &ShiftAmount);
460  bool parseLiteralValues(unsigned Size, SMLoc L);
461  bool parseDirectiveThumb(SMLoc L);
462  bool parseDirectiveARM(SMLoc L);
463  bool parseDirectiveThumbFunc(SMLoc L);
464  bool parseDirectiveCode(SMLoc L);
465  bool parseDirectiveSyntax(SMLoc L);
466  bool parseDirectiveReq(StringRef Name, SMLoc L);
467  bool parseDirectiveUnreq(SMLoc L);
468  bool parseDirectiveArch(SMLoc L);
469  bool parseDirectiveEabiAttr(SMLoc L);
470  bool parseDirectiveCPU(SMLoc L);
471  bool parseDirectiveFPU(SMLoc L);
472  bool parseDirectiveFnStart(SMLoc L);
473  bool parseDirectiveFnEnd(SMLoc L);
474  bool parseDirectiveCantUnwind(SMLoc L);
475  bool parseDirectivePersonality(SMLoc L);
476  bool parseDirectiveHandlerData(SMLoc L);
477  bool parseDirectiveSetFP(SMLoc L);
478  bool parseDirectivePad(SMLoc L);
479  bool parseDirectiveRegSave(SMLoc L, bool IsVector);
480  bool parseDirectiveInst(SMLoc L, char Suffix = '\0');
481  bool parseDirectiveLtorg(SMLoc L);
482  bool parseDirectiveEven(SMLoc L);
483  bool parseDirectivePersonalityIndex(SMLoc L);
484  bool parseDirectiveUnwindRaw(SMLoc L);
485  bool parseDirectiveTLSDescSeq(SMLoc L);
486  bool parseDirectiveMovSP(SMLoc L);
487  bool parseDirectiveObjectArch(SMLoc L);
488  bool parseDirectiveArchExtension(SMLoc L);
489  bool parseDirectiveAlign(SMLoc L);
490  bool parseDirectiveThumbSet(SMLoc L);
491 
492  bool parseDirectiveSEHAllocStack(SMLoc L, bool Wide);
493  bool parseDirectiveSEHSaveRegs(SMLoc L, bool Wide);
494  bool parseDirectiveSEHSaveSP(SMLoc L);
495  bool parseDirectiveSEHSaveFRegs(SMLoc L);
496  bool parseDirectiveSEHSaveLR(SMLoc L);
497  bool parseDirectiveSEHPrologEnd(SMLoc L, bool Fragment);
498  bool parseDirectiveSEHNop(SMLoc L, bool Wide);
499  bool parseDirectiveSEHEpilogStart(SMLoc L, bool Condition);
500  bool parseDirectiveSEHEpilogEnd(SMLoc L);
501  bool parseDirectiveSEHCustom(SMLoc L);
502 
503  bool isMnemonicVPTPredicable(StringRef Mnemonic, StringRef ExtraToken);
504  StringRef splitMnemonic(StringRef Mnemonic, StringRef ExtraToken,
505  unsigned &PredicationCode,
506  unsigned &VPTPredicationCode, bool &CarrySetting,
507  unsigned &ProcessorIMod, StringRef &ITMask);
508  void getMnemonicAcceptInfo(StringRef Mnemonic, StringRef ExtraToken,
509  StringRef FullInst, bool &CanAcceptCarrySet,
510  bool &CanAcceptPredicationCode,
511  bool &CanAcceptVPTPredicationCode);
512  bool enableArchExtFeature(StringRef Name, SMLoc &ExtLoc);
513 
514  void tryConvertingToTwoOperandForm(StringRef Mnemonic, bool CarrySetting,
516  bool CDEConvertDualRegOperand(StringRef Mnemonic, OperandVector &Operands);
517 
518  bool isThumb() const {
519  // FIXME: Can tablegen auto-generate this?
520  return getSTI().getFeatureBits()[ARM::ModeThumb];
521  }
522 
523  bool isThumbOne() const {
524  return isThumb() && !getSTI().getFeatureBits()[ARM::FeatureThumb2];
525  }
526 
527  bool isThumbTwo() const {
528  return isThumb() && getSTI().getFeatureBits()[ARM::FeatureThumb2];
529  }
530 
531  bool hasThumb() const {
532  return getSTI().getFeatureBits()[ARM::HasV4TOps];
533  }
534 
535  bool hasThumb2() const {
536  return getSTI().getFeatureBits()[ARM::FeatureThumb2];
537  }
538 
539  bool hasV6Ops() const {
540  return getSTI().getFeatureBits()[ARM::HasV6Ops];
541  }
542 
543  bool hasV6T2Ops() const {
544  return getSTI().getFeatureBits()[ARM::HasV6T2Ops];
545  }
546 
547  bool hasV6MOps() const {
548  return getSTI().getFeatureBits()[ARM::HasV6MOps];
549  }
550 
551  bool hasV7Ops() const {
552  return getSTI().getFeatureBits()[ARM::HasV7Ops];
553  }
554 
555  bool hasV8Ops() const {
556  return getSTI().getFeatureBits()[ARM::HasV8Ops];
557  }
558 
559  bool hasV8MBaseline() const {
560  return getSTI().getFeatureBits()[ARM::HasV8MBaselineOps];
561  }
562 
563  bool hasV8MMainline() const {
564  return getSTI().getFeatureBits()[ARM::HasV8MMainlineOps];
565  }
566  bool hasV8_1MMainline() const {
567  return getSTI().getFeatureBits()[ARM::HasV8_1MMainlineOps];
568  }
569  bool hasMVE() const {
570  return getSTI().getFeatureBits()[ARM::HasMVEIntegerOps];
571  }
572  bool hasMVEFloat() const {
573  return getSTI().getFeatureBits()[ARM::HasMVEFloatOps];
574  }
575  bool hasCDE() const {
576  return getSTI().getFeatureBits()[ARM::HasCDEOps];
577  }
578  bool has8MSecExt() const {
579  return getSTI().getFeatureBits()[ARM::Feature8MSecExt];
580  }
581 
582  bool hasARM() const {
583  return !getSTI().getFeatureBits()[ARM::FeatureNoARM];
584  }
585 
586  bool hasDSP() const {
587  return getSTI().getFeatureBits()[ARM::FeatureDSP];
588  }
589 
590  bool hasD32() const {
591  return getSTI().getFeatureBits()[ARM::FeatureD32];
592  }
593 
594  bool hasV8_1aOps() const {
595  return getSTI().getFeatureBits()[ARM::HasV8_1aOps];
596  }
597 
598  bool hasRAS() const {
599  return getSTI().getFeatureBits()[ARM::FeatureRAS];
600  }
601 
602  void SwitchMode() {
603  MCSubtargetInfo &STI = copySTI();
604  auto FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
605  setAvailableFeatures(FB);
606  }
607 
608  void FixModeAfterArchChange(bool WasThumb, SMLoc Loc);
609 
610  bool isMClass() const {
611  return getSTI().getFeatureBits()[ARM::FeatureMClass];
612  }
613 
614  /// @name Auto-generated Match Functions
615  /// {
616 
617 #define GET_ASSEMBLER_HEADER
618 #include "ARMGenAsmMatcher.inc"
619 
620  /// }
621 
622  OperandMatchResultTy parseITCondCode(OperandVector &);
623  OperandMatchResultTy parseCoprocNumOperand(OperandVector &);
624  OperandMatchResultTy parseCoprocRegOperand(OperandVector &);
625  OperandMatchResultTy parseCoprocOptionOperand(OperandVector &);
626  OperandMatchResultTy parseMemBarrierOptOperand(OperandVector &);
627  OperandMatchResultTy parseTraceSyncBarrierOptOperand(OperandVector &);
628  OperandMatchResultTy parseInstSyncBarrierOptOperand(OperandVector &);
629  OperandMatchResultTy parseProcIFlagsOperand(OperandVector &);
630  OperandMatchResultTy parseMSRMaskOperand(OperandVector &);
631  OperandMatchResultTy parseBankedRegOperand(OperandVector &);
632  OperandMatchResultTy parsePKHImm(OperandVector &O, StringRef Op, int Low,
633  int High);
634  OperandMatchResultTy parsePKHLSLImm(OperandVector &O) {
635  return parsePKHImm(O, "lsl", 0, 31);
636  }
637  OperandMatchResultTy parsePKHASRImm(OperandVector &O) {
638  return parsePKHImm(O, "asr", 1, 32);
639  }
640  OperandMatchResultTy parseSetEndImm(OperandVector &);
641  OperandMatchResultTy parseShifterImm(OperandVector &);
642  OperandMatchResultTy parseRotImm(OperandVector &);
643  OperandMatchResultTy parseModImm(OperandVector &);
644  OperandMatchResultTy parseBitfield(OperandVector &);
645  OperandMatchResultTy parsePostIdxReg(OperandVector &);
646  OperandMatchResultTy parseAM3Offset(OperandVector &);
647  OperandMatchResultTy parseFPImm(OperandVector &);
648  OperandMatchResultTy parseVectorList(OperandVector &);
649  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index,
650  SMLoc &EndLoc);
651 
652  // Asm Match Converter Methods
653  void cvtThumbMultiply(MCInst &Inst, const OperandVector &);
654  void cvtThumbBranches(MCInst &Inst, const OperandVector &);
655  void cvtMVEVMOVQtoDReg(MCInst &Inst, const OperandVector &);
656 
657  bool validateInstruction(MCInst &Inst, const OperandVector &Ops);
658  bool processInstruction(MCInst &Inst, const OperandVector &Ops, MCStreamer &Out);
659  bool shouldOmitCCOutOperand(StringRef Mnemonic, OperandVector &Operands);
660  bool shouldOmitPredicateOperand(StringRef Mnemonic, OperandVector &Operands);
661  bool shouldOmitVectorPredicateOperand(StringRef Mnemonic, OperandVector &Operands);
662  bool isITBlockTerminator(MCInst &Inst) const;
663  void fixupGNULDRDAlias(StringRef Mnemonic, OperandVector &Operands);
664  bool validateLDRDSTRD(MCInst &Inst, const OperandVector &Operands,
665  bool Load, bool ARMMode, bool Writeback);
666 
667 public:
668  enum ARMMatchResultTy {
669  Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
670  Match_RequiresNotITBlock,
671  Match_RequiresV6,
672  Match_RequiresThumb2,
673  Match_RequiresV8,
674  Match_RequiresFlagSetting,
675 #define GET_OPERAND_DIAGNOSTIC_TYPES
676 #include "ARMGenAsmMatcher.inc"
677 
678  };
679 
680  ARMAsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
681  const MCInstrInfo &MII, const MCTargetOptions &Options)
682  : MCTargetAsmParser(Options, STI, MII), UC(Parser), MS(STI) {
684 
685  // Cache the MCRegisterInfo.
686  MRI = getContext().getRegisterInfo();
687 
688  // Initialize the set of available features.
689  setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
690 
691  // Add build attributes based on the selected target.
692  if (AddBuildAttributes)
693  getTargetStreamer().emitTargetAttributes(STI);
694 
695  // Not in an ITBlock to start with.
696  ITState.CurPosition = ~0U;
697 
698  VPTState.CurPosition = ~0U;
699 
700  NextSymbolIsThumb = false;
701  }
702 
703  // Implementation of the MCTargetAsmParser interface:
704  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
705  OperandMatchResultTy tryParseRegister(unsigned &RegNo, SMLoc &StartLoc,
706  SMLoc &EndLoc) override;
707  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
708  SMLoc NameLoc, OperandVector &Operands) override;
709  bool ParseDirective(AsmToken DirectiveID) override;
710 
711  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
712  unsigned Kind) override;
713  unsigned checkTargetMatchPredicate(MCInst &Inst) override;
714 
715  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
718  bool MatchingInlineAsm) override;
719  unsigned MatchInstruction(OperandVector &Operands, MCInst &Inst,
720  SmallVectorImpl<NearMissInfo> &NearMisses,
721  bool MatchingInlineAsm, bool &EmitInITBlock,
722  MCStreamer &Out);
723 
724  struct NearMissMessage {
725  SMLoc Loc;
726  SmallString<128> Message;
727  };
728 
729  const char *getCustomOperandDiag(ARMMatchResultTy MatchError);
730 
731  void FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
732  SmallVectorImpl<NearMissMessage> &NearMissesOut,
733  SMLoc IDLoc, OperandVector &Operands);
734  void ReportNearMisses(SmallVectorImpl<NearMissInfo> &NearMisses, SMLoc IDLoc,
736 
737  void doBeforeLabelEmit(MCSymbol *Symbol, SMLoc IDLoc) override;
738 
739  void onLabelParsed(MCSymbol *Symbol) override;
740 };
741 
742 /// ARMOperand - Instances of this class represent a parsed ARM machine
743 /// operand.
744 class ARMOperand : public MCParsedAsmOperand {
745  enum KindTy {
746  k_CondCode,
747  k_VPTPred,
748  k_CCOut,
749  k_ITCondMask,
750  k_CoprocNum,
751  k_CoprocReg,
752  k_CoprocOption,
753  k_Immediate,
754  k_MemBarrierOpt,
755  k_InstSyncBarrierOpt,
756  k_TraceSyncBarrierOpt,
757  k_Memory,
758  k_PostIndexRegister,
759  k_MSRMask,
760  k_BankedReg,
761  k_ProcIFlags,
762  k_VectorIndex,
763  k_Register,
764  k_RegisterList,
765  k_RegisterListWithAPSR,
766  k_DPRRegisterList,
767  k_SPRRegisterList,
768  k_FPSRegisterListWithVPR,
769  k_FPDRegisterListWithVPR,
770  k_VectorList,
771  k_VectorListAllLanes,
772  k_VectorListIndexed,
773  k_ShiftedRegister,
774  k_ShiftedImmediate,
775  k_ShifterImmediate,
776  k_RotateImmediate,
777  k_ModifiedImmediate,
778  k_ConstantPoolImmediate,
779  k_BitfieldDescriptor,
780  k_Token,
781  } Kind;
782 
783  SMLoc StartLoc, EndLoc, AlignmentLoc;
785 
786  struct CCOp {
787  ARMCC::CondCodes Val;
788  };
789 
790  struct VCCOp {
791  ARMVCC::VPTCodes Val;
792  };
793 
794  struct CopOp {
795  unsigned Val;
796  };
797 
798  struct CoprocOptionOp {
799  unsigned Val;
800  };
801 
802  struct ITMaskOp {
803  unsigned Mask:4;
804  };
805 
806  struct MBOptOp {
807  ARM_MB::MemBOpt Val;
808  };
809 
810  struct ISBOptOp {
812  };
813 
814  struct TSBOptOp {
816  };
817 
818  struct IFlagsOp {
819  ARM_PROC::IFlags Val;
820  };
821 
822  struct MMaskOp {
823  unsigned Val;
824  };
825 
826  struct BankedRegOp {
827  unsigned Val;
828  };
829 
830  struct TokOp {
831  const char *Data;
832  unsigned Length;
833  };
834 
835  struct RegOp {
836  unsigned RegNum;
837  };
838 
839  // A vector register list is a sequential list of 1 to 4 registers.
840  struct VectorListOp {
841  unsigned RegNum;
842  unsigned Count;
843  unsigned LaneIndex;
844  bool isDoubleSpaced;
845  };
846 
847  struct VectorIndexOp {
848  unsigned Val;
849  };
850 
851  struct ImmOp {
852  const MCExpr *Val;
853  };
854 
855  /// Combined record for all forms of ARM address expressions.
856  struct MemoryOp {
857  unsigned BaseRegNum;
858  // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
859  // was specified.
860  const MCExpr *OffsetImm; // Offset immediate value
861  unsigned OffsetRegNum; // Offset register num, when OffsetImm == NULL
862  ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
863  unsigned ShiftImm; // shift for OffsetReg.
864  unsigned Alignment; // 0 = no alignment specified
865  // n = alignment in bytes (2, 4, 8, 16, or 32)
866  unsigned isNegative : 1; // Negated OffsetReg? (~'U' bit)
867  };
868 
869  struct PostIdxRegOp {
870  unsigned RegNum;
871  bool isAdd;
872  ARM_AM::ShiftOpc ShiftTy;
873  unsigned ShiftImm;
874  };
875 
876  struct ShifterImmOp {
877  bool isASR;
878  unsigned Imm;
879  };
880 
881  struct RegShiftedRegOp {
882  ARM_AM::ShiftOpc ShiftTy;
883  unsigned SrcReg;
884  unsigned ShiftReg;
885  unsigned ShiftImm;
886  };
887 
888  struct RegShiftedImmOp {
889  ARM_AM::ShiftOpc ShiftTy;
890  unsigned SrcReg;
891  unsigned ShiftImm;
892  };
893 
894  struct RotImmOp {
895  unsigned Imm;
896  };
897 
898  struct ModImmOp {
899  unsigned Bits;
900  unsigned Rot;
901  };
902 
903  struct BitfieldOp {
904  unsigned LSB;
905  unsigned Width;
906  };
907 
908  union {
909  struct CCOp CC;
910  struct VCCOp VCC;
911  struct CopOp Cop;
912  struct CoprocOptionOp CoprocOption;
913  struct MBOptOp MBOpt;
914  struct ISBOptOp ISBOpt;
915  struct TSBOptOp TSBOpt;
916  struct ITMaskOp ITMask;
917  struct IFlagsOp IFlags;
918  struct MMaskOp MMask;
919  struct BankedRegOp BankedReg;
920  struct TokOp Tok;
921  struct RegOp Reg;
922  struct VectorListOp VectorList;
923  struct VectorIndexOp VectorIndex;
924  struct ImmOp Imm;
925  struct MemoryOp Memory;
926  struct PostIdxRegOp PostIdxReg;
927  struct ShifterImmOp ShifterImm;
928  struct RegShiftedRegOp RegShiftedReg;
929  struct RegShiftedImmOp RegShiftedImm;
930  struct RotImmOp RotImm;
931  struct ModImmOp ModImm;
932  struct BitfieldOp Bitfield;
933  };
934 
public:
  ARMOperand(KindTy K) : Kind(K) {}

  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }

  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  /// getLocRange - Get the range between the first and last token of this
  /// operand.
  SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }

  /// getAlignmentLoc - Get the location of the Alignment token of this operand.
  SMLoc getAlignmentLoc() const {
    assert(Kind == k_Memory && "Invalid access!");
    return AlignmentLoc;
  }

  // Accessor for condition-code operands.
  ARMCC::CondCodes getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CC.Val;
  }

  // Accessor for VPT predicate operands.
  ARMVCC::VPTCodes getVPTPred() const {
    assert(isVPTPred() && "Invalid access!");
    return VCC.Val;
  }

  // Accessor shared by coprocessor-number and coprocessor-register operands.
  unsigned getCoproc() const {
    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
    return Cop.Val;
  }

  // Accessor for bare-token operands.
  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  // Accessor for register operands (also valid for the optional cc_out).
  unsigned getReg() const override {
    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
    return Reg.RegNum;
  }

  // Accessor shared by every flavour of register-list operand.
  const SmallVectorImpl<unsigned> &getRegList() const {
    assert((Kind == k_RegisterList || Kind == k_RegisterListWithAPSR ||
            Kind == k_DPRRegisterList || Kind == k_SPRRegisterList ||
            Kind == k_FPSRegisterListWithVPR ||
            Kind == k_FPDRegisterListWithVPR) &&
           "Invalid access!");
    return Registers;
  }

  // Accessor for immediate operands.
  const MCExpr *getImm() const {
    assert(isImm() && "Invalid access!");
    return Imm.Val;
  }

  // Accessor for constant-pool immediate operands.
  const MCExpr *getConstantPoolImm() const {
    assert(isConstantPoolImm() && "Invalid access!");
    return Imm.Val;
  }

  // Accessor for vector-index operands.
  unsigned getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  // Accessor for memory-barrier option operands.
  ARM_MB::MemBOpt getMemBarrierOpt() const {
    assert(Kind == k_MemBarrierOpt && "Invalid access!");
    return MBOpt.Val;
  }

  // Accessor for instruction-synchronization barrier option operands.
  ARM_ISB::InstSyncBOpt getInstSyncBarrierOpt() const {
    assert(Kind == k_InstSyncBarrierOpt && "Invalid access!");
    return ISBOpt.Val;
  }

  // Accessor for trace-synchronization barrier option operands.
  ARM_TSB::TraceSyncBOpt getTraceSyncBarrierOpt() const {
    assert(Kind == k_TraceSyncBarrierOpt && "Invalid access!");
    return TSBOpt.Val;
  }

  // Accessor for processor interrupt-flags operands.
  ARM_PROC::IFlags getProcIFlags() const {
    assert(Kind == k_ProcIFlags && "Invalid access!");
    return IFlags.Val;
  }

  // Accessor for MSR mask operands.
  unsigned getMSRMask() const {
    assert(Kind == k_MSRMask && "Invalid access!");
    return MMask.Val;
  }

  // Accessor for banked-register operands.
  unsigned getBankedReg() const {
    assert(Kind == k_BankedReg && "Invalid access!");
    return BankedReg.Val;
  }
1032 
  // Simple kind predicates. Note that isITCondCode deliberately matches the
  // same kind as isCondCode.
  bool isCoprocNum() const { return Kind == k_CoprocNum; }
  bool isCoprocReg() const { return Kind == k_CoprocReg; }
  bool isCoprocOption() const { return Kind == k_CoprocOption; }
  bool isCondCode() const { return Kind == k_CondCode; }
  bool isVPTPred() const { return Kind == k_VPTPred; }
  bool isCCOut() const { return Kind == k_CCOut; }
  bool isITMask() const { return Kind == k_ITCondMask; }
  bool isITCondCode() const { return Kind == k_CondCode; }
  bool isImm() const override {
    return Kind == k_Immediate;
  }
1044 
1045  bool isARMBranchTarget() const {
1046  if (!isImm()) return false;
1047 
1048  if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
1049  return CE->getValue() % 4 == 0;
1050  return true;
1051  }
1052 
1053 
1054  bool isThumbBranchTarget() const {
1055  if (!isImm()) return false;
1056 
1057  if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
1058  return CE->getValue() % 2 == 0;
1059  return true;
1060  }
1061 
  // checks whether this operand is an unsigned offset which fits in a field
  // of specified width and scaled by a specific number of bits
  template<unsigned width, unsigned scale>
  bool isUnsignedOffset() const {
    if (!isImm()) return false;
    // Symbolic references are accepted as-is; they resolve later.
    if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Align = 1LL << scale;
      int64_t Max = Align * ((1LL << width) - 1);
      // Non-negative, aligned, and within the scaled field.
      return ((Val % Align) == 0) && (Val >= 0) && (Val <= Max);
    }
    return false;
  }
1076 
  // checks whether this operand is a signed offset which fits in a field
  // of specified width and scaled by a specific number of bits
  template<unsigned width, unsigned scale>
  bool isSignedOffset() const {
    if (!isImm()) return false;
    // Symbolic references are accepted as-is; they resolve later.
    if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Align = 1LL << scale;
      // Two's-complement range of a width-bit field, scaled by Align.
      int64_t Max = Align * ((1LL << (width-1)) - 1);
      int64_t Min = -Align * (1LL << (width-1));
      return ((Val % Align) == 0) && (Val >= Min) && (Val <= Max);
    }
    return false;
  }
1092 
  // checks whether this operand is an offset suitable for the LE /
  // LETP instructions in Arm v8.1M
  bool isLEOffset() const {
    if (!isImm()) return false;
    // Symbolic references are accepted as-is; they resolve later.
    if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      // Negative, even, and no further back than -4094.
      return Val < 0 && Val >= -4094 && (Val & 1) == 0;
    }
    return false;
  }
1104 
  // checks whether this operand is a memory operand computed as an offset
  // applied to PC. the offset may have 8 bits of magnitude and is represented
  // with two bits of shift. textually it may be either [pc, #imm], #imm or
  // relocatable expression...
  bool isThumbMemPC() const {
    int64_t Val = 0;
    if (isImm()) {
      if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
      if (!CE) return false;
      Val = CE->getValue();
    }
    else if (isGPRMem()) {
      // Must be a constant offset from PC, with no register offset.
      if(!Memory.OffsetImm || Memory.OffsetRegNum) return false;
      if(Memory.BaseRegNum != ARM::PC) return false;
      if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
        Val = CE->getValue();
      else
        return false;
    }
    else return false;
    // Non-negative multiple of 4, at most 1020 (8 bits scaled by 4).
    return ((Val % 4) == 0) && (Val >= 0) && (Val <= 1020);
  }
1128 
1129  bool isFPImm() const {
1130  if (!isImm()) return false;
1131  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1132  if (!CE) return false;
1133  int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
1134  return Val != -1;
1135  }
1136 
  // True if the operand is a constant in the inclusive range [N, M].
  template<int64_t N, int64_t M>
  bool isImmediate() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= N && Value <= M;
  }

  // As isImmediate, but the value must also be a multiple of 4.
  template<int64_t N, int64_t M>
  bool isImmediateS4() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ((Value & 3) == 0) && Value >= N && Value <= M;
  }
  // As isImmediate, but the value must also be a multiple of 2.
  template<int64_t N, int64_t M>
  bool isImmediateS2() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ((Value & 1) == 0) && Value >= N && Value <= M;
  }
  // Range predicates for the various immediate operand classes, expressed
  // via the isImmediate* templates above.
  bool isFBits16() const {
    return isImmediate<0, 17>();
  }
  bool isFBits32() const {
    return isImmediate<1, 33>();
  }
  bool isImm8s4() const {
    return isImmediateS4<-1020, 1020>();
  }
  bool isImm7s4() const {
    return isImmediateS4<-508, 508>();
  }
  bool isImm7Shift0() const {
    return isImmediate<-127, 127>();
  }
  bool isImm7Shift1() const {
    return isImmediateS2<-255, 255>();
  }
  bool isImm7Shift2() const {
    return isImmediateS4<-511, 511>();
  }
  bool isImm7() const {
    return isImmediate<-127, 127>();
  }
  bool isImm0_1020s4() const {
    return isImmediateS4<0, 1020>();
  }
  bool isImm0_508s4() const {
    return isImmediateS4<0, 508>();
  }
  // Negated multiple-of-4 immediate in (0, 508].
  bool isImm0_508s4Neg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = -CE->getValue();
    // explicitly exclude zero. we want that to use the normal 0_508 version.
    return ((Value & 3) == 0) && Value > 0 && Value <= 508;
  }
1200 
  // True if the negation of the constant fits in [1, 4095].
  bool isImm0_4095Neg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    // isImm0_4095Neg is used with 32-bit immediates only.
    // 32-bit immediates are zero extended to 64-bit when parsed,
    // thus simple -CE->getValue() results in a big negative number,
    // not a small positive number as intended
    if ((CE->getValue() >> 32) > 0) return false;
    uint32_t Value = -static_cast<uint32_t>(CE->getValue());
    return Value > 0 && Value < 4096;
  }
1213 
  bool isImm0_7() const {
    return isImmediate<0, 7>();
  }

  bool isImm1_16() const {
    return isImmediate<1, 16>();
  }

  bool isImm1_32() const {
    return isImmediate<1, 32>();
  }

  bool isImm8_255() const {
    return isImmediate<8, 255>();
  }

  // Constant in [256, 65535], or any non-constant expression.
  bool isImm256_65535Expr() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // If it's not a constant expression, it'll generate a fixup and be
    // handled later.
    if (!CE) return true;
    int64_t Value = CE->getValue();
    return Value >= 256 && Value < 65536;
  }

  // Constant in [0, 65535], or any non-constant expression.
  bool isImm0_65535Expr() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // If it's not a constant expression, it'll generate a fixup and be
    // handled later.
    if (!CE) return true;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 65536;
  }

  bool isImm24bit() const {
    return isImmediate<0, 0xffffff + 1>();
  }

  bool isImmThumbSR() const {
    return isImmediate<1, 33>();
  }
1257 
  // True if Value has no bits set below 'shift' and, once shifted right by
  // 'shift', fits in 8 bits.
  template<int shift>
  bool isExpImmValue(uint64_t Value) const {
    uint64_t mask = (1 << shift) - 1;
    if ((Value & mask) != 0 || (Value >> shift) > 0xff)
      return false;
    return true;
  }

  // Constant operand that satisfies isExpImmValue<shift>.
  template<int shift>
  bool isExpImm() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    return isExpImmValue<shift>(CE->getValue());
  }

  // As isExpImm, but tested against the bitwise complement of the low
  // 'size' bits of the constant.
  template<int shift, int size>
  bool isInvertedExpImm() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    uint64_t OriginalValue = CE->getValue();
    uint64_t InvertedValue = OriginalValue ^ (((uint64_t)1 << size) - 1);
    return isExpImmValue<shift>(InvertedValue);
  }
1285 
  // Shift amounts accepted by the PKH instruction forms.
  bool isPKHLSLImm() const {
    return isImmediate<0, 32>();
  }

  bool isPKHASRImm() const {
    return isImmediate<0, 33>();
  }
1293 
  // Operand acceptable to the ADR pseudo-instruction.
  bool isAdrLabel() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;

    // If it is a constant, it must fit into a modified immediate encoding.
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // Either the value or its negation must be a valid so_imm.
    return (ARM_AM::getSOImmVal(Value) != -1 ||
            ARM_AM::getSOImmVal(-Value) != -1);
  }
1308 
  // Thumb2 modified immediate (so_imm) operand.
  bool isT2SOImm() const {
    // If we have an immediate that's not a constant, treat it as an expression
    // needing a fixup.
    if (isImm() && !isa<MCConstantExpr>(getImm())) {
      // We want to avoid matching :upper16: and :lower16: as we want these
      // expressions to match in isImm0_65535Expr()
      const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(getImm());
      return (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 &&
                             ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16));
    }
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getT2SOImmVal(Value) != -1;
  }
1325 
  // Constant whose bitwise complement — but not the value itself — is a
  // valid Thumb2 modified immediate.
  bool isT2SOImmNot() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getT2SOImmVal(Value) == -1 &&
      ARM_AM::getT2SOImmVal(~Value) != -1;
  }

  // Constant whose negation — but not the value itself — is a valid
  // Thumb2 modified immediate.
  bool isT2SOImmNeg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // Only use this when not representable as a plain so_imm.
    return ARM_AM::getT2SOImmVal(Value) == -1 &&
      ARM_AM::getT2SOImmVal(-Value) != -1;
  }

  // SETEND accepts exactly 0 or 1.
  bool isSetEndImm() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value == 1 || Value == 0;
  }
1352 
  // Kind predicates for registers, register lists, tokens and barriers.
  // Note isRegListWithAPSR also accepts a plain register list.
  bool isReg() const override { return Kind == k_Register; }
  bool isRegList() const { return Kind == k_RegisterList; }
  bool isRegListWithAPSR() const {
    return Kind == k_RegisterListWithAPSR || Kind == k_RegisterList;
  }
  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
  bool isFPSRegListWithVPR() const { return Kind == k_FPSRegisterListWithVPR; }
  bool isFPDRegListWithVPR() const { return Kind == k_FPDRegisterListWithVPR; }
  bool isToken() const override { return Kind == k_Token; }
  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
  bool isInstSyncBarrierOpt() const { return Kind == k_InstSyncBarrierOpt; }
  bool isTraceSyncBarrierOpt() const { return Kind == k_TraceSyncBarrierOpt; }
  bool isMem() const override {
    return isGPRMem() || isMVEMem();
  }
  // Memory operand usable by MVE loads/stores: base may be a GPR or an MVE
  // vector register, and any register offset must be an MVE vector register.
  bool isMVEMem() const {
    if (Kind != k_Memory)
      return false;
    if (Memory.BaseRegNum &&
        !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.BaseRegNum) &&
        !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Memory.BaseRegNum))
      return false;
    if (Memory.OffsetRegNum &&
        !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
            Memory.OffsetRegNum))
      return false;
    return true;
  }
  // Memory operand whose base and (optional) offset registers are both GPRs.
  bool isGPRMem() const {
    if (Kind != k_Memory)
      return false;
    if (Memory.BaseRegNum &&
        !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.BaseRegNum))
      return false;
    if (Memory.OffsetRegNum &&
        !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.OffsetRegNum))
      return false;
    return true;
  }
  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
  // Register shifted by a register: both registers must be GPRs.
  bool isRegShiftedReg() const {
    return Kind == k_ShiftedRegister &&
           ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
               RegShiftedReg.SrcReg) &&
           ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
               RegShiftedReg.ShiftReg);
  }
  // Register shifted by an immediate: the source register must be a GPR.
  bool isRegShiftedImm() const {
    return Kind == k_ShiftedImmediate &&
           ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
               RegShiftedImm.SrcReg);
  }
  bool isRotImm() const { return Kind == k_RotateImmediate; }
1407 
  // Constant that is an exact power of two within [Min, Max].
  template<unsigned Min, unsigned Max>
  bool isPowerTwoInRange() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // Exactly one bit set, and within the requested range.
    return Value > 0 && countPopulation((uint64_t)Value) == 1 &&
           Value >= Min && Value <= Max;
  }
  bool isModImm() const { return Kind == k_ModifiedImmediate; }
1418 
  // Constant whose bitwise complement is a valid ARM modified immediate.
  bool isModImmNot() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getSOImmVal(~Value) != -1;
  }

  // Constant whose negation — but not the value itself — is a valid ARM
  // modified immediate.
  bool isModImmNeg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getSOImmVal(Value) == -1 &&
      ARM_AM::getSOImmVal(-Value) != -1;
  }

  // Constant whose negation lies in [1, 7].
  bool isThumbModImmNeg1_7() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int32_t Value = -(int32_t)CE->getValue();
    return 0 < Value && Value < 8;
  }

  // Constant whose negation lies in [8, 255].
  bool isThumbModImmNeg8_255() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int32_t Value = -(int32_t)CE->getValue();
    return 7 < Value && Value < 256;
  }
1451 
  bool isConstantPoolImm() const { return Kind == k_ConstantPoolImmediate; }
  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
  // Post-indexed register (possibly shifted); register must be a GPR.
  bool isPostIdxRegShifted() const {
    return Kind == k_PostIndexRegister &&
           ARMMCRegisterClasses[ARM::GPRRegClassID].contains(PostIdxReg.RegNum);
  }
  // Post-indexed register with no shift applied.
  bool isPostIdxReg() const {
    return isPostIdxRegShifted() && PostIdxReg.ShiftTy == ARM_AM::no_shift;
  }
  // GPR memory operand with no offset; alignment must equal Alignment
  // unless alignOK is set.
  bool isMemNoOffset(bool alignOK = false, unsigned Alignment = 0) const {
    if (!isGPRMem())
      return false;
    // No offset of any kind.
    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
           (alignOK || Memory.Alignment == Alignment);
  }
  // As isMemNoOffset, but the base must be a GPR other than PC.
  bool isMemNoOffsetT2(bool alignOK = false, unsigned Alignment = 0) const {
    if (!isGPRMem())
      return false;

    if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
            Memory.BaseRegNum))
      return false;

    // No offset of any kind.
    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
           (alignOK || Memory.Alignment == Alignment);
  }
  // As isMemNoOffset, but the base must be in the rGPR class.
  bool isMemNoOffsetT2NoSp(bool alignOK = false, unsigned Alignment = 0) const {
    if (!isGPRMem())
      return false;

    if (!ARMMCRegisterClasses[ARM::rGPRRegClassID].contains(
            Memory.BaseRegNum))
      return false;

    // No offset of any kind.
    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
           (alignOK || Memory.Alignment == Alignment);
  }
  // As isMemNoOffset, but the base must be in the tGPR class.
  bool isMemNoOffsetT(bool alignOK = false, unsigned Alignment = 0) const {
    if (!isGPRMem())
      return false;

    if (!ARMMCRegisterClasses[ARM::tGPRRegClassID].contains(
            Memory.BaseRegNum))
      return false;

    // No offset of any kind.
    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
           (alignOK || Memory.Alignment == Alignment);
  }
1504  bool isMemPCRelImm12() const {
1505  if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1506  return false;
1507  // Base register must be PC.
1508  if (Memory.BaseRegNum != ARM::PC)
1509  return false;
1510  // Immediate offset in range [-4095, 4095].
1511  if (!Memory.OffsetImm) return true;
1512  if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1513  int64_t Val = CE->getValue();
1514  return (Val > -4096 && Val < 4096) ||
1516  }
1517  return false;
1518  }
1519 
  // Memory operand where any alignment qualifier is acceptable.
  bool isAlignedMemory() const {
    return isMemNoOffset(true);
  }

  // The isAlignedMemory*/isDupAlignedMemory* predicates below accept either
  // no alignment qualifier or the specific byte alignment named in each
  // comment.
  bool isAlignedMemoryNone() const {
    return isMemNoOffset(false, 0);
  }

  bool isDupAlignedMemoryNone() const {
    return isMemNoOffset(false, 0);
  }

  bool isAlignedMemory16() const {
    if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
      return true;
    return isMemNoOffset(false, 0);
  }

  bool isDupAlignedMemory16() const {
    if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
      return true;
    return isMemNoOffset(false, 0);
  }

  bool isAlignedMemory32() const {
    if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
      return true;
    return isMemNoOffset(false, 0);
  }

  bool isDupAlignedMemory32() const {
    if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
      return true;
    return isMemNoOffset(false, 0);
  }

  bool isAlignedMemory64() const {
    if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
      return true;
    return isMemNoOffset(false, 0);
  }

  bool isDupAlignedMemory64() const {
    if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
      return true;
    return isMemNoOffset(false, 0);
  }

  bool isAlignedMemory64or128() const {
    if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
      return true;
    if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
      return true;
    return isMemNoOffset(false, 0);
  }

  bool isDupAlignedMemory64or128() const {
    if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
      return true;
    if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
      return true;
    return isMemNoOffset(false, 0);
  }

  bool isAlignedMemory64or128or256() const {
    if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
      return true;
    if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
      return true;
    if (isMemNoOffset(false, 32)) // alignment in bytes for 256-bits is 32.
      return true;
    return isMemNoOffset(false, 0);
  }
1593 
  // ARM addressing mode 2: register offset, or immediate offset in
  // [-4095, 4095].
  bool isAddrMode2() const {
    if (!isGPRMem() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return true;
    // Immediate offset in range [-4095, 4095].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return Val > -4096 && Val < 4096;
    }
    return false;
  }
1606 
  // Stand-alone immediate usable as an addressing-mode-2 offset.
  bool isAM2OffsetImm() const {
    if (!isImm()) return false;
    // Immediate offset in range [-4095, 4095].
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    // INT32_MIN encodes the special #-0 offset.
    return (Val == std::numeric_limits<int32_t>::min()) ||
           (Val > -4096 && Val < 4096);
  }
1616 
1617  bool isAddrMode3() const {
1618  // If we have an immediate that's not a constant, treat it as a label
1619  // reference needing a fixup. If it is a constant, it's something else
1620  // and we reject it.
1621  if (isImm() && !isa<MCConstantExpr>(getImm()))
1622  return true;
1623  if (!isGPRMem() || Memory.Alignment != 0) return false;
1624  // No shifts are legal for AM3.
1625  if (Memory.ShiftType != ARM_AM::no_shift) return false;
1626  // Check for register offset.
1627  if (Memory.OffsetRegNum) return true;
1628  // Immediate offset in range [-255, 255].
1629  if (!Memory.OffsetImm) return true;
1630  if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1631  int64_t Val = CE->getValue();
1632  // The #-0 offset is encoded as std::numeric_limits<int32_t>::min(), and
1633  // we have to check for this too.
1634  return (Val > -256 && Val < 256) ||
1636  }
1637  return false;
1638  }
1639 
1640  bool isAM3Offset() const {
1641  if (isPostIdxReg())
1642  return true;
1643  if (!isImm())
1644  return false;
1645  // Immediate offset in range [-255, 255].
1646  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1647  if (!CE) return false;
1648  int64_t Val = CE->getValue();
1649  // Special case, #-0 is std::numeric_limits<int32_t>::min().
1650  return (Val > -256 && Val < 256) ||
1652  }
1653 
1654  bool isAddrMode5() const {
1655  // If we have an immediate that's not a constant, treat it as a label
1656  // reference needing a fixup. If it is a constant, it's something else
1657  // and we reject it.
1658  if (isImm() && !isa<MCConstantExpr>(getImm()))
1659  return true;
1660  if (!isGPRMem() || Memory.Alignment != 0) return false;
1661  // Check for register offset.
1662  if (Memory.OffsetRegNum) return false;
1663  // Immediate offset in range [-1020, 1020] and a multiple of 4.
1664  if (!Memory.OffsetImm) return true;
1665  if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1666  int64_t Val = CE->getValue();
1667  return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
1669  }
1670  return false;
1671  }
1672 
1673  bool isAddrMode5FP16() const {
1674  // If we have an immediate that's not a constant, treat it as a label
1675  // reference needing a fixup. If it is a constant, it's something else
1676  // and we reject it.
1677  if (isImm() && !isa<MCConstantExpr>(getImm()))
1678  return true;
1679  if (!isGPRMem() || Memory.Alignment != 0) return false;
1680  // Check for register offset.
1681  if (Memory.OffsetRegNum) return false;
1682  // Immediate offset in range [-510, 510] and a multiple of 2.
1683  if (!Memory.OffsetImm) return true;
1684  if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1685  int64_t Val = CE->getValue();
1686  return (Val >= -510 && Val <= 510 && ((Val & 1) == 0)) ||
1688  }
1689  return false;
1690  }
1691 
  // TBB memory operand: register offset with no negation, shift or
  // alignment.
  bool isMemTBB() const {
    if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
      return false;
    return true;
  }

  // TBH memory operand: register offset shifted lsl #1, no negation or
  // alignment.
  bool isMemTBH() const {
    if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
        Memory.Alignment != 0 )
      return false;
    return true;
  }

  // Any register-offset memory operand without an alignment qualifier.
  bool isMemRegOffset() const {
    if (!isGPRMem() || !Memory.OffsetRegNum || Memory.Alignment != 0)
      return false;
    return true;
  }

  // Thumb2 register-offset memory operand: non-negative offset, base not
  // PC, and at most lsl #3.
  bool isT2MemRegOffset() const {
    if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.Alignment != 0 || Memory.BaseRegNum == ARM::PC)
      return false;
    // Only lsl #{0, 1, 2, 3} allowed.
    if (Memory.ShiftType == ARM_AM::no_shift)
      return true;
    if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
      return false;
    return true;
  }
1724 
  bool isMemThumbRR() const {
    // Thumb reg+reg addressing is simple. Just two registers, a base and
    // an offset. No shifts, negations or any other complicating factors.
    if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
      return false;
    // Both registers must be low registers.
    return isARMLowRegister(Memory.BaseRegNum) &&
      (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
  }
1734 
  // Thumb low-register base with an immediate offset scaled by 4.
  bool isMemThumbRIs4() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 124].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return Val >= 0 && Val <= 124 && (Val % 4) == 0;
    }
    return false;
  }
1747 
  // Thumb low-register base with an immediate offset scaled by 2.
  bool isMemThumbRIs2() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 2 in range [0, 62].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return Val >= 0 && Val <= 62 && (Val % 2) == 0;
    }
    return false;
  }
1760 
  // Thumb low-register base with an unscaled immediate offset.
  bool isMemThumbRIs1() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 31].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return Val >= 0 && Val <= 31;
    }
    return false;
  }

  // Thumb SP-relative memory operand with a word-scaled immediate offset.
  bool isMemThumbSPI() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
    }
    return false;
  }
1786 
1787  bool isMemImm8s4Offset() const {
1788  // If we have an immediate that's not a constant, treat it as a label
1789  // reference needing a fixup. If it is a constant, it's something else
1790  // and we reject it.
1791  if (isImm() && !isa<MCConstantExpr>(getImm()))
1792  return true;
1793  if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1794  return false;
1795  // Immediate offset a multiple of 4 in range [-1020, 1020].
1796  if (!Memory.OffsetImm) return true;
1797  if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1798  int64_t Val = CE->getValue();
1799  // Special case, #-0 is std::numeric_limits<int32_t>::min().
1800  return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) ||
1802  }
1803  return false;
1804  }
1805 
  // Memory operand with a 7-bit immediate offset scaled by 4: a multiple of
  // 4 in [-508, 508] (or the INT32_MIN #-0 form); base must not be PC.
  bool isMemImm7s4Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0 ||
        !ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
            Memory.BaseRegNum))
      return false;
    // Immediate offset a multiple of 4 in range [-508, 508].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      // Special case, #-0 is INT32_MIN.
      return (Val >= -508 && Val <= 508 && (Val & 3) == 0) || Val == INT32_MIN;
    }
    return false;
  }
1825 
  // Memory operand with a non-negative word-scaled offset in [0, 1020].
  bool isMemImm0_1020s4Offset() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
    }
    return false;
  }
1837 
  // Memory operand with an 8-bit immediate offset in [-255, 255] (or the
  // INT32_MIN #-0 form); base must not be PC.
  bool isMemImm8Offset() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Base reg of PC isn't allowed for these encodings.
    if (Memory.BaseRegNum == ARM::PC) return false;
    // Immediate offset in range [-255, 255].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return (Val == std::numeric_limits<int32_t>::min()) ||
             (Val > -256 && Val < 256);
    }
    return false;
  }
1852 
  // Memory operand with a 7-bit immediate offset shifted left by Bits; the
  // base register must belong to RegClassID.
  template<unsigned Bits, unsigned RegClassID>
  bool isMemImm7ShiftedOffset() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0 ||
        !ARMMCRegisterClasses[RegClassID].contains(Memory.BaseRegNum))
      return false;

    // Expect an immediate offset equal to an element of the range
    // [-127, 127], shifted left by Bits.

    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();

      // INT32_MIN is a special-case value (indicating the encoding with
      // zero offset and the subtract bit set)
      if (Val == INT32_MIN)
        return true;

      unsigned Divisor = 1U << Bits;

      // Check that the low bits are zero
      if (Val % Divisor != 0)
        return false;

      // Check that the remaining offset is within range.
      Val /= Divisor;
      return (Val >= -127 && Val <= 127);
    }
    return false;
  }
1883 
  // Matches an MVE memory operand of the form [GPRnopc base, MQPR offset
  // register].  With shift == 0 no shift may be present; with shift > 0 the
  // offset register must carry exactly "uxtw #shift".  No immediate offset
  // or alignment qualifier is allowed.
  template <int shift> bool isMemRegRQOffset() const {
    if (!isMVEMem() || Memory.OffsetImm != nullptr || Memory.Alignment != 0)
      return false;

    if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
            Memory.BaseRegNum))
      return false;
    if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
            Memory.OffsetRegNum))
      return false;

    if (shift == 0 && Memory.ShiftType != ARM_AM::no_shift)
      return false;

    if (shift > 0 &&
        (Memory.ShiftType != ARM_AM::uxtw || Memory.ShiftImm != shift))
      return false;

    return true;
  }
1904 
  // Matches an MVE memory operand with an MQPR base register and an
  // immediate offset that is a multiple of (1 << shift) whose scaled value
  // fits in a signed 7-bit range; INT32_MIN selects the subtract-with-zero
  // form (see isMemImm7ShiftedOffset).
  template <int shift> bool isMemRegQOffset() const {
    if (!isMVEMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;

    if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
            Memory.BaseRegNum))
      return false;

    // A missing offset counts as #0.
    if (!Memory.OffsetImm)
      return true;
    static_assert(shift < 56,
                  "Such that we dont shift by a value higher than 62");
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();

      // The value must be a multiple of (1 << shift)
      if ((Val & ((1U << shift) - 1)) != 0)
        return false;

      // And be in the right range, depending on the amount that it is shifted
      // by.  Shift 0, is equal to 7 unsigned bits, the sign bit is set
      // separately.
      int64_t Range = (1U << (7 + shift)) - 1;
      return (Val == INT32_MIN) || (Val > -Range && Val < Range);
    }
    return false;
  }
1932 
1933  bool isMemPosImm8Offset() const {
1934  if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1935  return false;
1936  // Immediate offset in range [0, 255].
1937  if (!Memory.OffsetImm) return true;
1938  if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1939  int64_t Val = CE->getValue();
1940  return Val >= 0 && Val < 256;
1941  }
1942  return false;
1943  }
1944 
1945  bool isMemNegImm8Offset() const {
1946  if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1947  return false;
1948  // Base reg of PC isn't allowed for these encodings.
1949  if (Memory.BaseRegNum == ARM::PC) return false;
1950  // Immediate offset in range [-255, -1].
1951  if (!Memory.OffsetImm) return false;
1952  if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1953  int64_t Val = CE->getValue();
1954  return (Val == std::numeric_limits<int32_t>::min()) ||
1955  (Val > -256 && Val < 0);
1956  }
1957  return false;
1958  }
1959 
1960  bool isMemUImm12Offset() const {
1961  if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1962  return false;
1963  // Immediate offset in range [0, 4095].
1964  if (!Memory.OffsetImm) return true;
1965  if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1966  int64_t Val = CE->getValue();
1967  return (Val >= 0 && Val < 4096);
1968  }
1969  return false;
1970  }
1971 
1972  bool isMemImm12Offset() const {
1973  // If we have an immediate that's not a constant, treat it as a label
1974  // reference needing a fixup. If it is a constant, it's something else
1975  // and we reject it.
1976 
1977  if (isImm() && !isa<MCConstantExpr>(getImm()))
1978  return true;
1979 
1980  if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1981  return false;
1982  // Immediate offset in range [-4095, 4095].
1983  if (!Memory.OffsetImm) return true;
1984  if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1985  int64_t Val = CE->getValue();
1986  return (Val > -4096 && Val < 4096) ||
1988  }
1989  // If we have an immediate that's not a constant, treat it as a
1990  // symbolic expression needing a fixup.
1991  return true;
1992  }
1993 
  // Delay processing of a constant-pool immediate (e.g. "ldr r0, =expr");
  // it will later be turned into a constant.  Matches no other operand.
  bool isConstPoolAsmImm() const {
    return (isConstantPoolImm());
  }
1999 
2000  bool isPostIdxImm8() const {
2001  if (!isImm()) return false;
2002  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2003  if (!CE) return false;
2004  int64_t Val = CE->getValue();
2005  return (Val > -256 && Val < 256) ||
2007  }
2008 
2009  bool isPostIdxImm8s4() const {
2010  if (!isImm()) return false;
2011  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2012  if (!CE) return false;
2013  int64_t Val = CE->getValue();
2014  return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
2016  }
2017 
  // Simple operand-kind predicates.
  bool isMSRMask() const { return Kind == k_MSRMask; }
  bool isBankedReg() const { return Kind == k_BankedReg; }
  bool isProcIFlags() const { return Kind == k_ProcIFlags; }

  // NEON operands.
  // "Spacing" refers to the VectorList.isDoubleSpaced flag; presumably
  // single-spaced lists use consecutive registers and double-spaced lists
  // skip every other one — TODO confirm against the list parser.
  bool isSingleSpacedVectorList() const {
    return Kind == k_VectorList && !VectorList.isDoubleSpaced;
  }

  bool isDoubleSpacedVectorList() const {
    return Kind == k_VectorList && VectorList.isDoubleSpaced;
  }
2030 
  // Vector-list predicates: the name encodes the required register count
  // and register kind.  VectorList.Count is the number of registers in the
  // list; the MQ variants additionally require an MVE Q register.

  bool isVecListOneD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 1;
  }

  // Two MVE Q registers.
  bool isVecListTwoMQ() const {
    return isSingleSpacedVectorList() && VectorList.Count == 2 &&
           ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
               VectorList.RegNum);
  }

  // A D-register pair, matched via the DPair register class.
  bool isVecListDPair() const {
    if (!isSingleSpacedVectorList()) return false;
    return (ARMMCRegisterClasses[ARM::DPairRegClassID]
              .contains(VectorList.RegNum));
  }

  bool isVecListThreeD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 3;
  }

  bool isVecListFourD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 4;
  }

  // A spaced D-register pair, matched via the DPairSpc register class.
  bool isVecListDPairSpaced() const {
    if (Kind != k_VectorList) return false;
    if (isSingleSpacedVectorList()) return false;
    return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
              .contains(VectorList.RegNum));
  }

  bool isVecListThreeQ() const {
    if (!isDoubleSpacedVectorList()) return false;
    return VectorList.Count == 3;
  }

  bool isVecListFourQ() const {
    if (!isDoubleSpacedVectorList()) return false;
    return VectorList.Count == 4;
  }

  // Four MVE Q registers.
  bool isVecListFourMQ() const {
    return isSingleSpacedVectorList() && VectorList.Count == 4 &&
           ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
               VectorList.RegNum);
  }
2080 
  // "All lanes" vector-list predicates (the "vld1.32 {d0[]}" style
  // operands): same structure as the plain list predicates, but keyed on
  // the k_VectorListAllLanes operand kind.

  bool isSingleSpacedVectorAllLanes() const {
    return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
  }

  bool isDoubleSpacedVectorAllLanes() const {
    return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
  }

  bool isVecListOneDAllLanes() const {
    if (!isSingleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 1;
  }

  // A D-register pair, matched via the DPair register class.
  bool isVecListDPairAllLanes() const {
    if (!isSingleSpacedVectorAllLanes()) return false;
    return (ARMMCRegisterClasses[ARM::DPairRegClassID]
              .contains(VectorList.RegNum));
  }

  bool isVecListDPairSpacedAllLanes() const {
    if (!isDoubleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 2;
  }

  bool isVecListThreeDAllLanes() const {
    if (!isSingleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 3;
  }

  bool isVecListThreeQAllLanes() const {
    if (!isDoubleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 3;
  }

  bool isVecListFourDAllLanes() const {
    if (!isSingleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 4;
  }

  bool isVecListFourQAllLanes() const {
    if (!isDoubleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 4;
  }
2124 
  // Indexed ("lane") vector-list predicates, keyed on k_VectorListIndexed.
  // The element width bounds the legal lane index: Byte lanes allow
  // indices up to 7, HWord up to 3, and Word up to 1.

  bool isSingleSpacedVectorIndexed() const {
    return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
  }

  bool isDoubleSpacedVectorIndexed() const {
    return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
  }

  bool isVecListOneDByteIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
  }

  bool isVecListOneDHWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
  }

  bool isVecListOneDWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
  }

  bool isVecListTwoDByteIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
  }

  bool isVecListTwoDHWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
  }

  bool isVecListTwoQWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
  }

  bool isVecListTwoQHWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
  }

  bool isVecListTwoDWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
  }

  bool isVecListThreeDByteIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
  }

  bool isVecListThreeDHWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
  }

  bool isVecListThreeQWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
  }

  bool isVecListThreeQHWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
  }

  bool isVecListThreeDWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
  }

  bool isVecListFourDByteIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
  }

  bool isVecListFourDHWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
  }

  bool isVecListFourQWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
  }

  bool isVecListFourQHWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
  }

  bool isVecListFourDWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
  }
2222 
  bool isVectorIndex() const { return Kind == k_VectorIndex; }

  // True if this is a vector lane index strictly less than NumLanes.
  template <unsigned NumLanes>
  bool isVectorIndexInRange() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < NumLanes;
  }

  // Lane-count bounds per element size: 8 byte lanes, 4 halfword lanes,
  // 2 word lanes, 1 doubleword lane.
  bool isVectorIndex8() const { return isVectorIndexInRange<8>(); }
  bool isVectorIndex16() const { return isVectorIndexInRange<4>(); }
  bool isVectorIndex32() const { return isVectorIndexInRange<2>(); }
  bool isVectorIndex64() const { return isVectorIndexInRange<1>(); }

  // A lane index that must be one of exactly two permitted values.
  template<int PermittedValue, int OtherPermittedValue>
  bool isMVEPairVectorIndex() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val == PermittedValue ||
           VectorIndex.Val == OtherPermittedValue;
  }
2242 
2243  bool isNEONi8splat() const {
2244  if (!isImm()) return false;
2245  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2246  // Must be a constant.
2247  if (!CE) return false;
2248  int64_t Value = CE->getValue();
2249  // i8 value splatted across 8 bytes. The immediate is just the 8 byte
2250  // value.
2251  return Value >= 0 && Value < 256;
2252  }
2253 
  // i16/i32 splat immediates, delegated to the ARM_AM helpers.  The *Not
  // variants test the bitwise complement of the value instead.

  bool isNEONi16splat() const {
    if (isNEONByteReplicate(2))
      return false; // Leave that for bytes replication and forbid by default.
    if (!isImm())
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    unsigned Value = CE->getValue();
    return ARM_AM::isNEONi16splat(Value);
  }

  bool isNEONi16splatNot() const {
    if (!isImm())
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    unsigned Value = CE->getValue();
    // Complement within the 16-bit field only.
    return ARM_AM::isNEONi16splat(~Value & 0xffff);
  }

  bool isNEONi32splat() const {
    if (isNEONByteReplicate(4))
      return false; // Leave that for bytes replication and forbid by default.
    if (!isImm())
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    unsigned Value = CE->getValue();
    return ARM_AM::isNEONi32splat(Value);
  }

  bool isNEONi32splatNot() const {
    if (!isImm())
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    unsigned Value = CE->getValue();
    return ARM_AM::isNEONi32splat(~Value);
  }
2297 
2298  static bool isValidNEONi32vmovImm(int64_t Value) {
2299  // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
2300  // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
2301  return ((Value & 0xffffffffffffff00) == 0) ||
2302  ((Value & 0xffffffffffff00ff) == 0) ||
2303  ((Value & 0xffffffffff00ffff) == 0) ||
2304  ((Value & 0xffffffff00ffffff) == 0) ||
2305  ((Value & 0xffffffffffff00ff) == 0xff) ||
2306  ((Value & 0xffffffffff00ffff) == 0xffff);
2307  }
2308 
  // True if the immediate consists of NumElems identical Width-bit elements
  // (optionally after bitwise inversion when Inv is set).  For 16-bit
  // elements the element may occupy only one of its two bytes; 32-bit
  // elements must additionally satisfy isValidNEONi32vmovImm.
  bool isNEONReplicate(unsigned Width, unsigned NumElems, bool Inv) const {
    assert((Width == 8 || Width == 16 || Width == 32) &&
           "Invalid element width");
    assert(NumElems * Width <= 64 && "Invalid result width");

    if (!isImm())
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE)
      return false;
    int64_t Value = CE->getValue();
    if (!Value)
      return false; // Don't bother with zero.
    if (Inv)
      Value = ~Value;

    uint64_t Mask = (1ull << Width) - 1;
    uint64_t Elem = Value & Mask;
    if (Width == 16 && (Elem & 0x00ff) != 0 && (Elem & 0xff00) != 0)
      return false;
    if (Width == 32 && !isValidNEONi32vmovImm(Elem))
      return false;

    // Every subsequent element must equal the first.
    for (unsigned i = 1; i < NumElems; ++i) {
      Value >>= Width;
      if ((Value & Mask) != Elem)
        return false;
    }
    return true;
  }
2340 
  // NumBytes copies of the same byte.
  bool isNEONByteReplicate(unsigned NumBytes) const {
    return isNEONReplicate(8, NumBytes, false);
  }

  // Sanity checks shared by the replicate templates below.
  static void checkNeonReplicateArgs(unsigned FromW, unsigned ToW) {
    assert((FromW == 8 || FromW == 16 || FromW == 32) &&
           "Invalid source width");
    assert((ToW == 16 || ToW == 32 || ToW == 64) &&
           "Invalid destination width");
    assert(FromW < ToW && "ToW is not less than FromW");
  }

  // FromW-bit element replicated to fill ToW bits; 64-bit i64 splats are
  // excluded because they have their own operand class (isNEONi64splat).
  template<unsigned FromW, unsigned ToW>
  bool isNEONmovReplicate() const {
    checkNeonReplicateArgs(FromW, ToW);
    if (ToW == 64 && isNEONi64splat())
      return false;
    return isNEONReplicate(FromW, ToW / FromW, false);
  }

  // Same, but the replicated pattern is matched against the bitwise
  // inverse of the immediate.
  template<unsigned FromW, unsigned ToW>
  bool isNEONinvReplicate() const {
    checkNeonReplicateArgs(FromW, ToW);
    return isNEONReplicate(FromW, ToW / FromW, true);
  }
2366 
2367  bool isNEONi32vmov() const {
2368  if (isNEONByteReplicate(4))
2369  return false; // Let it to be classified as byte-replicate case.
2370  if (!isImm())
2371  return false;
2372  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2373  // Must be a constant.
2374  if (!CE)
2375  return false;
2376  return isValidNEONi32vmovImm(CE->getValue());
2377  }
2378 
  // Like isNEONi32vmov, but the bitwise complement of the value must be a
  // valid VMOV i32 immediate (used for the VMVN forms).
  bool isNEONi32vmovNeg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    return isValidNEONi32vmovImm(~CE->getValue());
  }
2386 
2387  bool isNEONi64splat() const {
2388  if (!isImm()) return false;
2389  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2390  // Must be a constant.
2391  if (!CE) return false;
2392  uint64_t Value = CE->getValue();
2393  // i64 value with each byte being either 0 or 0xff.
2394  for (unsigned i = 0; i < 8; ++i, Value >>= 8)
2395  if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
2396  return true;
2397  }
2398 
2399  template<int64_t Angle, int64_t Remainder>
2400  bool isComplexRotation() const {
2401  if (!isImm()) return false;
2402 
2403  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2404  if (!CE) return false;
2405  uint64_t Value = CE->getValue();
2406 
2407  return (Value % Angle == Remainder && Value <= 270);
2408  }
2409 
  // MVE long-shift amount: an immediate in [1, 32].
  bool isMVELongShift() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    uint64_t Value = CE->getValue();
    return Value >= 1 && Value <= 32;
  }

  // MVE saturate operand: only #48 or #64 are accepted.
  bool isMveSaturateOp() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    uint64_t Value = CE->getValue();
    return Value == 48 || Value == 64;
  }
2426 
2427  bool isITCondCodeNoAL() const {
2428  if (!isITCondCode()) return false;
2430  return CC != ARMCC::AL;
2431  }
2432 
2433  bool isITCondCodeRestrictedI() const {
2434  if (!isITCondCode())
2435  return false;
2437  return CC == ARMCC::EQ || CC == ARMCC::NE;
2438  }
2439 
2440  bool isITCondCodeRestrictedS() const {
2441  if (!isITCondCode())
2442  return false;
2444  return CC == ARMCC::LT || CC == ARMCC::GT || CC == ARMCC::LE ||
2445  CC == ARMCC::GE;
2446  }
2447 
2448  bool isITCondCodeRestrictedU() const {
2449  if (!isITCondCode())
2450  return false;
2452  return CC == ARMCC::HS || CC == ARMCC::HI;
2453  }
2454 
2455  bool isITCondCodeRestrictedFP() const {
2456  if (!isITCondCode())
2457  return false;
2459  return CC == ARMCC::EQ || CC == ARMCC::NE || CC == ARMCC::LT ||
2460  CC == ARMCC::GT || CC == ARMCC::LE || CC == ARMCC::GE;
2461  }
2462 
2463  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
2464  // Add as immediates when possible. Null MCExpr = 0.
2465  if (!Expr)
2467  else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
2468  Inst.addOperand(MCOperand::createImm(CE->getValue()));
2469  else
2470  Inst.addOperand(MCOperand::createExpr(Expr));
2471  }
2472 
  // Branch targets are emitted via addExpr so symbolic targets stay as
  // expressions (for later fixup) while constants fold to immediates.
  void addARMBranchTargetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addThumbBranchTargetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }
2482 
  // A condition code expands to two operands: the predicate value and the
  // register it reads — CPSR when predicated, reg 0 when unconditional (AL).
  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
    unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
    Inst.addOperand(MCOperand::createReg(RegNum));
  }
2489 
2490  void addVPTPredNOperands(MCInst &Inst, unsigned N) const {
2491  assert(N == 3 && "Invalid number of operands!");
2492  Inst.addOperand(MCOperand::createImm(unsigned(getVPTPred())));
2493  unsigned RegNum = getVPTPred() == ARMVCC::None ? 0: ARM::P0;
2494  Inst.addOperand(MCOperand::createReg(RegNum));
2496  }
2497 
  // vpred_r: the three vpred_n operands plus the "inactive lanes" source
  // register.  When predicated, that register must be the one tied to the
  // instruction's output, located via the TIED_TO operand constraint.
  void addVPTPredROperands(MCInst &Inst, unsigned N) const {
    assert(N == 4 && "Invalid number of operands!");
    addVPTPredNOperands(Inst, N-1);
    unsigned RegNum;
    if (getVPTPred() == ARMVCC::None) {
      RegNum = 0;
    } else {
      unsigned NextOpIndex = Inst.getNumOperands();
      const MCInstrDesc &MCID = ARMInsts[Inst.getOpcode()];
      int TiedOp = MCID.getOperandConstraint(NextOpIndex, MCOI::TIED_TO);
      assert(TiedOp >= 0 &&
             "Inactive register in vpred_r is not tied to an output!");
      RegNum = Inst.getOperand(TiedOp).getReg();
    }
    Inst.addOperand(MCOperand::createReg(RegNum));
  }
2514 
  // Coprocessor number (p0-p15) as an immediate.
  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getCoproc()));
  }

  // Coprocessor register (c0-c15) as an immediate.
  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getCoproc()));
  }

  // The {option} field of LDC/STC-style instructions.
  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(CoprocOption.Val));
  }

  // The IT-block mask bits.
  void addITMaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(ITMask.Mask));
  }

  // The IT-block base condition as an immediate.
  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
  }
2539 
2540  void addITCondCodeInvOperands(MCInst &Inst, unsigned N) const {
2541  assert(N == 1 && "Invalid number of operands!");
2543  }
2544 
2545  void addCCOutOperands(MCInst &Inst, unsigned N) const {
2546  assert(N == 1 && "Invalid number of operands!");
2548  }
2549 
2550  void addRegOperands(MCInst &Inst, unsigned N) const {
2551  assert(N == 1 && "Invalid number of operands!");
2553  }
2554 
2555  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
2556  assert(N == 3 && "Invalid number of operands!");
2557  assert(isRegShiftedReg() &&
2558  "addRegShiftedRegOperands() on non-RegShiftedReg!");
2559  Inst.addOperand(MCOperand::createReg(RegShiftedReg.SrcReg));
2560  Inst.addOperand(MCOperand::createReg(RegShiftedReg.ShiftReg));
2562  ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
2563  }
2564 
2565  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
2566  assert(N == 2 && "Invalid number of operands!");
2567  assert(isRegShiftedImm() &&
2568  "addRegShiftedImmOperands() on non-RegShiftedImm!");
2569  Inst.addOperand(MCOperand::createReg(RegShiftedImm.SrcReg));
2570  // Shift of #32 is encoded as 0 where permitted
2571  unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm);
2573  ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, Imm)));
2574  }
2575 
  // Shifter immediate packed as (isASR << 5) | amount: bit 5 distinguishes
  // ASR from LSL.
  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm((ShifterImm.isASR << 5) |
                                         ShifterImm.Imm));
  }
2581 
2582  void addRegListOperands(MCInst &Inst, unsigned N) const {
2583  assert(N == 1 && "Invalid number of operands!");
2584  const SmallVectorImpl<unsigned> &RegList = getRegList();
2585  for (unsigned Reg : RegList)
2587  }
2588 
2589  void addRegListWithAPSROperands(MCInst &Inst, unsigned N) const {
2590  assert(N == 1 && "Invalid number of operands!");
2591  const SmallVectorImpl<unsigned> &RegList = getRegList();
2592  for (unsigned Reg : RegList)
2594  }
2595 
  // These list flavors all share the plain register-list emission.
  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addFPSRegListWithVPROperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addFPDRegListWithVPROperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }
2611 
  // RotImm.Imm holds the rotation in bits (0, 8, 16 or 24); the encoding
  // stores it divided by 8.
  void addRotImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Encoded as val>>3. The printer handles display as 8, 16, 24.
    Inst.addOperand(MCOperand::createImm(RotImm.Imm >> 3));
  }
2617 
  // Modified immediate: encoded as Bits | (Rot << 7).  A plain immediate
  // (e.g. a fixup expression) falls back to the generic path.
  void addModImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    // Support for fixups (MCFixup)
    if (isImm())
      return addImmOperands(Inst, N);

    Inst.addOperand(MCOperand::createImm(ModImm.Bits | (ModImm.Rot << 7)));
  }
2627 
  // The source held the bitwise NOT of a so_imm; re-encode the complement.
  void addModImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint32_t Enc = ARM_AM::getSOImmVal(~CE->getValue());
    Inst.addOperand(MCOperand::createImm(Enc));
  }

  // The source held the negation of a so_imm; re-encode the negative.
  void addModImmNegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint32_t Enc = ARM_AM::getSOImmVal(-CE->getValue());
    Inst.addOperand(MCOperand::createImm(Enc));
  }
2641 
  // These match negative immediates written in the source; the instruction
  // encodes the positive magnitude, so negate before emitting.
  void addThumbModImmNeg8_255Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint32_t Val = -CE->getValue();
    Inst.addOperand(MCOperand::createImm(Val));
  }

  void addThumbModImmNeg1_7Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint32_t Val = -CE->getValue();
    Inst.addOperand(MCOperand::createImm(Val));
  }
2655 
2656  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
2657  assert(N == 1 && "Invalid number of operands!");
2658  // Munge the lsb/width into a bitfield mask.
2659  unsigned lsb = Bitfield.LSB;
2660  unsigned width = Bitfield.Width;
2661  // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
2662  uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
2663  (32 - (lsb + width)));
2665  }
2666 
  // Generic immediate: constants fold to imm operands, anything else stays
  // an expression for later fixup (see addExpr).
  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }
2671 
  // Fixed-point fraction-bits operands: the encoding stores (size - n)
  // where n is the number of fraction bits written in the source.
  void addFBits16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(16 - CE->getValue()));
  }

  void addFBits32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(32 - CE->getValue()));
  }

  // FP immediate encoded via ARM_AM::getFP32Imm from the raw f32 bits.
  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
    Inst.addOperand(MCOperand::createImm(Val));
  }
2690 
  // Scaled immediates stored unscaled (see the FIXMEs): the value is added
  // to the MCInst as-is rather than divided by 4.
  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // FIXME: We really want to scale the value here, but the LDRD/STRD
    // instruction don't encode operands that way yet.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue()));
  }

  void addImm7s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // FIXME: We really want to scale the value here, but the VSTR/VLDR_VSYSR
    // instruction don't encode operands that way yet.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue()));
  }
2706 
  // 7-bit immediates (with an implied shift encoded in the operand class
  // name); all are added to the MCInst unmodified.
  void addImm7Shift0Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue()));
  }

  void addImm7Shift1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue()));
  }

  void addImm7Shift2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue()));
  }

  void addImm7Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue()));
  }
2730 
  // Word-scaled immediates: the encoding stores value/4 (negated for the
  // Neg variant).
  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
  }

  void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(-(CE->getValue() / 4)));
  }

  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
  }
2754 
  // Immediates encoded as (value - 1).
  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate-1, and we store in the instruction
    // the bits as encoded, so subtract off one here.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
  }

  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate-1, and we store in the instruction
    // the bits as encoded, so subtract off one here.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
  }
2770 
2771  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
2772  assert(N == 1 && "Invalid number of operands!");
2773  // The constant encodes as the immediate, except for 32, which encodes as
2774  // zero.
2775  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2776  unsigned Imm = CE->getValue();
2777  Inst.addOperand(MCOperand::createImm((Imm == 32 ? 0 : Imm)));
2778  }
2779 
2780  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
2781  assert(N == 1 && "Invalid number of operands!");
2782  // An ASR value of 32 encodes as 0, so that's how we want to add it to
2783  // the instruction as well.
2784  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2785  int Val = CE->getValue();
2786  Inst.addOperand(MCOperand::createImm(Val == 32 ? 0 : Val));
2787  }
2788 
2789  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
2790  assert(N == 1 && "Invalid number of operands!");
2791  // The operand is actually a t2_so_imm, but we have its bitwise
2792  // negation in the assembly source, so twiddle it here.
2793  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2794  Inst.addOperand(MCOperand::createImm(~(uint32_t)CE->getValue()));
2795  }
2796 
2797  void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
2798  assert(N == 1 && "Invalid number of operands!");
2799  // The operand is actually a t2_so_imm, but we have its
2800  // negation in the assembly source, so twiddle it here.
2801  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2802  Inst.addOperand(MCOperand::createImm(-(uint32_t)CE->getValue()));
2803  }
2804 
2805  void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const {
2806  assert(N == 1 && "Invalid number of operands!");
2807  // The operand is actually an imm0_4095, but we have its
2808  // negation in the assembly source, so twiddle it here.
2809  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2810  Inst.addOperand(MCOperand::createImm(-(uint32_t)CE->getValue()));
2811  }
2812 
2813  void addUnsignedOffset_b8s2Operands(MCInst &Inst, unsigned N) const {
2814  if(const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
2815  Inst.addOperand(MCOperand::createImm(CE->getValue() >> 2));
2816  return;
2817  }
2818  const MCSymbolRefExpr *SR = cast<MCSymbolRefExpr>(Imm.Val);
2820  }
2821 
2822  void addThumbMemPCOperands(MCInst &Inst, unsigned N) const {
2823  assert(N == 1 && "Invalid number of operands!");
2824  if (isImm()) {
2825  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2826  if (CE) {
2827  Inst.addOperand(MCOperand::createImm(CE->getValue()));
2828  return;
2829  }
2830  const MCSymbolRefExpr *SR = cast<MCSymbolRefExpr>(Imm.Val);
2832  return;
2833  }
2834 
2835  assert(isGPRMem() && "Unknown value type!");
2836  assert(isa<MCConstantExpr>(Memory.OffsetImm) && "Unknown value type!");
2837  if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
2838  Inst.addOperand(MCOperand::createImm(CE->getValue()));
2839  else
2840  Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
2841  }
2842 
2843  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
2844  assert(N == 1 && "Invalid number of operands!");
2845  Inst.addOperand(MCOperand::createImm(unsigned(getMemBarrierOpt())));
2846  }
2847 
2848  void addInstSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
2849  assert(N == 1 && "Invalid number of operands!");
2850  Inst.addOperand(MCOperand::createImm(unsigned(getInstSyncBarrierOpt())));
2851  }
2852 
2853  void addTraceSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
2854  assert(N == 1 && "Invalid number of operands!");
2855  Inst.addOperand(MCOperand::createImm(unsigned(getTraceSyncBarrierOpt())));
2856  }
2857 
  // No-offset memory operand: only the base register is emitted.
  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
  }
2862 
  // Thumb2 no-offset memory operand: only the base register is emitted.
  void addMemNoOffsetT2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
  }
2867 
  // Thumb2 no-offset (non-SP base) memory operand: base register only.
  void addMemNoOffsetT2NoSpOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
  }
2872 
  // Thumb no-offset memory operand: only the base register is emitted.
  void addMemNoOffsetTOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
  }
2877 
2878  void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
2879  assert(N == 1 && "Invalid number of operands!");
2880  if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
2881  Inst.addOperand(MCOperand::createImm(CE->getValue()));
2882  else
2883  Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
2884  }
2885 
2886  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
2887  assert(N == 1 && "Invalid number of operands!");
2888  assert(isImm() && "Not an immediate!");
2889 
2890  // If we have an immediate that's not a constant, treat it as a label
2891  // reference needing a fixup.
2892  if (!isa<MCConstantExpr>(getImm())) {
2893  Inst.addOperand(MCOperand::createExpr(getImm()));
2894  return;
2895  }
2896 
2897  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2898  int Val = CE->getValue();
2899  Inst.addOperand(MCOperand::createImm(Val));
2900  }
2901 
2902  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
2903  assert(N == 2 && "Invalid number of operands!");
2904  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2905  Inst.addOperand(MCOperand::createImm(Memory.Alignment));
2906  }
2907 
  // All aligned-memory operand classes share one base+alignment encoding.
  void addDupAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2911 
  // Shares the common base+alignment encoding.
  void addAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2915 
  // Shares the common base+alignment encoding.
  void addAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2919 
  // Shares the common base+alignment encoding.
  void addDupAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2923 
  // Shares the common base+alignment encoding.
  void addAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2927 
  // Shares the common base+alignment encoding.
  void addDupAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2931 
  // Shares the common base+alignment encoding.
  void addAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2935 
  // Shares the common base+alignment encoding.
  void addDupAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2939 
  // Shares the common base+alignment encoding.
  void addAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2943 
  // Shares the common base+alignment encoding.
  void addDupAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2947 
  // Shares the common base+alignment encoding.
  void addAlignedMemory64or128or256Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2951 
2952  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
2953  assert(N == 3 && "Invalid number of operands!");
2954  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2955  Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2956  if (!Memory.OffsetRegNum) {
2957  if (!Memory.OffsetImm)
2959  else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
2960  int32_t Val = CE->getValue();
2962  // Special case for #-0
2963  if (Val == std::numeric_limits<int32_t>::min())
2964  Val = 0;
2965  if (Val < 0)
2966  Val = -Val;
2967  Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
2968  Inst.addOperand(MCOperand::createImm(Val));
2969  } else
2970  Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
2971  } else {
2972  // For register offset, we encode the shift type and negation flag
2973  // here.
2974  int32_t Val =
2976  Memory.ShiftImm, Memory.ShiftType);
2977  Inst.addOperand(MCOperand::createImm(Val));
2978  }
2979  }
2980 
2981  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
2982  assert(N == 2 && "Invalid number of operands!");
2983  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2984  assert(CE && "non-constant AM2OffsetImm operand!");
2985  int32_t Val = CE->getValue();
2987  // Special case for #-0
2988  if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
2989  if (Val < 0) Val = -Val;
2990  Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
2992  Inst.addOperand(MCOperand::createImm(Val));
2993  }
2994 
2995  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
2996  assert(N == 3 && "Invalid number of operands!");
2997  // If we have an immediate that's not a constant, treat it as a label
2998  // reference needing a fixup. If it is a constant, it's something else
2999  // and we reject it.
3000  if (isImm()) {
3001  Inst.addOperand(MCOperand::createExpr(getImm()));
3004  return;
3005  }
3006 
3007  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3008  Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3009  if (!Memory.OffsetRegNum) {
3010  if (!Memory.OffsetImm)
3012  else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
3013  int32_t Val = CE->getValue();
3015  // Special case for #-0
3016  if (Val == std::numeric_limits<int32_t>::min())
3017  Val = 0;
3018  if (Val < 0)
3019  Val = -Val;
3020  Val = ARM_AM::getAM3Opc(AddSub, Val);
3021  Inst.addOperand(MCOperand::createImm(Val));
3022  } else
3023  Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3024  } else {
3025  // For register offset, we encode the shift type and negation flag
3026  // here.
3027  int32_t Val =
3028  ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
3029  Inst.addOperand(MCOperand::createImm(Val));
3030  }
3031  }
3032 
3033  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
3034  assert(N == 2 && "Invalid number of operands!");
3035  if (Kind == k_PostIndexRegister) {
3036  int32_t Val =
3037  ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
3038  Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
3039  Inst.addOperand(MCOperand::createImm(Val));
3040  return;
3041  }
3042 
3043  // Constant offset.
3044  const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
3045  int32_t Val = CE->getValue();
3047  // Special case for #-0
3048  if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
3049  if (Val < 0) Val = -Val;
3050  Val = ARM_AM::getAM3Opc(AddSub, Val);
3052  Inst.addOperand(MCOperand::createImm(Val));
3053  }
3054 
3055  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
3056  assert(N == 2 && "Invalid number of operands!");
3057  // If we have an immediate that's not a constant, treat it as a label
3058  // reference needing a fixup. If it is a constant, it's something else
3059  // and we reject it.
3060  if (isImm()) {
3061  Inst.addOperand(MCOperand::createExpr(getImm()));
3063  return;
3064  }
3065 
3066  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3067  if (!Memory.OffsetImm)
3069  else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
3070  // The lower two bits are always zero and as such are not encoded.
3071  int32_t Val = CE->getValue() / 4;
3073  // Special case for #-0
3074  if (Val == std::numeric_limits<int32_t>::min())
3075  Val = 0;
3076  if (Val < 0)
3077  Val = -Val;
3078  Val = ARM_AM::getAM5Opc(AddSub, Val);
3079  Inst.addOperand(MCOperand::createImm(Val));
3080  } else
3081  Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3082  }
3083 
3084  void addAddrMode5FP16Operands(MCInst &Inst, unsigned N) const {
3085  assert(N == 2 && "Invalid number of operands!");
3086  // If we have an immediate that's not a constant, treat it as a label
3087  // reference needing a fixup. If it is a constant, it's something else
3088  // and we reject it.
3089  if (isImm()) {
3090  Inst.addOperand(MCOperand::createExpr(getImm()));
3092  return;
3093  }
3094 
3095  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3096  // The lower bit is always zero and as such is not encoded.
3097  if (!Memory.OffsetImm)
3099  else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
3100  int32_t Val = CE->getValue() / 2;
3102  // Special case for #-0
3103  if (Val == std::numeric_limits<int32_t>::min())
3104  Val = 0;
3105  if (Val < 0)
3106  Val = -Val;
3107  Val = ARM_AM::getAM5FP16Opc(AddSub, Val);
3108  Inst.addOperand(MCOperand::createImm(Val));
3109  } else
3110  Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3111  }
3112 
3113  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
3114  assert(N == 2 && "Invalid number of operands!");
3115  // If we have an immediate that's not a constant, treat it as a label
3116  // reference needing a fixup. If it is a constant, it's something else
3117  // and we reject it.
3118  if (isImm()) {
3119  Inst.addOperand(MCOperand::createExpr(getImm()));
3121  return;
3122  }
3123 
3124  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3125  addExpr(Inst, Memory.OffsetImm);
3126  }
3127 
3128  void addMemImm7s4OffsetOperands(MCInst &Inst, unsigned N) const {
3129  assert(N == 2 && "Invalid number of operands!");
3130  // If we have an immediate that's not a constant, treat it as a label
3131  // reference needing a fixup. If it is a constant, it's something else
3132  // and we reject it.
3133  if (isImm()) {
3134  Inst.addOperand(MCOperand::createExpr(getImm()));
3136  return;
3137  }
3138 
3139  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3140  addExpr(Inst, Memory.OffsetImm);
3141  }
3142 
3143  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
3144  assert(N == 2 && "Invalid number of operands!");
3145  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3146  if (!Memory.OffsetImm)
3148  else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
3149  // The lower two bits are always zero and as such are not encoded.
3150  Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
3151  else
3152  Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3153  }
3154 
  // Generic base-register + immediate-offset memory operand.
  void addMemImmOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    addExpr(Inst, Memory.OffsetImm);
  }
3160 
3161  void addMemRegRQOffsetOperands(MCInst &Inst, unsigned N) const {
3162  assert(N == 2 && "Invalid number of operands!");
3163  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3164  Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3165  }
3166 
3167  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
3168  assert(N == 2 && "Invalid number of operands!");
3169  // If this is an immediate, it's a label reference.
3170  if (isImm()) {
3171  addExpr(Inst, getImm());
3173  return;
3174  }
3175 
3176  // Otherwise, it's a normal memory reg+offset.
3177  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3178  addExpr(Inst, Memory.OffsetImm);
3179  }
3180 
3181  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
3182  assert(N == 2 && "Invalid number of operands!");
3183  // If this is an immediate, it's a label reference.
3184  if (isImm()) {
3185  addExpr(Inst, getImm());
3187  return;
3188  }
3189 
3190  // Otherwise, it's a normal memory reg+offset.
3191  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3192  addExpr(Inst, Memory.OffsetImm);
3193  }
3194 
  // Emits the expression that a constant-pool entry will be built from
  // (the "=imm" form of LDR pseudo-instructions).
  void addConstPoolAsmImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // This is container for the immediate that we will create the constant
    // pool from
    addExpr(Inst, getConstantPoolImm());
  }
3201 
3202  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
3203  assert(N == 2 && "Invalid number of operands!");
3204  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3205  Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3206  }
3207 
3208  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
3209  assert(N == 2 && "Invalid number of operands!");
3210  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3211  Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3212  }
3213 
3214  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
3215  assert(N == 3 && "Invalid number of operands!");
3216  unsigned Val =
3218  Memory.ShiftImm, Memory.ShiftType);
3219  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3220  Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3221  Inst.addOperand(MCOperand::createImm(Val));
3222  }
3223 
3224  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
3225  assert(N == 3 && "Invalid number of operands!");
3226  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3227  Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3228  Inst.addOperand(MCOperand::createImm(Memory.ShiftImm));
3229  }
3230 
3231  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
3232  assert(N == 2 && "Invalid number of operands!");
3233  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3234  Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3235  }
3236 
3237  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
3238  assert(N == 2 && "Invalid number of operands!");
3239  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3240  if (!Memory.OffsetImm)
3242  else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
3243  // The lower two bits are always zero and as such are not encoded.
3244  Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
3245  else
3246  Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3247  }
3248 
3249  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
3250  assert(N == 2 && "Invalid number of operands!");
3251  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3252  if (!Memory.OffsetImm)
3254  else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
3255  Inst.addOperand(MCOperand::createImm(CE->getValue() / 2));
3256  else
3257  Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3258  }
3259 
  // Thumb base + unscaled imm5 offset.
  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    addExpr(Inst, Memory.OffsetImm);
  }
3265 
3266  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
3267  assert(N == 2 && "Invalid number of operands!");
3268  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3269  if (!Memory.OffsetImm)
3271  else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
3272  // The lower two bits are always zero and as such are not encoded.
3273  Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
3274  else
3275  Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3276  }
3277 
3278  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
3279  assert(N == 1 && "Invalid number of operands!");
3280  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
3281  assert(CE && "non-constant post-idx-imm8 operand!");
3282  int Imm = CE->getValue();
3283  bool isAdd = Imm >= 0;
3285  Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
3287  }
3288 
3289  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
3290  assert(N == 1 && "Invalid number of operands!");
3291  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
3292  assert(CE && "non-constant post-idx-imm8s4 operand!");
3293  int Imm = CE->getValue();
3294  bool isAdd = Imm >= 0;
3296  // Immediate is scaled by 4.
3297  Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
3299  }
3300 
3301  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
3302  assert(N == 2 && "Invalid number of operands!");
3303  Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
3304  Inst.addOperand(MCOperand::createImm(PostIdxReg.isAdd));
3305  }
3306 
3307  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
3308  assert(N == 2 && "Invalid number of operands!");
3309  Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
3310  // The sign, shift type, and shift amount are encoded in a single operand
3311  // using the AM2 encoding helpers.
3312  ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
3313  unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
3314  PostIdxReg.ShiftTy);
3316  }
3317 
3318  void addPowerTwoOperands(MCInst &Inst, unsigned N) const {
3319  assert(N == 1 && "Invalid number of operands!");
3320  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3321  Inst.addOperand(MCOperand::createImm(CE->getValue()));
3322  }
3323 
3324  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
3325  assert(N == 1 && "Invalid number of operands!");
3326  Inst.addOperand(MCOperand::createImm(unsigned(getMSRMask())));
3327  }
3328 
3329  void addBankedRegOperands(MCInst &Inst, unsigned N) const {
3330  assert(N == 1 && "Invalid number of operands!");
3331  Inst.addOperand(MCOperand::createImm(unsigned(getBankedReg())));
3332  }
3333 
3334  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
3335  assert(N == 1 && "Invalid number of operands!");
3336  Inst.addOperand(MCOperand::createImm(unsigned(getProcIFlags())));
3337  }
3338 
3339  void addVecListOperands(MCInst &Inst, unsigned N) const {
3340  assert(N == 1 && "Invalid number of operands!");
3341  Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
3342  }
3343 
3344  void addMVEVecListOperands(MCInst &Inst, unsigned N) const {
3345  assert(N == 1 && "Invalid number of operands!");
3346 
3347  // When we come here, the VectorList field will identify a range
3348  // of q-registers by its base register and length, and it will
3349  // have already been error-checked to be the expected length of
3350  // range and contain only q-regs in the range q0-q7. So we can
3351  // count on the base register being in the range q0-q6 (for 2
3352  // regs) or q0-q4 (for 4)
3353  //
3354  // The MVE instructions taking a register range of this kind will
3355  // need an operand in the MQQPR or MQQQQPR class, representing the
3356  // entire range as a unit. So we must translate into that class,
3357  // by finding the index of the base register in the MQPR reg
3358  // class, and returning the super-register at the corresponding
3359  // index in the target class.
3360 
3361  const MCRegisterClass *RC_in = &ARMMCRegisterClasses[ARM::MQPRRegClassID];
3362  const MCRegisterClass *RC_out =
3363  (VectorList.Count == 2) ? &ARMMCRegisterClasses[ARM::MQQPRRegClassID]
3364  : &ARMMCRegisterClasses[ARM::MQQQQPRRegClassID];
3365 
3366  unsigned I, E = RC_out->getNumRegs();
3367  for (I = 0; I < E; I++)
3368  if (RC_in->getRegister(I) == VectorList.RegNum)
3369  break;
3370  assert(I < E && "Invalid vector list start register!");
3371 
3372  Inst.addOperand(MCOperand::createReg(RC_out->getRegister(I)));
3373  }
3374 
3375  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
3376  assert(N == 2 && "Invalid number of operands!");
3377  Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
3378  Inst.addOperand(MCOperand::createImm(VectorList.LaneIndex));
3379  }
3380 
  // Emit the parsed byte-lane index as a plain immediate.
  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }
3385 
  // Emit the parsed half-word-lane index as a plain immediate.
  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }
3390 
  // Emit the parsed word-lane index as a plain immediate.
  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }
3395 
  // Emit the parsed double-word-lane index as a plain immediate.
  void addVectorIndex64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }
3400 
  // Emit the parsed MVE lane index as a plain immediate.
  void addMVEVectorIndexOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }
3405 
  // Emit the parsed MVE register-pair lane index as a plain immediate.
  void addMVEPairVectorIndexOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }
3410 
3411  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
3412  assert(N == 1 && "Invalid number of operands!");
3413  // The immediate encodes the type of constant as well as the value.
3414  // Mask in that this is an i8 splat.
3415  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3416  Inst.addOperand(MCOperand::createImm(CE->getValue() | 0xe00));
3417  }
3418 
3419  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
3420  assert(N == 1 && "Invalid number of operands!");
3421  // The immediate encodes the type of constant as well as the value.
3422  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3423  unsigned Value = CE->getValue();
3426  }
3427 
3428  void addNEONi16splatNotOperands(MCInst &Inst, unsigned N) const {
3429  assert(N == 1 && "Invalid number of operands!");
3430  // The immediate encodes the type of constant as well as the value.
3431  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3432  unsigned Value = CE->getValue();
3433  Value = ARM_AM::encodeNEONi16splat(~Value & 0xffff);
3435  }
3436 
3437  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
3438  assert(N == 1 && "Invalid number of operands!");
3439  // The immediate encodes the type of constant as well as the value.
3440  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3441  unsigned Value = CE->getValue();
3444  }
3445 
3446  void addNEONi32splatNotOperands(MCInst &Inst, unsigned N) const {
3447  assert(N == 1 && "Invalid number of operands!");
3448  // The immediate encodes the type of constant as well as the value.
3449  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3450  unsigned Value = CE->getValue();
3453  }
3454 
3455  void addNEONi8ReplicateOperands(MCInst &Inst, bool Inv) const {
3456  // The immediate encodes the type of constant as well as the value.
3457  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3458  assert((Inst.getOpcode() == ARM::VMOVv8i8 ||
3459  Inst.getOpcode() == ARM::VMOVv16i8) &&
3460  "All instructions that wants to replicate non-zero byte "
3461  "always must be replaced with VMOVv8i8 or VMOVv16i8.");
3462  unsigned Value = CE->getValue();
3463  if (Inv)
3464  Value = ~Value;
3465  unsigned B = Value & 0xff;
3466  B |= 0xe00; // cmode = 0b1110
3468  }
3469 
  // Inverted byte-replicate: forwards with Inv = true.
  void addNEONinvi8ReplicateOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addNEONi8ReplicateOperands(Inst, true);
  }
3474 
3475  static unsigned encodeNeonVMOVImmediate(unsigned Value) {
3476  if (Value >= 256 && Value <= 0xffff)
3477  Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
3478  else if (Value > 0xffff && Value <= 0xffffff)
3479  Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
3480  else if (Value > 0xffffff)
3481  Value = (Value >> 24) | 0x600;
3482  return Value;
3483  }
3484 
3485  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
3486  assert(N == 1 && "Invalid number of operands!");
3487  // The immediate encodes the type of constant as well as the value.
3488  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3489  unsigned Value = encodeNeonVMOVImmediate(CE->getValue());
3491  }
3492 
  // Non-inverted byte-replicate: forwards with Inv = false.
  void addNEONvmovi8ReplicateOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addNEONi8ReplicateOperands(Inst, false);
  }
3497 
3498  void addNEONvmovi16ReplicateOperands(MCInst &Inst, unsigned N) const {
3499  assert(N == 1 && "Invalid number of operands!");
3500  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3501  assert((Inst.getOpcode() == ARM::VMOVv4i16 ||
3502  Inst.getOpcode() == ARM::VMOVv8i16 ||
3503  Inst.getOpcode() == ARM::VMVNv4i16 ||
3504  Inst.getOpcode() == ARM::VMVNv8i16) &&
3505  "All instructions that want to replicate non-zero half-word "
3506  "always must be replaced with V{MOV,MVN}v{4,8}i16.");
3507  uint64_t Value = CE->getValue();
3508  unsigned Elem = Value & 0xffff;
3509  if (Elem >= 256)
3510  Elem = (Elem >> 8) | 0x200;
3511  Inst.addOperand(MCOperand::createImm(Elem));
3512  }
3513 
3514  void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
3515  assert(N == 1 && "Invalid number of operands!");
3516  // The immediate encodes the type of constant as well as the value.
3517  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3518  unsigned Value = encodeNeonVMOVImmediate(~CE->getValue());
3520  }
3521 
3522  void addNEONvmovi32ReplicateOperands(MCInst &Inst, unsigned N) const {
3523  assert(N == 1 && "Invalid number of operands!");
3524  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3525  assert((Inst.getOpcode() == ARM::VMOVv2i32 ||
3526  Inst.getOpcode() == ARM::VMOVv4i32 ||
3527  Inst.getOpcode() == ARM::VMVNv2i32 ||
3528  Inst.getOpcode() == ARM::VMVNv4i32) &&
3529  "All instructions that want to replicate non-zero word "
3530  "always must be replaced with V{MOV,MVN}v{2,4}i32.");
3531  uint64_t Value = CE->getValue();
3532  unsigned Elem = encodeNeonVMOVImmediate(Value & 0xffffffff);
3533  Inst.addOperand(MCOperand::createImm(Elem));
3534  }
3535 
3536  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
3537  assert(N == 1 && "Invalid number of operands!");
3538  // The immediate encodes the type of constant as well as the value.
3539  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3540  uint64_t Value = CE->getValue();
3541  unsigned Imm = 0;
3542  for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
3543  Imm |= (Value & 1) << i;
3544  }
3545  Inst.addOperand(MCOperand::createImm(Imm | 0x1e00));
3546  }
3547 
3548  void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
3549  assert(N == 1 && "Invalid number of operands!");
3550  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3551  Inst.addOperand(MCOperand::createImm(CE->getValue() / 90));
3552  }
3553 
3554  void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
3555  assert(N == 1 && "Invalid number of operands!");
3556  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3557  Inst.addOperand(MCOperand::createImm((CE->getValue() - 90) / 180));
3558  }
3559 
3560  void addMveSaturateOperands(MCInst &Inst, unsigned N) const {
3561  assert(N == 1 && "Invalid number of operands!");
3562  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3563  unsigned Imm = CE->getValue();
3564  assert((Imm == 48 || Imm == 64) && "Invalid saturate operand");
3565  Inst.addOperand(MCOperand::createImm(Imm == 48 ? 1 : 0));
3566  }
3567 
3568  void print(raw_ostream &OS) const override;
3569 
3570  static std::unique_ptr<ARMOperand> CreateITMask(unsigned Mask, SMLoc S) {
3571  auto Op = std::make_unique<ARMOperand>(k_ITCondMask);
3572  Op->ITMask.Mask = Mask;
3573  Op->StartLoc = S;
3574  Op->EndLoc = S;
3575  return Op;
3576  }
3577 
3578  static std::unique_ptr<ARMOperand> CreateCondCode(ARMCC::CondCodes CC,
3579  SMLoc S) {
3580  auto Op = std::make_unique<ARMOperand>(k_CondCode);
3581  Op->CC.Val = CC;
3582  Op->StartLoc = S;
3583  Op->EndLoc = S;
3584  return Op;
3585  }
3586 
3587  static std::unique_ptr<ARMOperand> CreateVPTPred(ARMVCC::VPTCodes CC,
3588  SMLoc S) {
3589  auto Op = std::make_unique<ARMOperand>(k_VPTPred);
3590  Op->VCC.Val = CC;
3591  Op->StartLoc = S;
3592  Op->EndLoc = S;
3593  return Op;
3594  }
3595 
3596  static std::unique_ptr<ARMOperand> CreateCoprocNum(unsigned CopVal, SMLoc S) {
3597  auto Op = std::make_unique<ARMOperand>(k_CoprocNum);
3598  Op->Cop.Val = CopVal;
3599  Op->StartLoc = S;
3600  Op->EndLoc = S;
3601  return Op;
3602  }
3603 
3604  static std::unique_ptr<ARMOperand> CreateCoprocReg(unsigned CopVal, SMLoc S) {
3605  auto Op = std::make_unique<ARMOperand>(k_CoprocReg);
3606  Op->Cop.Val = CopVal;
3607  Op->StartLoc = S;
3608  Op->EndLoc = S;
3609  return Op;
3610  }
3611 
3612  static std::unique_ptr<ARMOperand> CreateCoprocOption(unsigned Val, SMLoc S,
3613  SMLoc E) {
3614  auto Op = std::make_unique<ARMOperand>(k_CoprocOption);
3615  Op->Cop.Val = Val;
3616  Op->StartLoc = S;
3617  Op->EndLoc = E;
3618  return Op;
3619  }
3620 
3621  static std::unique_ptr<ARMOperand> CreateCCOut(unsigned RegNum, SMLoc S) {
3622  auto Op = std::make_unique<ARMOperand>(k_CCOut);
3623  Op->Reg.RegNum = RegNum;
3624  Op->StartLoc = S;
3625  Op->EndLoc = S;
3626  return Op;
3627  }
3628 
3629  static std::unique_ptr<ARMOperand> CreateToken(StringRef Str, SMLoc S) {
3630  auto Op = std::make_unique<ARMOperand>(k_Token);
3631  Op->Tok.Data = Str.data();
3632  Op->Tok.Length = Str.size();
3633  Op->StartLoc = S;
3634  Op->EndLoc = S;
3635  return Op;
3636  }
3637 
3638  static std::unique_ptr<ARMOperand> CreateReg(unsigned RegNum, SMLoc S,
3639  SMLoc E) {
3640  auto Op = std::make_unique<ARMOperand>(k_Register);
3641  Op->Reg.RegNum = RegNum;
3642  Op->StartLoc = S;
3643  Op->EndLoc = E;
3644  return Op;
3645  }
3646 
3647  static std::unique_ptr<ARMOperand>
3648  CreateShiftedRegister(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
3649  unsigned ShiftReg, unsigned ShiftImm, SMLoc S,
3650  SMLoc E) {
3651  auto Op = std::make_unique<ARMOperand>(k_ShiftedRegister);
3652  Op->RegShiftedReg.ShiftTy = ShTy;
3653  Op->RegShiftedReg.SrcReg = SrcReg;
3654  Op->RegShiftedReg.ShiftReg = ShiftReg;
3655  Op->RegShiftedReg.ShiftImm = ShiftImm;
3656  Op->StartLoc = S;
3657  Op->EndLoc = E;
3658  return Op;
3659  }
3660 
3661  static std::unique_ptr<ARMOperand>
3662  CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
3663  unsigned ShiftImm, SMLoc S, SMLoc E) {
3664  auto Op = std::make_unique<ARMOperand>(k_ShiftedImmediate);
3665  Op->RegShiftedImm.ShiftTy = ShTy;
3666  Op->RegShiftedImm.SrcReg = SrcReg;
3667  Op->RegShiftedImm.ShiftImm = ShiftImm;
3668  Op->StartLoc = S;
3669  Op->EndLoc = E;
3670  return Op;
3671  }
3672 
3673  static std::unique_ptr<ARMOperand> CreateShifterImm(bool isASR, unsigned Imm,
3674  SMLoc S, SMLoc E) {
3675  auto Op = std::make_unique<ARMOperand>(k_ShifterImmediate);
3676  Op->ShifterImm.isASR = isASR;
3677  Op->ShifterImm.Imm = Imm;
3678  Op->StartLoc = S;
3679  Op->EndLoc = E;
3680  return Op;
3681  }
3682 
3683  static std::unique_ptr<ARMOperand> CreateRotImm(unsigned Imm, SMLoc S,
3684  SMLoc E) {
3685  auto Op = std::make_unique<ARMOperand>(k_RotateImmediate);
3686  Op->RotImm.Imm = Imm;
3687  Op->StartLoc = S;
3688  Op->EndLoc = E;
3689  return Op;
3690  }
3691 
3692  static std::unique_ptr<ARMOperand> CreateModImm(unsigned Bits, unsigned Rot,
3693  SMLoc S, SMLoc E) {
3694  auto Op = std::make_unique<ARMOperand>(k_ModifiedImmediate);
3695  Op->ModImm.Bits = Bits;
3696  Op->ModImm.Rot = Rot;
3697  Op->StartLoc = S;
3698  Op->EndLoc = E;
3699  return Op;
3700  }
3701 
3702  static std::unique_ptr<ARMOperand>
3703  CreateConstantPoolImm(const MCExpr *Val, SMLoc S, SMLoc E) {
3704  auto Op = std::make_unique<ARMOperand>(k_ConstantPoolImmediate);
3705  Op->Imm.Val = Val;
3706  Op->StartLoc = S;
3707  Op->EndLoc = E;
3708  return Op;
3709  }
3710 
3711  static std::unique_ptr<ARMOperand>
3712  CreateBitfield(unsigned LSB, unsigned Width, SMLoc S, SMLoc E) {
3713  auto Op = std::make_unique<ARMOperand>(k_BitfieldDescriptor);
3714  Op->Bitfield.LSB = LSB;
3715  Op->Bitfield.Width = Width;
3716  Op->StartLoc = S;
3717  Op->EndLoc = E;
3718  return Op;
3719  }
3720 
3721  static std::unique_ptr<ARMOperand>
3722  CreateRegList(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
3723  SMLoc StartLoc, SMLoc EndLoc) {
3724  assert(Regs.size() > 0 && "RegList contains no registers?");
3725  KindTy Kind = k_RegisterList;
3726 
3727  if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
3728  Regs.front().second)) {
3729  if (Regs.back().second == ARM::VPR)
3730  Kind = k_FPDRegisterListWithVPR;
3731  else
3732  Kind = k_DPRRegisterList;
3733  } else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(
3734  Regs.front().second)) {
3735  if (Regs.back().second == ARM::VPR)
3736  Kind = k_FPSRegisterListWithVPR;
3737  else
3738  Kind = k_SPRRegisterList;
3739  }
3740 
3741  if (Kind == k_RegisterList && Regs.back().second == ARM::APSR)
3742  Kind = k_RegisterListWithAPSR;
3743 
3744  assert(llvm::is_sorted(Regs) && "Register list must be sorted by encoding");
3745 
3746  auto Op = std::make_unique<ARMOperand>(Kind);
3747  for (const auto &P : Regs)
3748  Op->Registers.push_back(P.second);
3749 
3750  Op->StartLoc = StartLoc;
3751  Op->EndLoc = EndLoc;
3752  return Op;
3753  }
3754 
3755  static std::unique_ptr<ARMOperand> CreateVectorList(unsigned RegNum,
3756  unsigned Count,
3757  bool isDoubleSpaced,
3758  SMLoc S, SMLoc E) {
3759  auto Op = std::make_unique<ARMOperand>(k_VectorList);
3760  Op->VectorList.RegNum = RegNum;
3761  Op->VectorList.Count = Count;
3762  Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3763  Op->StartLoc = S;
3764  Op->EndLoc = E;
3765  return Op;
3766  }
3767 
3768  static std::unique_ptr<ARMOperand>
3769  CreateVectorListAllLanes(unsigned RegNum, unsigned Count, bool isDoubleSpaced,
3770  SMLoc S, SMLoc E) {
3771  auto Op = std::make_unique<ARMOperand>(k_VectorListAllLanes);
3772  Op->VectorList.RegNum = RegNum;
3773  Op->VectorList.Count = Count;
3774  Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3775  Op->StartLoc = S;
3776  Op->EndLoc = E;
3777  return Op;
3778  }
3779 
3780  static std::unique_ptr<ARMOperand>
3781  CreateVectorListIndexed(unsigned RegNum, unsigned Count, unsigned Index,
3782  bool isDoubleSpaced, SMLoc S, SMLoc E) {
3783  auto Op = std::make_unique<ARMOperand>(k_VectorListIndexed);
3784  Op->VectorList.RegNum = RegNum;
3785  Op->VectorList.Count = Count;
3786  Op->VectorList.LaneIndex = Index;
3787  Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3788  Op->StartLoc = S;
3789  Op->EndLoc = E;
3790  return Op;
3791  }
3792 
3793  static std::unique_ptr<ARMOperand>
3794  CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
3795  auto Op = std::make_unique<ARMOperand>(k_VectorIndex);
3796  Op->VectorIndex.Val = Idx;
3797  Op->StartLoc = S;
3798  Op->EndLoc = E;
3799  return Op;
3800  }
3801 
3802  static std::unique_ptr<ARMOperand> CreateImm(const MCExpr *Val, SMLoc S,
3803  SMLoc E) {
3804  auto Op = std::make_unique<ARMOperand>(k_Immediate);
3805  Op->Imm.Val = Val;
3806  Op->StartLoc = S;
3807  Op->EndLoc = E;
3808  return Op;
3809  }
3810 
3811  static std::unique_ptr<ARMOperand>
3812  CreateMem(unsigned BaseRegNum, const MCExpr *OffsetImm, unsigned OffsetRegNum,
3813  ARM_AM::ShiftOpc ShiftType, unsigned ShiftImm, unsigned Alignment,
3814  bool isNegative, SMLoc S, SMLoc E, SMLoc AlignmentLoc = SMLoc()) {
3815  auto Op = std::make_unique<ARMOperand>(k_Memory);
3816  Op->Memory.BaseRegNum = BaseRegNum;
3817  Op->Memory.OffsetImm = OffsetImm;
3818  Op->Memory.OffsetRegNum = OffsetRegNum;
3819  Op->Memory.ShiftType = ShiftType;
3820  Op->Memory.ShiftImm = ShiftImm;
3821  Op->Memory.Alignment = Alignment;
3822  Op->Memory.isNegative = isNegative;
3823  Op->StartLoc = S;
3824  Op->EndLoc = E;
3825  Op->AlignmentLoc = AlignmentLoc;
3826  return Op;
3827  }
3828 
3829  static std::unique_ptr<ARMOperand>
3830  CreatePostIdxReg(unsigned RegNum, bool isAdd, ARM_AM::ShiftOpc ShiftTy,
3831  unsigned ShiftImm, SMLoc S, SMLoc E) {
3832  auto Op = std::make_unique<ARMOperand>(k_PostIndexRegister);
3833  Op->PostIdxReg.RegNum = RegNum;
3834  Op->PostIdxReg.isAdd = isAdd;
3835  Op->PostIdxReg.ShiftTy = ShiftTy;
3836  Op->PostIdxReg.ShiftImm = ShiftImm;
3837  Op->StartLoc = S;
3838  Op->EndLoc = E;
3839  return Op;
3840  }
3841 
3842  static std::unique_ptr<ARMOperand> CreateMemBarrierOpt(ARM_MB::MemBOpt Opt,
3843  SMLoc S) {
3844  auto Op = std::make_unique<ARMOperand>(k_MemBarrierOpt);
3845  Op->MBOpt.Val = Opt;
3846  Op->StartLoc = S;
3847  Op->EndLoc = S;
3848  return Op;
3849  }
3850 
3851  static std::unique_ptr<ARMOperand>
3852  CreateInstSyncBarrierOpt(ARM_ISB::InstSyncBOpt Opt, SMLoc S) {
3853  auto Op = std::make_unique<ARMOperand>(k_InstSyncBarrierOpt);
3854  Op->ISBOpt.Val = Opt;
3855  Op->StartLoc = S;
3856  Op->EndLoc = S;
3857  return Op;
3858  }
3859 
3860  static std::unique_ptr<ARMOperand>
3861  CreateTraceSyncBarrierOpt(ARM_TSB::TraceSyncBOpt Opt, SMLoc S) {
3862  auto Op = std::make_unique<ARMOperand>(k_TraceSyncBarrierOpt);
3863  Op->TSBOpt.Val = Opt;
3864  Op->StartLoc = S;
3865  Op->EndLoc = S;
3866  return Op;
3867  }
3868 
3869  static std::unique_ptr<ARMOperand> CreateProcIFlags(ARM_PROC::IFlags IFlags,
3870  SMLoc S) {
3871  auto Op = std::make_unique<ARMOperand>(k_ProcIFlags);
3872  Op->IFlags.Val = IFlags;
3873  Op->StartLoc = S;
3874  Op->EndLoc = S;
3875  return Op;
3876  }
3877 
3878  static std::unique_ptr<ARMOperand> CreateMSRMask(unsigned MMask, SMLoc S) {
3879  auto Op = std::make_unique<ARMOperand>(k_MSRMask);
3880  Op->MMask.Val = MMask;
3881  Op->StartLoc = S;
3882  Op->EndLoc = S;
3883  return Op;
3884  }
3885 
3886  static std::unique_ptr<ARMOperand> CreateBankedReg(unsigned Reg, SMLoc S) {
3887  auto Op = std::make_unique<ARMOperand>(k_BankedReg);
3888  Op->BankedReg.Val = Reg;
3889  Op->StartLoc = S;
3890  Op->EndLoc = S;
3891  return Op;
3892  }
3893 };
3894 
3895 } // end anonymous namespace.
3896 
void ARMOperand::print(raw_ostream &OS) const {
  // Debug dump of this operand, one "<kind ...>" form per operand kind.
  // Helper: printable name for a register, or "noreg" for register 0.
  // NOTE(review): the non-zero return line of this lambda appears elided in
  // this view of the file -- presumably it looks up the register's printable
  // name; confirm against the full source.
  auto RegName = [](unsigned Reg) {
    if (Reg)
    else
      return "noreg";
  };

  switch (Kind) {
  case k_CondCode:
    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
    break;
  case k_VPTPred:
    OS << "<ARMVCC::" << ARMVPTPredToString(getVPTPred()) << ">";
    break;
  case k_CCOut:
    OS << "<ccout " << RegName(getReg()) << ">";
    break;
  case k_ITCondMask: {
    // Table indexed by the 4-bit IT mask ('t' = then, 'e' = else).
    static const char *const MaskStr[] = {
      "(invalid)", "(tttt)", "(ttt)", "(ttte)",
      "(tt)", "(ttet)", "(tte)", "(ttee)",
      "(t)", "(tett)", "(tet)", "(tete)",
      "(te)", "(teet)", "(tee)", "(teee)",
    };
    assert((ITMask.Mask & 0xf) == ITMask.Mask);
    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
    break;
  }
  case k_CoprocNum:
    OS << "<coprocessor number: " << getCoproc() << ">";
    break;
  case k_CoprocReg:
    OS << "<coprocessor register: " << getCoproc() << ">";
    break;
  case k_CoprocOption:
    OS << "<coprocessor option: " << CoprocOption.Val << ">";
    break;
  case k_MSRMask:
    OS << "<mask: " << getMSRMask() << ">";
    break;
  case k_BankedReg:
    OS << "<banked reg: " << getBankedReg() << ">";
    break;
  case k_Immediate:
    OS << *getImm();
    break;
  case k_MemBarrierOpt:
    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt(), false) << ">";
    break;
  case k_InstSyncBarrierOpt:
    OS << "<ARM_ISB::" << InstSyncBOptToString(getInstSyncBarrierOpt()) << ">";
    break;
  case k_TraceSyncBarrierOpt:
    OS << "<ARM_TSB::" << TraceSyncBOptToString(getTraceSyncBarrierOpt()) << ">";
    break;
  case k_Memory:
    // Each field of the addressing mode is printed only when present.
    OS << "<memory";
    if (Memory.BaseRegNum)
      OS << " base:" << RegName(Memory.BaseRegNum);
    if (Memory.OffsetImm)
      OS << " offset-imm:" << *Memory.OffsetImm;
    if (Memory.OffsetRegNum)
      OS << " offset-reg:" << (Memory.isNegative ? "-" : "")
         << RegName(Memory.OffsetRegNum);
    if (Memory.ShiftType != ARM_AM::no_shift) {
      OS << " shift-type:" << ARM_AM::getShiftOpcStr(Memory.ShiftType);
      OS << " shift-imm:" << Memory.ShiftImm;
    }
    if (Memory.Alignment)
      OS << " alignment:" << Memory.Alignment;
    OS << ">";
    break;
  case k_PostIndexRegister:
    OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
       << RegName(PostIdxReg.RegNum);
    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
         << PostIdxReg.ShiftImm;
    OS << ">";
    break;
  case k_ProcIFlags: {
    // Print each set flag bit (A/I/F), highest bit first.
    OS << "<ARM_PROC::";
    unsigned IFlags = getProcIFlags();
    for (int i=2; i >= 0; --i)
      if (IFlags & (1 << i))
        OS << ARM_PROC::IFlagsToString(1 << i);
    OS << ">";
    break;
  }
  case k_Register:
    OS << "<register " << RegName(getReg()) << ">";
    break;
  case k_ShifterImmediate:
    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
       << " #" << ShifterImm.Imm << ">";
    break;
  case k_ShiftedRegister:
    OS << "<so_reg_reg " << RegName(RegShiftedReg.SrcReg) << " "
       << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy) << " "
       << RegName(RegShiftedReg.ShiftReg) << ">";
    break;
  case k_ShiftedImmediate:
    OS << "<so_reg_imm " << RegName(RegShiftedImm.SrcReg) << " "
       << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy) << " #"
       << RegShiftedImm.ShiftImm << ">";
    break;
  case k_RotateImmediate:
    OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
    break;
  case k_ModifiedImmediate:
    OS << "<mod_imm #" << ModImm.Bits << ", #"
       << ModImm.Rot << ")>";
    break;
  case k_ConstantPoolImmediate:
    OS << "<constant_pool_imm #" << *getConstantPoolImm();
    break;
  case k_BitfieldDescriptor:
    OS << "<bitfield " << "lsb: " << Bitfield.LSB
       << ", width: " << Bitfield.Width << ">";
    break;
  case k_RegisterList:
  case k_RegisterListWithAPSR:
  case k_DPRRegisterList:
  case k_SPRRegisterList:
  case k_FPSRegisterListWithVPR:
  case k_FPDRegisterListWithVPR: {
    OS << "<register_list ";

    // NOTE(review): the iterator declaration line of this for-loop appears
    // elided in this view of the file; confirm against the full source.
    const SmallVectorImpl<unsigned> &RegList = getRegList();
         I = RegList.begin(), E = RegList.end(); I != E; ) {
      OS << RegName(*I);
      if (++I < E) OS << ", ";
    }

    OS << ">";
    break;
  }
  case k_VectorList:
    OS << "<vector_list " << VectorList.Count << " * "
       << RegName(VectorList.RegNum) << ">";
    break;
  case k_VectorListAllLanes:
    OS << "<vector_list(all lanes) " << VectorList.Count << " * "
       << RegName(VectorList.RegNum) << ">";
    break;
  case k_VectorListIndexed:
    OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
       << VectorList.Count << " * " << RegName(VectorList.RegNum) << ">";
    break;
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
  }
}
4056 
4057 /// @name Auto-generated Match Functions
4058 /// {
4059 
4060 static unsigned MatchRegisterName(StringRef Name);
4061 
4062 /// }
4063 
4064 bool ARMAsmParser::ParseRegister(unsigned &RegNo,
4065  SMLoc &StartLoc, SMLoc &EndLoc) {
4066  const AsmToken &Tok = getParser().getTok();
4067  StartLoc = Tok.getLoc();
4068  EndLoc = Tok.getEndLoc();
4069  RegNo = tryParseRegister();
4070 
4071  return (RegNo == (unsigned)-1);
4072 }
4073 
4074 OperandMatchResultTy ARMAsmParser::tryParseRegister(unsigned &RegNo,
4075  SMLoc &StartLoc,
4076  SMLoc &EndLoc) {
4077  if (ParseRegister(RegNo, StartLoc, EndLoc))
4078  return MatchOperand_NoMatch;
4079  return MatchOperand_Success;
4080 }
4081 
4082 /// Try to parse a register name. The token must be an Identifier when called,
4083 /// and if it is a register name the token is eaten and the register number is
4084 /// returned. Otherwise return -1.
4085 int ARMAsmParser::tryParseRegister() {
4086  MCAsmParser &Parser = getParser();
4087  const AsmToken &Tok = Parser.getTok();
4088  if (Tok.isNot(AsmToken::Identifier)) return -1;
4089 
4090  std::string lowerCase = Tok.getString().lower();
4091  unsigned RegNum = MatchRegisterName(lowerCase);
4092  if (!RegNum) {
4093  RegNum = StringSwitch<unsigned>(lowerCase)
4094  .Case("r13", ARM::SP)
4095  .Case("r14", ARM::LR)
4096  .Case("r15", ARM::PC)
4097  .Case("ip", ARM::R12)
4098  // Additional register name aliases for 'gas' compatibility.
4099  .Case("a1", ARM::R0)
4100  .Case("a2", ARM::R1)
4101  .Case("a3", ARM::R2)
4102  .Case("a4", ARM::R3)
4103  .Case("v1", ARM::R4)
4104  .Case("v2", ARM::R5)
4105  .Case("v3", ARM::R6)
4106  .Case("v4", ARM::R7)
4107  .Case("v5", ARM::R8)
4108  .Case("v6", ARM::R9)
4109  .Case("v7", ARM::R10)
4110  .Case("v8", ARM::R11)
4111  .Case("sb", ARM::R9)
4112  .Case("sl", ARM::R10)
4113  .Case("fp", ARM::R11)
4114  .Default(0);
4115  }
4116  if (!RegNum) {
4117  // Check for aliases registered via .req. Canonicalize to lower case.
4118  // That's more consistent since register names are case insensitive, and
4119  // it's how the original entry was passed in from MC/MCParser/AsmParser.
4120  StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
4121  // If no match, return failure.
4122  if (Entry == RegisterReqs.end())
4123  return -1;
4124  Parser.Lex(); // Eat identifier token.
4125  return Entry->getValue();
4126  }
4127 
4128  // Some FPUs only have 16 D registers, so D16-D31 are invalid
4129  if (!hasD32() && RegNum >= ARM::D16 && RegNum <= ARM::D31)
4130  return -1;
4131 
4132  Parser.Lex(); // Eat identifier token.
4133 
4134  return RegNum;
4135 }
4136 
4137 // Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
4138 // If a recoverable error occurs, return 1. If an irrecoverable error
4139 // occurs, return -1. An irrecoverable error is one where tokens have been
4140 // consumed in the process of trying to parse the shifter (i.e., when it is
4141 // indeed a shifter operand, but malformed).
int ARMAsmParser::tryParseShiftRegister(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier))
    return -1;

  // Match the shift mnemonic case-insensitively.
  // NOTE(review): the StringSwitch initialization line and its
  // .Default(ARM_AM::no_shift) line appear elided in this view of the
  // file; confirm against the full source.
  std::string lowerCase = Tok.getString().lower();
      .Case("asl", ARM_AM::lsl)
      .Case("lsl", ARM_AM::lsl)
      .Case("lsr", ARM_AM::lsr)
      .Case("asr", ARM_AM::asr)
      .Case("ror", ARM_AM::ror)
      .Case("rrx", ARM_AM::rrx)

  // Not a shift mnemonic: recoverable error (no tokens consumed yet).
  if (ShiftTy == ARM_AM::no_shift)
    return 1;

  Parser.Lex(); // Eat the operator.

  // The source register for the shift has already been added to the
  // operand list, so we need to pop it off and combine it into the shifted
  // register operand instead.
  std::unique_ptr<ARMOperand> PrevOp(
      (ARMOperand *)Operands.pop_back_val().release());
  if (!PrevOp->isReg())
    return Error(PrevOp->getStartLoc(), "shift must be of a register");
  int SrcReg = PrevOp->getReg();

  SMLoc EndLoc;
  int64_t Imm = 0;
  int ShiftReg = 0;
  if (ShiftTy == ARM_AM::rrx) {
    // RRX Doesn't have an explicit shift amount. The encoder expects
    // the shift register to be the same as the source register. Seems odd,
    // but OK.
    ShiftReg = SrcReg;
  } else {
    // Figure out if this is shifted by a constant or a register (for non-RRX).
    if (Parser.getTok().is(AsmToken::Hash) ||
        Parser.getTok().is(AsmToken::Dollar)) {
      Parser.Lex(); // Eat hash.
      SMLoc ImmLoc = Parser.getTok().getLoc();
      const MCExpr *ShiftExpr = nullptr;
      if (getParser().parseExpression(ShiftExpr, EndLoc)) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // The expression must be evaluatable as an immediate.
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
      if (!CE) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // Range check the immediate.
      // lsl, ror: 0 <= imm <= 31
      // lsr, asr: 0 <= imm <= 32
      Imm = CE->getValue();
      if (Imm < 0 ||
          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
        Error(ImmLoc, "immediate shift value out of range");
        return -1;
      }
      // shift by zero is a nop. Always send it through as lsl.
      // ('as' compatibility)
      if (Imm == 0)
        ShiftTy = ARM_AM::lsl;
    } else if (Parser.getTok().is(AsmToken::Identifier)) {
      // Register-specified shift amount.
      SMLoc L = Parser.getTok().getLoc();
      EndLoc = Parser.getTok().getEndLoc();
      ShiftReg = tryParseRegister();
      if (ShiftReg == -1) {
        Error(L, "expected immediate or register in shift operand");
        return -1;
      }
    } else {
      Error(Parser.getTok().getLoc(),
            "expected immediate or register in shift operand");
      return -1;
    }
  }

  // Register-shifted vs. immediate-shifted operand, depending on what
  // was parsed above (RRX always takes the immediate form).
  if (ShiftReg && ShiftTy != ARM_AM::rrx)
    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
                                                         ShiftReg, Imm,
                                                         S, EndLoc));
  else
    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
                                                          S, EndLoc));

  return 0;
}
4237 
4238 /// Try to parse a register name. The token must be an Identifier when called.
4239 /// If it's a register, an AsmOperand is created. Another AsmOperand is created
4240 /// if there is a "writeback". 'true' if it's not a register.
4241 ///
4242 /// TODO this is likely to change to allow different register types and or to
4243 /// parse for a specific register type.
4244 bool ARMAsmParser::tryParseRegisterWithWriteBack(OperandVector &Operands) {
4245  MCAsmParser &Parser = getParser();
4246  SMLoc RegStartLoc = Parser.getTok().getLoc();
4247  SMLoc RegEndLoc = Parser.getTok().getEndLoc();
4248  int RegNo = tryParseRegister();
4249  if (RegNo == -1)
4250  return true;
4251 
4252  Operands.push_back(ARMOperand::CreateReg(RegNo, RegStartLoc, RegEndLoc));
4253 
4254  const AsmToken &ExclaimTok = Parser.getTok();
4255  if (ExclaimTok.is(AsmToken::Exclaim)) {
4256  Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
4257  ExclaimTok.getLoc()));
4258  Parser.Lex(); // Eat exclaim token
4259  return false;
4260  }
4261 
4262  // Also check for an index operand. This is only legal for vector registers,
4263  // but that'll get caught OK in operand matching, so we don't need to
4264  // explicitly filter everything else out here.
4265  if (Parser.getTok().is(AsmToken::LBrac)) {
4266  SMLoc SIdx = Parser.getTok().getLoc();
4267  Parser.Lex(); // Eat left bracket token.
4268 
4269  const MCExpr *ImmVal;
4270  if (getParser().parseExpression(ImmVal))
4271  return true;
4272  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4273  if (!MCE)
4274  return TokError("immediate value expected for vector index");
4275 
4276  if (Parser.getTok().isNot(AsmToken::RBrac))
4277  return Error(Parser.getTok().getLoc(), "']' expected");
4278 
4279  SMLoc E = Parser.getTok().getEndLoc();
4280  Parser.Lex(); // Eat right bracket token.
4281 
4282  Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
4283  SIdx, E,
4284  getContext()));
4285  }
4286 
4287  return false;
4288 }
4289 
4290 /// MatchCoprocessorOperandName - Try to parse an coprocessor related
4291 /// instruction with a symbolic operand name.
4292 /// We accept "crN" syntax for GAS compatibility.
4293 /// <operand-name> ::= <prefix><number>
4294 /// If CoprocOp is 'c', then:
4295 /// <prefix> ::= c | cr
4296 /// If CoprocOp is 'p', then :
4297 /// <prefix> ::= p
4298 /// <number> ::= integer in range [0, 15]
4299 static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
4300  // Use the same layout as the tablegen'erated register name matcher. Ugly,
4301  // but efficient.
4302  if (Name.size() < 2 || Name[0] != CoprocOp)
4303  return -1;
4304  Name = (Name[1] == 'r') ? Name.drop_front(2) : Name.drop_front();
4305 
4306  switch (Name.size()) {
4307  default: return -1;
4308  case 1:
4309  switch (Name[0]) {
4310  default: return -1;
4311  case '0': return 0;
4312  case '1': return 1;
4313  case '2': return 2;
4314  case '3': return 3;
4315  case '4': return 4;
4316  case '5': return 5;
4317  case '6': return 6;
4318  case '7': return 7;
4319  case '8': return 8;
4320  case '9': return 9;
4321  }
4322  case 2:
4323  if (Name[0] != '1')
4324  return -1;
4325  switch (Name[1]) {
4326  default: return -1;
4327  // CP10 and CP11 are VFP/NEON and so vector instructions should be used.
4328  // However, old cores (v5/v6) did use them in that way.
4329  case '0': return 10;
4330  case '1': return 11;
4331  case '2': return 12;
4332  case '3': return 13;
4333  case '4': return 14;
4334  case '5': return 15;
4335  }
4336  }
4337 }
4338 
4339 /// parseITCondCode - Try to parse a condition code for an IT instruction.
ARMAsmParser::parseITCondCode(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  // Condition codes are identifiers ("eq", "ne", ...); anything else is
  // simply not a match and no tokens are consumed.
  if (!Tok.is(AsmToken::Identifier))
    return MatchOperand_NoMatch;
  unsigned CC = ARMCondCodeFromString(Tok.getString());
  // ARMCondCodeFromString returns ~0U for an unrecognized mnemonic.
  if (CC == ~0U)
    return MatchOperand_NoMatch;
  Parser.Lex(); // Eat the token.

  Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));

  return MatchOperand_Success;
}
4356 
4357 /// parseCoprocNumOperand - Try to parse an coprocessor number operand. The
4358 /// token must be an Identifier when called, and if it is a coprocessor
4359 /// number, the token is eaten and the operand is added to the operand list.
ARMAsmParser::parseCoprocNumOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  // Accept "pN" with N in [0, 15] (case-insensitive).
  int Num = MatchCoprocessorOperandName(Tok.getString().lower(), 'p');
  if (Num == -1)
    return MatchOperand_NoMatch;
  // Some coprocessor numbers are reserved depending on the target features.
  if (!isValidCoprocessorNumber(Num, getSTI().getFeatureBits()))
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
  return MatchOperand_Success;
}
4378 
4379 /// parseCoprocRegOperand - Try to parse an coprocessor register operand. The
4380 /// token must be an Identifier when called, and if it is a coprocessor
4381 /// number, the token is eaten and the operand is added to the operand list.
ARMAsmParser::parseCoprocRegOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  // Accept "cN" or "crN" with N in [0, 15] (case-insensitive).
  int Reg = MatchCoprocessorOperandName(Tok.getString().lower(), 'c');
  if (Reg == -1)
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
  return MatchOperand_Success;
}
4398 
4399 /// parseCoprocOptionOperand - Try to parse an coprocessor option operand.
4400 /// coproc_option : '{' imm0_255 '}'
ARMAsmParser::parseCoprocOptionOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = Parser.getTok().getLoc();

  // If this isn't a '{', this isn't a coprocessor immediate operand.
  if (Parser.getTok().isNot(AsmToken::LCurly))
    return MatchOperand_NoMatch;
  Parser.Lex(); // Eat the '{'

  const MCExpr *Expr;
  SMLoc Loc = Parser.getTok().getLoc();
  if (getParser().parseExpression(Expr)) {
    Error(Loc, "illegal expression");
    return MatchOperand_ParseFail;
  }
  // The option must fold to a constant in [0, 255].
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
  if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
    Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
    return MatchOperand_ParseFail;
  }
  int Val = CE->getValue();

  // Check for and consume the closing '}'
  if (Parser.getTok().isNot(AsmToken::RCurly))
    return MatchOperand_ParseFail;
  SMLoc E = Parser.getTok().getEndLoc();
  Parser.Lex(); // Eat the '}'

  Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
  return MatchOperand_Success;
}
4433 
4434 // For register list parsing, we need to map from raw GPR register numbering
4435 // to the enumeration values. The enumeration values aren't sorted by
4436 // register number due to our using "sp", "lr" and "pc" as canonical names.
4437 static unsigned getNextRegister(unsigned Reg) {
4438  // If this is a GPR, we need to do it manually, otherwise we can rely
4439  // on the sort ordering of the enumeration since the other reg-classes
4440  // are sane.
4441  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
4442  return Reg + 1;
4443  switch(Reg) {
4444  default: llvm_unreachable("Invalid GPR number!");
4445  case ARM::R0: return ARM::R1; case ARM::R1: return ARM::R2;
4446  case ARM::R2: return ARM::R3; case ARM::R3: return ARM::R4;
4447  case ARM::R4: return ARM::R5; case ARM::R5: return ARM::R6;
4448  case ARM::R6: return ARM::R7; case ARM::R7: return ARM::R8;
4449  case ARM::R8: return ARM::R9; case ARM::R9: return ARM::R10;
4450  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
4451  case ARM::R12: return ARM::SP; case ARM::SP: return ARM::LR;
4452  case ARM::LR: return ARM::PC; case ARM::PC: return ARM::R0;
4453  }
4454 }
4455 
4456 // Insert an <Encoding, Register> pair in an ordered vector. Return true on
4457 // success, or false, if duplicate encoding found.
4458 static bool
4459 insertNoDuplicates(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
4460  unsigned Enc, unsigned Reg) {
4461  Regs.emplace_back(Enc, Reg);
4462  for (auto I = Regs.rbegin(), J = I + 1, E = Regs.rend(); J != E; ++I, ++J) {
4463  if (J->first == Enc) {
4464  Regs.erase(J.base());
4465  return false;
4466  }
4467  if (J->first < Enc)
4468  break;
4469  std::swap(*I, *J);
4470  }
4471  return true;
4472 }
4473 
4474 /// Parse a register list.
4475 bool ARMAsmParser::parseRegisterList(OperandVector &Operands, bool EnforceOrder,
4476  bool AllowRAAC) {
4477  MCAsmParser &Parser = getParser();
4478  if (Parser.getTok().isNot(AsmToken::LCurly))
4479  return TokError("Token is not a Left Curly Brace");
4480  SMLoc S = Parser.getTok().getLoc();
4481  Parser.Lex(); // Eat '{' token.
4482  SMLoc RegLoc = Parser.getTok().getLoc();
4483 
4484  // Check the first register in the list to see what register class
4485  // this is a list of.
4486  int Reg = tryParseRegister();
4487  if (Reg == -1)
4488  return Error(RegLoc, "register expected");
4489  if (!AllowRAAC && Reg == ARM::RA_AUTH_CODE)
4490  return Error(RegLoc, "pseudo-register not allowed");
4491  // The reglist instructions have at most 16 registers, so reserve
4492  // space for that many.
4493  int EReg = 0;
4495 
4496  // Allow Q regs and just interpret them as the two D sub-registers.
4497  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4498  Reg = getDRegFromQReg(Reg);
4499  EReg = MRI->getEncodingValue(Reg);
4500  Registers.emplace_back(EReg, Reg);
4501  ++Reg;
4502  }
4503  const MCRegisterClass *RC;
4504  if (Reg == ARM::RA_AUTH_CODE ||
4505  ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
4506  RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
4507  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
4508  RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
4509  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
4510  RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
4511  else if (ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg))
4512  RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
4513  else
4514  return Error(RegLoc, "invalid register in register list");
4515 
4516  // Store the register.
4517  EReg = MRI->getEncodingValue(Reg);
4518  Registers.emplace_back(EReg, Reg);
4519 
4520  // This starts immediately after the first register token in the list,
4521  // so we can see either a comma or a minus (range separator) as a legal
4522  // next token.
4523  while (Parser.getTok().is(AsmToken::Comma) ||
4524  Parser.getTok().is(AsmToken::Minus)) {
4525  if (Parser.getTok().is(AsmToken::Minus)) {
4526  if (Reg == ARM::RA_AUTH_CODE)
4527  return Error(RegLoc, "pseudo-register not allowed");
4528  Parser.Lex(); // Eat the minus.
4529  SMLoc AfterMinusLoc = Parser.getTok().getLoc();
4530  int EndReg = tryParseRegister();
4531  if (EndReg == -1)
4532  return Error(AfterMinusLoc, "register expected");
4533  if (EndReg == ARM::RA_AUTH_CODE)
4534  return Error(AfterMinusLoc, "pseudo-register not allowed");
4535  // Allow Q regs and just interpret them as the two D sub-registers.
4536  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
4537  EndReg = getDRegFromQReg(EndReg) + 1;
4538  // If the register is the same as the start reg, there's nothing
4539  // more to do.
4540  if (Reg == EndReg)
4541  continue;
4542  // The register must be in the same register class as the first.
4543  if (!RC->contains(Reg))
4544  return Error(AfterMinusLoc, "invalid register in register list");
4545  // Ranges must go from low to high.
4546  if (MRI->getEncodingValue(Reg) > MRI->getEncodingValue(EndReg))
4547  return Error(AfterMinusLoc, "bad range in register list");
4548 
4549  // Add all the registers in the range to the register list.
4550  while (Reg != EndReg) {
4551  Reg = getNextRegister(Reg);
4552  EReg = MRI->getEncodingValue(Reg);
4553  if (!insertNoDuplicates(Registers, EReg, Reg)) {
4554  Warning(AfterMinusLoc, StringRef("duplicated register (") +
4556  ") in register list");
4557  }
4558  }
4559  continue;
4560  }
4561  Parser.Lex(); // Eat the comma.
4562  RegLoc = Parser.getTok().getLoc();
4563  int OldReg = Reg;
4564  const AsmToken RegTok = Parser.getTok();
4565  Reg = tryParseRegister();
4566  if (Reg == -1)
4567  return Error(RegLoc, "register expected");
4568  if (!AllowRAAC && Reg == ARM::RA_AUTH_CODE)
4569  return Error(RegLoc, "pseudo-register not allowed");
4570  // Allow Q regs and just interpret them as the two D sub-registers.
4571  bool isQReg = false;
4572  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4573  Reg = getDRegFromQReg(Reg);
4574  isQReg = true;
4575  }
4576  if (Reg != ARM::RA_AUTH_CODE && !RC->contains(Reg) &&
4577  RC->getID() == ARMMCRegisterClasses[ARM::GPRRegClassID].getID() &&
4578  ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg)) {
4579  // switch the register classes, as GPRwithAPSRnospRegClassID is a partial
4580  // subset of GPRRegClassId except it contains APSR as well.
4581  RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
4582  }
4583  if (Reg == ARM::VPR &&
4584  (RC == &ARMMCRegisterClasses[ARM::SPRRegClassID] ||
4585  RC == &ARMMCRegisterClasses[ARM::DPRRegClassID] ||
4586  RC == &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID])) {
4587  RC = &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID];
4588  EReg = MRI->getEncodingValue(Reg);
4589  if (!insertNoDuplicates(Registers, EReg, Reg)) {
4590  Warning(RegLoc, "duplicated register (" + RegTok.getString() +
4591  ") in register list");
4592  }
4593  continue;
4594  }
4595  // The register must be in the same register class as the first.
4596  if ((Reg == ARM::RA_AUTH_CODE &&
4597  RC != &ARMMCRegisterClasses[ARM::GPRRegClassID]) ||
4598  (Reg != ARM::RA_AUTH_CODE && !RC->contains(Reg)))
4599  return Error(RegLoc, "invalid register in register list");
4600  // In most cases, the list must be monotonically increasing. An
4601  // exception is CLRM, which is order-independent anyway, so
4602  // there's no potential for confusion if you write clrm {r2,r1}
4603  // instead of clrm {r1,r2}.
4604  if (EnforceOrder &&
4605  MRI->getEncodingValue(Reg) < MRI->getEncodingValue(OldReg)) {
4606  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
4607  Warning(RegLoc, "register list not in ascending order");
4608  else if (!ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg))
4609  return Error(RegLoc, "register list not in ascending order");
4610  }
4611  // VFP register lists must also be contiguous.
4612  if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
4613  RC != &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID] &&
4614  Reg != OldReg + 1)
4615  return Error(RegLoc, "non-contiguous register range");
4616  EReg = MRI->getEncodingValue(Reg);
4617  if (!insertNoDuplicates(Registers, EReg, Reg)) {
4618  Warning(RegLoc, "duplicated register (" + RegTok.getString() +
4619  ") in register list");
4620  }
4621  if (isQReg) {
4622  EReg = MRI->getEncodingValue(++Reg);
4623  Registers.emplace_back(EReg, Reg);
4624  }
4625  }
4626 
4627  if (Parser.getTok().isNot(AsmToken::RCurly))
4628  return Error(Parser.getTok().getLoc(), "'}' expected");
4629  SMLoc E = Parser.getTok().getEndLoc();
4630  Parser.Lex(); // Eat '}' token.
4631 
4632  // Push the register list operand.
4633  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
4634 
4635  // The ARM system instruction variants for LDM/STM have a '^' token here.
4636  if (Parser.getTok().is(AsmToken::Caret)) {
4637  Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
4638  Parser.Lex(); // Eat '^' token.
4639  }
4640 
4641  return false;
4642 }
4643 
4644 // Helper function to parse the lane index for vector lists.
4645 OperandMatchResultTy ARMAsmParser::
4646 parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index, SMLoc &EndLoc) {
4647  MCAsmParser &Parser = getParser();
4648  Index = 0; // Always return a defined index value.
4649  if (Parser.getTok().is(AsmToken::LBrac)) {
4650  Parser.Lex(); // Eat the '['.
4651  if (Parser.getTok().is(AsmToken::RBrac)) {
4652  // "Dn[]" is the 'all lanes' syntax.
4653  LaneKind = AllLanes;
4654  EndLoc = Parser.getTok().getEndLoc();
4655  Parser.Lex(); // Eat the ']'.
4656  return MatchOperand_Success;
4657  }
4658 
4659  // There's an optional '#' token here. Normally there wouldn't be, but
4660  // inline assemble puts one in, and it's friendly to accept that.
4661  if (Parser.getTok().is(AsmToken::Hash))
4662  Parser.Lex(); // Eat '#' or '$'.
4663 
4664  const MCExpr *LaneIndex;
4665  SMLoc Loc = Parser.getTok().getLoc();
4666  if (getParser().parseExpression(LaneIndex)) {
4667  Error(Loc, "illegal expression");
4668  return MatchOperand_ParseFail;
4669  }
4670  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
4671  if (!CE) {
4672  Error(Loc, "lane index must be empty or an integer");
4673  return MatchOperand_ParseFail;
4674  }
4675  if (Parser.getTok().isNot(AsmToken::RBrac)) {
4676  Error(Parser.getTok().getLoc(), "']' expected");
4677  return MatchOperand_ParseFail;
4678  }
4679  EndLoc = Parser.getTok().getEndLoc();
4680  Parser.Lex(); // Eat the ']'.
4681  int64_t Val = CE->getValue();
4682 
4683  // FIXME: Make this range check context sensitive for .8, .16, .32.
4684  if (Val < 0 || Val > 7) {
4685  Error(Parser.getTok().getLoc(), "lane index out of range");
4686  return MatchOperand_ParseFail;
4687  }
4688  Index = Val;
4689  LaneKind = IndexedLane;
4690  return MatchOperand_Success;
4691  }
4692  LaneKind = NoLanes;
4693  return MatchOperand_Success;
4694 }
4695 
4696 // parse a vector register list
4698 ARMAsmParser::parseVectorList(OperandVector &Operands) {
4699  MCAsmParser &Parser = getParser();
4700  VectorLaneTy LaneKind;
4701  unsigned LaneIndex;
4702  SMLoc S = Parser.getTok().getLoc();
4703  // As an extension (to match gas), support a plain D register or Q register
4704  // (without encosing curly braces) as a single or double entry list,
4705  // respectively.
4706  if (!hasMVE() && Parser.getTok().is(AsmToken::Identifier)) {
4707  SMLoc E = Parser.getTok().getEndLoc();
4708  int Reg = tryParseRegister();
4709  if (Reg == -1)
4710  return MatchOperand_NoMatch;
4711  if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
4712  OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
4713  if (Res != MatchOperand_Success)
4714  return Res;
4715  switch (LaneKind) {
4716  case NoLanes:
4717  Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
4718  break;
4719  case AllLanes:
4720  Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
4721  S, E));
4722  break;
4723  case IndexedLane:
4724  Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
4725  LaneIndex,
4726  false, S, E));
4727  break;
4728  }
4729  return MatchOperand_Success;
4730  }
4731  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4732  Reg = getDRegFromQReg(Reg);
4733  OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
4734  if (Res != MatchOperand_Success)
4735  return Res;
4736  switch (LaneKind) {
4737  case NoLanes:
4738  Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
4739  &ARMMCRegisterClasses[ARM::DPairRegClassID]);
4740  Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
4741  break;
4742  case AllLanes:
4743  Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
4744  &ARMMCRegisterClasses[ARM::DPairRegClassID]);
4745  Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
4746  S, E));
4747  break;
4748  case IndexedLane:
4749  Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
4750  LaneIndex,
4751  false, S, E));
4752  break;
4753  }
4754  return MatchOperand_Success;
4755  }
4756  Error(S, "vector register expected");
4757  return MatchOperand_ParseFail;
4758  }
4759 
4760  if (Parser.getTok().isNot(AsmToken::LCurly))
4761  return MatchOperand_NoMatch;
4762 
4763  Parser.Lex(); // Eat '{' token.
4764  SMLoc RegLoc = Parser.getTok().getLoc();
4765 
4766  int Reg = tryParseRegister();
4767  if (Reg == -1) {
4768  Error(RegLoc, "register expected");
4769  return MatchOperand_ParseFail;
4770  }
4771  unsigned Count = 1;
4772  int Spacing = 0;
4773  unsigned FirstReg = Reg;
4774 
4775  if (hasMVE() && !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Reg)) {
4776  Error(Parser.getTok().getLoc(), "vector register in range Q0-Q7 expected");
4777  return MatchOperand_ParseFail;
4778  }
4779  // The list is of D registers, but we also allow Q regs and just interpret
4780  // them as the two D sub-registers.
4781  else if (!hasMVE() && ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4782  FirstReg = Reg = getDRegFromQReg(Reg);
4783  Spacing = 1; // double-spacing requires explicit D registers, otherwise
4784  // it's ambiguous with four-register single spaced.
4785  ++Reg;
4786  ++Count;
4787  }
4788 
4789  SMLoc E;
4790  if (parseVectorLane(LaneKind, LaneIndex, E) != MatchOperand_Success)
4791  return MatchOperand_ParseFail;
4792 
4793  while (Parser.getTok().is(AsmToken::Comma) ||
4794  Parser.getTok().is(AsmToken::Minus)) {
4795  if (Parser.getTok().is(AsmToken::Minus)) {
4796  if (!Spacing)
4797  Spacing = 1; // Register range implies a single spaced list.
4798  else if (Spacing == 2) {
4799  Error(Parser.getTok().getLoc(),
4800  "sequential registers in double spaced list");
4801  return MatchOperand_ParseFail;
4802  }
4803  Parser.Lex(); // Eat the minus.
4804  SMLoc AfterMinusLoc = Parser.getTok().getLoc();
4805  int EndReg = tryParseRegister();
4806  if (EndReg == -1) {
4807  Error(AfterMinusLoc, "register expected");
4808  return MatchOperand_ParseFail;
4809  }
4810  // Allow Q regs and just interpret them as the two D sub-registers.
4811  if (!hasMVE() && ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
4812  EndReg = getDRegFromQReg(EndReg) + 1;
4813  // If the register is the same as the start reg, there's nothing
4814  // more to do.
4815  if (Reg == EndReg)
4816  continue;
4817  // The register must be in the same register class as the first.
4818  if ((hasMVE() &&
4819  !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(EndReg)) ||
4820  (!hasMVE() &&
4821  !ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg))) {
4822  Error(AfterMinusLoc, "invalid register in register list");
4823  return MatchOperand_ParseFail;
4824  }
4825  // Ranges must go from low to high.
4826  if (Reg > EndReg) {
4827  Error(AfterMinusLoc, "bad range in register list");
4828  return MatchOperand_ParseFail;
4829  }
4830  // Parse the lane specifier if present.
4831  VectorLaneTy NextLaneKind;
4832  unsigned NextLaneIndex;
4833  if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
4835  return MatchOperand_ParseFail;
4836  if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
4837  Error(AfterMinusLoc, "mismatched lane index in register list");
4838  return MatchOperand_ParseFail;
4839  }
4840 
4841  // Add all the registers in the range to the register list.
4842  Count += EndReg - Reg;
4843  Reg = EndReg;
4844  continue;
4845  }
4846  Parser.Lex(); // Eat the comma.
4847  RegLoc = Parser.getTok().getLoc();
4848  int OldReg = Reg;
4849  Reg = tryParseRegister();
4850  if (Reg == -1) {
4851  Error(RegLoc, "register expected");
4852  return MatchOperand_ParseFail;
4853  }
4854 
4855  if (hasMVE()) {
4856  if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Reg)) {
4857  Error(RegLoc, "vector register in range Q0-Q7 expected");
4858  return MatchOperand_ParseFail;
4859  }
4860  Spacing = 1;
4861  }
4862  // vector register lists must be contiguous.
4863  // It's OK to use the enumeration values directly here rather, as the
4864  // VFP register classes have the enum sorted properly.
4865  //
4866  // The list is of D registers, but we also allow Q regs and just interpret
4867  // them as the two D sub-registers.
4868  else if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4869  if (!Spacing)
4870  Spacing = 1; // Register range implies a single spaced list.
4871  else if (Spacing == 2) {
4872  Error(RegLoc,
4873  "invalid register in double-spaced list (must be 'D' register')");
4874  return MatchOperand_ParseFail;
4875  }
4876  Reg = getDRegFromQReg(Reg);
4877  if (Reg != OldReg + 1) {
4878  Error(RegLoc, "non-contiguous register range");
4879  return MatchOperand_ParseFail;
4880  }
4881  ++Reg;
4882  Count += 2;
4883  // Parse the lane specifier if present.
4884  VectorLaneTy NextLaneKind;
4885  unsigned NextLaneIndex;
4886  SMLoc LaneLoc = Parser.getTok().getLoc();
4887  if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
4889  return MatchOperand_ParseFail;
4890  if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
4891  Error(LaneLoc, "mismatched lane index in register list");
4892  return MatchOperand_ParseFail;
4893  }
4894  continue;
4895  }
4896  // Normal D register.
4897  // Figure out the register spacing (single or double) of the list if
4898  // we don't know it already.
4899  if (!Spacing)
4900  Spacing = 1 + (Reg == OldReg + 2);
4901 
4902  // Just check that it's contiguous and keep going.
4903  if (Reg != OldReg + Spacing) {
4904  Error(RegLoc, "non-contiguous register range");
4905  return MatchOperand_ParseFail;
4906  }
4907  ++Count;
4908  // Parse the lane specifier if present.
4909  VectorLaneTy NextLaneKind;
4910  unsigned NextLaneIndex;
4911  SMLoc EndLoc = Parser.getTok().getLoc();
4912  if (parseVectorLane(NextLaneKind, NextLaneIndex, E) != MatchOperand_Success)
4913  return MatchOperand_ParseFail;
4914  if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
4915  Error(EndLoc, "mismatched lane index in register list");
4916  return MatchOperand_ParseFail;
4917  }
4918  }
4919 
4920  if (Parser.getTok().isNot(AsmToken::RCurly)) {
4921  Error(Parser.getTok().getLoc(), "'}' expected");
4922  return MatchOperand_ParseFail;
4923  }
4924  E = Parser.getTok().getEndLoc();
4925  Parser.Lex(); // Eat '}' token.
4926 
4927  switch (LaneKind) {
4928  case NoLanes:
4929  case AllLanes: {
4930  // Two-register operands have been converted to the
4931  // composite register classes.
4932  if (Count == 2 && !hasMVE()) {
4933  const MCRegisterClass *RC = (Spacing == 1) ?