// Doxygen export header: LLVM 13.0.0git — ARMAsmParser.cpp
// ("Go to the documentation of this file.")
1 //===- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions -------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
#include "ARMBaseInstrInfo.h"
#include "ARMFeatures.h"
#include "MCTargetDesc/ARMMCExpr.h"
#include "Utils/ARMBaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCParser/MCTargetAsmParser.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/ARMEHABI.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SMLoc.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <limits>
#include <memory>
#include <string>
#include <utility>
#include <vector>
69 
70 #define DEBUG_TYPE "asm-parser"
71 
72 using namespace llvm;
73 
74 namespace llvm {
75 extern const MCInstrDesc ARMInsts[];
76 } // end namespace llvm
77 
78 namespace {
79 
// Modes for handling conditional instructions that appear outside an IT
// block; selected by the "arm-implicit-it" command-line option below.
enum class ImplicitItModeTy {
  Always,    // Accept in both ISAs; emit implicit ITs in Thumb.
  Never,     // Warn in ARM, reject in Thumb.
  ARMOnly,   // Accept in ARM, reject in Thumb.
  ThumbOnly, // Warn in ARM, emit implicit ITs in Thumb.
};
81 
82 static cl::opt<ImplicitItModeTy> ImplicitItMode(
83  "arm-implicit-it", cl::init(ImplicitItModeTy::ARMOnly),
84  cl::desc("Allow conditional instructions outdside of an IT block"),
85  cl::values(clEnumValN(ImplicitItModeTy::Always, "always",
86  "Accept in both ISAs, emit implicit ITs in Thumb"),
87  clEnumValN(ImplicitItModeTy::Never, "never",
88  "Warn in ARM, reject in Thumb"),
89  clEnumValN(ImplicitItModeTy::ARMOnly, "arm",
90  "Accept in ARM, reject in Thumb"),
91  clEnumValN(ImplicitItModeTy::ThumbOnly, "thumb",
92  "Warn in ARM, emit implicit ITs in Thumb")));
93 
94 static cl::opt<bool> AddBuildAttributes("arm-add-build-attributes",
95  cl::init(false));
96 
// Lane-addressing forms recognized while parsing a vector operand
// (see parseVectorLane): no lane suffix, an all-lanes suffix, or a
// single lane selected by index.
enum VectorLaneTy {
  NoLanes,     // No lane suffix present.
  AllLanes,    // All-lanes form.
  IndexedLane, // A single lane selected by an index.
};
98 
// Extract the state bit of an IT-block mask for the slot at \p Position.
//
// Position==0 means we're not in an IT block at all. Position==1 selects
// the first state bit, which is always 0 (Then). Position==2 selects the
// second state bit, stored at bit 3 of Mask, and so on downwards — so
// shifting right by (5 - Position) brings the wanted bit (including the
// always-0 bit at bit 4 for the mandatory initial Then) down to bit 0.
static inline unsigned extractITMaskBit(unsigned Mask, unsigned Position) {
  unsigned Shift = 5 - Position;
  return (Mask >> Shift) & 1u;
}
108 
109 class UnwindContext {
110  using Locs = SmallVector<SMLoc, 4>;
111 
112  MCAsmParser &Parser;
113  Locs FnStartLocs;
114  Locs CantUnwindLocs;
115  Locs PersonalityLocs;
116  Locs PersonalityIndexLocs;
117  Locs HandlerDataLocs;
118  int FPReg;
119 
120 public:
121  UnwindContext(MCAsmParser &P) : Parser(P), FPReg(ARM::SP) {}
122 
123  bool hasFnStart() const { return !FnStartLocs.empty(); }
124  bool cantUnwind() const { return !CantUnwindLocs.empty(); }
125  bool hasHandlerData() const { return !HandlerDataLocs.empty(); }
126 
127  bool hasPersonality() const {
128  return !(PersonalityLocs.empty() && PersonalityIndexLocs.empty());
129  }
130 
131  void recordFnStart(SMLoc L) { FnStartLocs.push_back(L); }
132  void recordCantUnwind(SMLoc L) { CantUnwindLocs.push_back(L); }
133  void recordPersonality(SMLoc L) { PersonalityLocs.push_back(L); }
134  void recordHandlerData(SMLoc L) { HandlerDataLocs.push_back(L); }
135  void recordPersonalityIndex(SMLoc L) { PersonalityIndexLocs.push_back(L); }
136 
137  void saveFPReg(int Reg) { FPReg = Reg; }
138  int getFPReg() const { return FPReg; }
139 
140  void emitFnStartLocNotes() const {
141  for (Locs::const_iterator FI = FnStartLocs.begin(), FE = FnStartLocs.end();
142  FI != FE; ++FI)
143  Parser.Note(*FI, ".fnstart was specified here");
144  }
145 
146  void emitCantUnwindLocNotes() const {
147  for (Locs::const_iterator UI = CantUnwindLocs.begin(),
148  UE = CantUnwindLocs.end(); UI != UE; ++UI)
149  Parser.Note(*UI, ".cantunwind was specified here");
150  }
151 
152  void emitHandlerDataLocNotes() const {
153  for (Locs::const_iterator HI = HandlerDataLocs.begin(),
154  HE = HandlerDataLocs.end(); HI != HE; ++HI)
155  Parser.Note(*HI, ".handlerdata was specified here");
156  }
157 
158  void emitPersonalityLocNotes() const {
159  for (Locs::const_iterator PI = PersonalityLocs.begin(),
160  PE = PersonalityLocs.end(),
161  PII = PersonalityIndexLocs.begin(),
162  PIE = PersonalityIndexLocs.end();
163  PI != PE || PII != PIE;) {
164  if (PI != PE && (PII == PIE || PI->getPointer() < PII->getPointer()))
165  Parser.Note(*PI++, ".personality was specified here");
166  else if (PII != PIE && (PI == PE || PII->getPointer() < PI->getPointer()))
167  Parser.Note(*PII++, ".personalityindex was specified here");
168  else
169  llvm_unreachable(".personality and .personalityindex cannot be "
170  "at the same location");
171  }
172  }
173 
174  void reset() {
175  FnStartLocs = Locs();
176  CantUnwindLocs = Locs();
177  PersonalityLocs = Locs();
178  HandlerDataLocs = Locs();
179  PersonalityIndexLocs = Locs();
180  FPReg = ARM::SP;
181  }
182 };
183 
184 // Various sets of ARM instruction mnemonics which are used by the asm parser
185 class ARMMnemonicSets {
186  StringSet<> CDE;
187  StringSet<> CDEWithVPTSuffix;
188 public:
189  ARMMnemonicSets(const MCSubtargetInfo &STI);
190 
191  /// Returns true iff a given mnemonic is a CDE instruction
192  bool isCDEInstr(StringRef Mnemonic) {
193  // Quick check before searching the set
194  if (!Mnemonic.startswith("cx") && !Mnemonic.startswith("vcx"))
195  return false;
196  return CDE.count(Mnemonic);
197  }
198 
199  /// Returns true iff a given mnemonic is a VPT-predicable CDE instruction
200  /// (possibly with a predication suffix "e" or "t")
201  bool isVPTPredicableCDEInstr(StringRef Mnemonic) {
202  if (!Mnemonic.startswith("vcx"))
203  return false;
204  return CDEWithVPTSuffix.count(Mnemonic);
205  }
206 
207  /// Returns true iff a given mnemonic is an IT-predicable CDE instruction
208  /// (possibly with a condition suffix)
209  bool isITPredicableCDEInstr(StringRef Mnemonic) {
210  if (!Mnemonic.startswith("cx"))
211  return false;
212  return Mnemonic.startswith("cx1a") || Mnemonic.startswith("cx1da") ||
213  Mnemonic.startswith("cx2a") || Mnemonic.startswith("cx2da") ||
214  Mnemonic.startswith("cx3a") || Mnemonic.startswith("cx3da");
215  }
216 
217  /// Return true iff a given mnemonic is an integer CDE instruction with
218  /// dual-register destination
219  bool isCDEDualRegInstr(StringRef Mnemonic) {
220  if (!Mnemonic.startswith("cx"))
221  return false;
222  return Mnemonic == "cx1d" || Mnemonic == "cx1da" ||
223  Mnemonic == "cx2d" || Mnemonic == "cx2da" ||
224  Mnemonic == "cx3d" || Mnemonic == "cx3da";
225  }
226 };
227 
228 ARMMnemonicSets::ARMMnemonicSets(const MCSubtargetInfo &STI) {
229  for (StringRef Mnemonic: { "cx1", "cx1a", "cx1d", "cx1da",
230  "cx2", "cx2a", "cx2d", "cx2da",
231  "cx3", "cx3a", "cx3d", "cx3da", })
232  CDE.insert(Mnemonic);
233  for (StringRef Mnemonic :
234  {"vcx1", "vcx1a", "vcx2", "vcx2a", "vcx3", "vcx3a"}) {
235  CDE.insert(Mnemonic);
236  CDEWithVPTSuffix.insert(Mnemonic);
237  CDEWithVPTSuffix.insert(std::string(Mnemonic) + "t");
238  CDEWithVPTSuffix.insert(std::string(Mnemonic) + "e");
239  }
240 }
241 
242 class ARMAsmParser : public MCTargetAsmParser {
243  const MCRegisterInfo *MRI;
244  UnwindContext UC;
245  ARMMnemonicSets MS;
246 
247  ARMTargetStreamer &getTargetStreamer() {
248  assert(getParser().getStreamer().getTargetStreamer() &&
249  "do not have a target streamer");
250  MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
251  return static_cast<ARMTargetStreamer &>(TS);
252  }
253 
254  // Map of register aliases registers via the .req directive.
255  StringMap<unsigned> RegisterReqs;
256 
257  bool NextSymbolIsThumb;
258 
259  bool useImplicitITThumb() const {
260  return ImplicitItMode == ImplicitItModeTy::Always ||
261  ImplicitItMode == ImplicitItModeTy::ThumbOnly;
262  }
263 
264  bool useImplicitITARM() const {
265  return ImplicitItMode == ImplicitItModeTy::Always ||
266  ImplicitItMode == ImplicitItModeTy::ARMOnly;
267  }
268 
269  struct {
270  ARMCC::CondCodes Cond; // Condition for IT block.
271  unsigned Mask:4; // Condition mask for instructions.
272  // Starting at first 1 (from lsb).
273  // '1' condition as indicated in IT.
274  // '0' inverse of condition (else).
275  // Count of instructions in IT block is
276  // 4 - trailingzeroes(mask)
277  // Note that this does not have the same encoding
278  // as in the IT instruction, which also depends
279  // on the low bit of the condition code.
280 
281  unsigned CurPosition; // Current position in parsing of IT
282  // block. In range [0,4], with 0 being the IT
283  // instruction itself. Initialized according to
284  // count of instructions in block. ~0U if no
285  // active IT block.
286 
287  bool IsExplicit; // true - The IT instruction was present in the
288  // input, we should not modify it.
289  // false - The IT instruction was added
290  // implicitly, we can extend it if that
291  // would be legal.
292  } ITState;
293 
294  SmallVector<MCInst, 4> PendingConditionalInsts;
295 
296  void flushPendingInstructions(MCStreamer &Out) override {
297  if (!inImplicitITBlock()) {
298  assert(PendingConditionalInsts.size() == 0);
299  return;
300  }
301 
302  // Emit the IT instruction
303  MCInst ITInst;
304  ITInst.setOpcode(ARM::t2IT);
305  ITInst.addOperand(MCOperand::createImm(ITState.Cond));
306  ITInst.addOperand(MCOperand::createImm(ITState.Mask));
307  Out.emitInstruction(ITInst, getSTI());
308 
309  // Emit the conditonal instructions
310  assert(PendingConditionalInsts.size() <= 4);
311  for (const MCInst &Inst : PendingConditionalInsts) {
312  Out.emitInstruction(Inst, getSTI());
313  }
314  PendingConditionalInsts.clear();
315 
316  // Clear the IT state
317  ITState.Mask = 0;
318  ITState.CurPosition = ~0U;
319  }
320 
321  bool inITBlock() { return ITState.CurPosition != ~0U; }
322  bool inExplicitITBlock() { return inITBlock() && ITState.IsExplicit; }
323  bool inImplicitITBlock() { return inITBlock() && !ITState.IsExplicit; }
324 
325  bool lastInITBlock() {
326  return ITState.CurPosition == 4 - countTrailingZeros(ITState.Mask);
327  }
328 
329  void forwardITPosition() {
330  if (!inITBlock()) return;
331  // Move to the next instruction in the IT block, if there is one. If not,
332  // mark the block as done, except for implicit IT blocks, which we leave
333  // open until we find an instruction that can't be added to it.
334  unsigned TZ = countTrailingZeros(ITState.Mask);
335  if (++ITState.CurPosition == 5 - TZ && ITState.IsExplicit)
336  ITState.CurPosition = ~0U; // Done with the IT block after this.
337  }
338 
339  // Rewind the state of the current IT block, removing the last slot from it.
340  void rewindImplicitITPosition() {
341  assert(inImplicitITBlock());
342  assert(ITState.CurPosition > 1);
343  ITState.CurPosition--;
344  unsigned TZ = countTrailingZeros(ITState.Mask);
345  unsigned NewMask = 0;
346  NewMask |= ITState.Mask & (0xC << TZ);
347  NewMask |= 0x2 << TZ;
348  ITState.Mask = NewMask;
349  }
350 
351  // Rewind the state of the current IT block, removing the last slot from it.
352  // If we were at the first slot, this closes the IT block.
353  void discardImplicitITBlock() {
354  assert(inImplicitITBlock());
355  assert(ITState.CurPosition == 1);
356  ITState.CurPosition = ~0U;
357  }
358 
359  // Return the low-subreg of a given Q register.
360  unsigned getDRegFromQReg(unsigned QReg) const {
361  return MRI->getSubReg(QReg, ARM::dsub_0);
362  }
363 
364  // Get the condition code corresponding to the current IT block slot.
365  ARMCC::CondCodes currentITCond() {
366  unsigned MaskBit = extractITMaskBit(ITState.Mask, ITState.CurPosition);
367  return MaskBit ? ARMCC::getOppositeCondition(ITState.Cond) : ITState.Cond;
368  }
369 
370  // Invert the condition of the current IT block slot without changing any
371  // other slots in the same block.
372  void invertCurrentITCondition() {
373  if (ITState.CurPosition == 1) {
374  ITState.Cond = ARMCC::getOppositeCondition(ITState.Cond);
375  } else {
376  ITState.Mask ^= 1 << (5 - ITState.CurPosition);
377  }
378  }
379 
380  // Returns true if the current IT block is full (all 4 slots used).
381  bool isITBlockFull() {
382  return inITBlock() && (ITState.Mask & 1);
383  }
384 
385  // Extend the current implicit IT block to have one more slot with the given
386  // condition code.
387  void extendImplicitITBlock(ARMCC::CondCodes Cond) {
388  assert(inImplicitITBlock());
389  assert(!isITBlockFull());
390  assert(Cond == ITState.Cond ||
391  Cond == ARMCC::getOppositeCondition(ITState.Cond));
392  unsigned TZ = countTrailingZeros(ITState.Mask);
393  unsigned NewMask = 0;
394  // Keep any existing condition bits.
395  NewMask |= ITState.Mask & (0xE << TZ);
396  // Insert the new condition bit.
397  NewMask |= (Cond != ITState.Cond) << TZ;
398  // Move the trailing 1 down one bit.
399  NewMask |= 1 << (TZ - 1);
400  ITState.Mask = NewMask;
401  }
402 
403  // Create a new implicit IT block with a dummy condition code.
404  void startImplicitITBlock() {
405  assert(!inITBlock());
406  ITState.Cond = ARMCC::AL;
407  ITState.Mask = 8;
408  ITState.CurPosition = 1;
409  ITState.IsExplicit = false;
410  }
411 
412  // Create a new explicit IT block with the given condition and mask.
413  // The mask should be in the format used in ARMOperand and
414  // MCOperand, with a 1 implying 'e', regardless of the low bit of
415  // the condition.
416  void startExplicitITBlock(ARMCC::CondCodes Cond, unsigned Mask) {
417  assert(!inITBlock());
418  ITState.Cond = Cond;
419  ITState.Mask = Mask;
420  ITState.CurPosition = 0;
421  ITState.IsExplicit = true;
422  }
423 
424  struct {
425  unsigned Mask : 4;
426  unsigned CurPosition;
427  } VPTState;
428  bool inVPTBlock() { return VPTState.CurPosition != ~0U; }
429  void forwardVPTPosition() {
430  if (!inVPTBlock()) return;
431  unsigned TZ = countTrailingZeros(VPTState.Mask);
432  if (++VPTState.CurPosition == 5 - TZ)
433  VPTState.CurPosition = ~0U;
434  }
435 
436  void Note(SMLoc L, const Twine &Msg, SMRange Range = None) {
437  return getParser().Note(L, Msg, Range);
438  }
439 
440  bool Warning(SMLoc L, const Twine &Msg, SMRange Range = None) {
441  return getParser().Warning(L, Msg, Range);
442  }
443 
444  bool Error(SMLoc L, const Twine &Msg, SMRange Range = None) {
445  return getParser().Error(L, Msg, Range);
446  }
447 
448  bool validatetLDMRegList(const MCInst &Inst, const OperandVector &Operands,
449  unsigned ListNo, bool IsARPop = false);
450  bool validatetSTMRegList(const MCInst &Inst, const OperandVector &Operands,
451  unsigned ListNo);
452 
453  int tryParseRegister();
454  bool tryParseRegisterWithWriteBack(OperandVector &);
455  int tryParseShiftRegister(OperandVector &);
456  bool parseRegisterList(OperandVector &, bool EnforceOrder = true);
457  bool parseMemory(OperandVector &);
458  bool parseOperand(OperandVector &, StringRef Mnemonic);
459  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
460  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
461  unsigned &ShiftAmount);
462  bool parseLiteralValues(unsigned Size, SMLoc L);
463  bool parseDirectiveThumb(SMLoc L);
464  bool parseDirectiveARM(SMLoc L);
465  bool parseDirectiveThumbFunc(SMLoc L);
466  bool parseDirectiveCode(SMLoc L);
467  bool parseDirectiveSyntax(SMLoc L);
468  bool parseDirectiveReq(StringRef Name, SMLoc L);
469  bool parseDirectiveUnreq(SMLoc L);
470  bool parseDirectiveArch(SMLoc L);
471  bool parseDirectiveEabiAttr(SMLoc L);
472  bool parseDirectiveCPU(SMLoc L);
473  bool parseDirectiveFPU(SMLoc L);
474  bool parseDirectiveFnStart(SMLoc L);
475  bool parseDirectiveFnEnd(SMLoc L);
476  bool parseDirectiveCantUnwind(SMLoc L);
477  bool parseDirectivePersonality(SMLoc L);
478  bool parseDirectiveHandlerData(SMLoc L);
479  bool parseDirectiveSetFP(SMLoc L);
480  bool parseDirectivePad(SMLoc L);
481  bool parseDirectiveRegSave(SMLoc L, bool IsVector);
482  bool parseDirectiveInst(SMLoc L, char Suffix = '\0');
483  bool parseDirectiveLtorg(SMLoc L);
484  bool parseDirectiveEven(SMLoc L);
485  bool parseDirectivePersonalityIndex(SMLoc L);
486  bool parseDirectiveUnwindRaw(SMLoc L);
487  bool parseDirectiveTLSDescSeq(SMLoc L);
488  bool parseDirectiveMovSP(SMLoc L);
489  bool parseDirectiveObjectArch(SMLoc L);
490  bool parseDirectiveArchExtension(SMLoc L);
491  bool parseDirectiveAlign(SMLoc L);
492  bool parseDirectiveThumbSet(SMLoc L);
493 
494  bool isMnemonicVPTPredicable(StringRef Mnemonic, StringRef ExtraToken);
495  StringRef splitMnemonic(StringRef Mnemonic, StringRef ExtraToken,
496  unsigned &PredicationCode,
497  unsigned &VPTPredicationCode, bool &CarrySetting,
498  unsigned &ProcessorIMod, StringRef &ITMask);
499  void getMnemonicAcceptInfo(StringRef Mnemonic, StringRef ExtraToken,
500  StringRef FullInst, bool &CanAcceptCarrySet,
501  bool &CanAcceptPredicationCode,
502  bool &CanAcceptVPTPredicationCode);
503 
504  void tryConvertingToTwoOperandForm(StringRef Mnemonic, bool CarrySetting,
506  bool CDEConvertDualRegOperand(StringRef Mnemonic, OperandVector &Operands);
507 
508  bool isThumb() const {
509  // FIXME: Can tablegen auto-generate this?
510  return getSTI().getFeatureBits()[ARM::ModeThumb];
511  }
512 
513  bool isThumbOne() const {
514  return isThumb() && !getSTI().getFeatureBits()[ARM::FeatureThumb2];
515  }
516 
517  bool isThumbTwo() const {
518  return isThumb() && getSTI().getFeatureBits()[ARM::FeatureThumb2];
519  }
520 
521  bool hasThumb() const {
522  return getSTI().getFeatureBits()[ARM::HasV4TOps];
523  }
524 
525  bool hasThumb2() const {
526  return getSTI().getFeatureBits()[ARM::FeatureThumb2];
527  }
528 
529  bool hasV6Ops() const {
530  return getSTI().getFeatureBits()[ARM::HasV6Ops];
531  }
532 
533  bool hasV6T2Ops() const {
534  return getSTI().getFeatureBits()[ARM::HasV6T2Ops];
535  }
536 
537  bool hasV6MOps() const {
538  return getSTI().getFeatureBits()[ARM::HasV6MOps];
539  }
540 
541  bool hasV7Ops() const {
542  return getSTI().getFeatureBits()[ARM::HasV7Ops];
543  }
544 
545  bool hasV8Ops() const {
546  return getSTI().getFeatureBits()[ARM::HasV8Ops];
547  }
548 
549  bool hasV8MBaseline() const {
550  return getSTI().getFeatureBits()[ARM::HasV8MBaselineOps];
551  }
552 
553  bool hasV8MMainline() const {
554  return getSTI().getFeatureBits()[ARM::HasV8MMainlineOps];
555  }
556  bool hasV8_1MMainline() const {
557  return getSTI().getFeatureBits()[ARM::HasV8_1MMainlineOps];
558  }
559  bool hasMVE() const {
560  return getSTI().getFeatureBits()[ARM::HasMVEIntegerOps];
561  }
562  bool hasMVEFloat() const {
563  return getSTI().getFeatureBits()[ARM::HasMVEFloatOps];
564  }
565  bool hasCDE() const {
566  return getSTI().getFeatureBits()[ARM::HasCDEOps];
567  }
568  bool has8MSecExt() const {
569  return getSTI().getFeatureBits()[ARM::Feature8MSecExt];
570  }
571 
572  bool hasARM() const {
573  return !getSTI().getFeatureBits()[ARM::FeatureNoARM];
574  }
575 
576  bool hasDSP() const {
577  return getSTI().getFeatureBits()[ARM::FeatureDSP];
578  }
579 
580  bool hasD32() const {
581  return getSTI().getFeatureBits()[ARM::FeatureD32];
582  }
583 
584  bool hasV8_1aOps() const {
585  return getSTI().getFeatureBits()[ARM::HasV8_1aOps];
586  }
587 
588  bool hasRAS() const {
589  return getSTI().getFeatureBits()[ARM::FeatureRAS];
590  }
591 
592  void SwitchMode() {
593  MCSubtargetInfo &STI = copySTI();
594  auto FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
595  setAvailableFeatures(FB);
596  }
597 
598  void FixModeAfterArchChange(bool WasThumb, SMLoc Loc);
599 
600  bool isMClass() const {
601  return getSTI().getFeatureBits()[ARM::FeatureMClass];
602  }
603 
604  /// @name Auto-generated Match Functions
605  /// {
606 
607 #define GET_ASSEMBLER_HEADER
608 #include "ARMGenAsmMatcher.inc"
609 
610  /// }
611 
612  OperandMatchResultTy parseITCondCode(OperandVector &);
613  OperandMatchResultTy parseCoprocNumOperand(OperandVector &);
614  OperandMatchResultTy parseCoprocRegOperand(OperandVector &);
615  OperandMatchResultTy parseCoprocOptionOperand(OperandVector &);
616  OperandMatchResultTy parseMemBarrierOptOperand(OperandVector &);
617  OperandMatchResultTy parseTraceSyncBarrierOptOperand(OperandVector &);
618  OperandMatchResultTy parseInstSyncBarrierOptOperand(OperandVector &);
619  OperandMatchResultTy parseProcIFlagsOperand(OperandVector &);
620  OperandMatchResultTy parseMSRMaskOperand(OperandVector &);
621  OperandMatchResultTy parseBankedRegOperand(OperandVector &);
622  OperandMatchResultTy parsePKHImm(OperandVector &O, StringRef Op, int Low,
623  int High);
624  OperandMatchResultTy parsePKHLSLImm(OperandVector &O) {
625  return parsePKHImm(O, "lsl", 0, 31);
626  }
627  OperandMatchResultTy parsePKHASRImm(OperandVector &O) {
628  return parsePKHImm(O, "asr", 1, 32);
629  }
630  OperandMatchResultTy parseSetEndImm(OperandVector &);
631  OperandMatchResultTy parseShifterImm(OperandVector &);
632  OperandMatchResultTy parseRotImm(OperandVector &);
633  OperandMatchResultTy parseModImm(OperandVector &);
634  OperandMatchResultTy parseBitfield(OperandVector &);
635  OperandMatchResultTy parsePostIdxReg(OperandVector &);
636  OperandMatchResultTy parseAM3Offset(OperandVector &);
637  OperandMatchResultTy parseFPImm(OperandVector &);
638  OperandMatchResultTy parseVectorList(OperandVector &);
639  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index,
640  SMLoc &EndLoc);
641 
642  // Asm Match Converter Methods
643  void cvtThumbMultiply(MCInst &Inst, const OperandVector &);
644  void cvtThumbBranches(MCInst &Inst, const OperandVector &);
645  void cvtMVEVMOVQtoDReg(MCInst &Inst, const OperandVector &);
646 
647  bool validateInstruction(MCInst &Inst, const OperandVector &Ops);
648  bool processInstruction(MCInst &Inst, const OperandVector &Ops, MCStreamer &Out);
649  bool shouldOmitCCOutOperand(StringRef Mnemonic, OperandVector &Operands);
650  bool shouldOmitPredicateOperand(StringRef Mnemonic, OperandVector &Operands);
651  bool shouldOmitVectorPredicateOperand(StringRef Mnemonic, OperandVector &Operands);
652  bool isITBlockTerminator(MCInst &Inst) const;
653  void fixupGNULDRDAlias(StringRef Mnemonic, OperandVector &Operands);
654  bool validateLDRDSTRD(MCInst &Inst, const OperandVector &Operands,
655  bool Load, bool ARMMode, bool Writeback);
656 
657 public:
658  enum ARMMatchResultTy {
659  Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
660  Match_RequiresNotITBlock,
661  Match_RequiresV6,
662  Match_RequiresThumb2,
663  Match_RequiresV8,
664  Match_RequiresFlagSetting,
665 #define GET_OPERAND_DIAGNOSTIC_TYPES
666 #include "ARMGenAsmMatcher.inc"
667 
668  };
669 
670  ARMAsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
671  const MCInstrInfo &MII, const MCTargetOptions &Options)
672  : MCTargetAsmParser(Options, STI, MII), UC(Parser), MS(STI) {
674 
675  // Cache the MCRegisterInfo.
676  MRI = getContext().getRegisterInfo();
677 
678  // Initialize the set of available features.
679  setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
680 
681  // Add build attributes based on the selected target.
682  if (AddBuildAttributes)
683  getTargetStreamer().emitTargetAttributes(STI);
684 
685  // Not in an ITBlock to start with.
686  ITState.CurPosition = ~0U;
687 
688  VPTState.CurPosition = ~0U;
689 
690  NextSymbolIsThumb = false;
691  }
692 
693  // Implementation of the MCTargetAsmParser interface:
694  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
695  OperandMatchResultTy tryParseRegister(unsigned &RegNo, SMLoc &StartLoc,
696  SMLoc &EndLoc) override;
697  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
698  SMLoc NameLoc, OperandVector &Operands) override;
699  bool ParseDirective(AsmToken DirectiveID) override;
700 
701  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
702  unsigned Kind) override;
703  unsigned checkTargetMatchPredicate(MCInst &Inst) override;
704 
705  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
707  uint64_t &ErrorInfo,
708  bool MatchingInlineAsm) override;
709  unsigned MatchInstruction(OperandVector &Operands, MCInst &Inst,
710  SmallVectorImpl<NearMissInfo> &NearMisses,
711  bool MatchingInlineAsm, bool &EmitInITBlock,
712  MCStreamer &Out);
713 
714  struct NearMissMessage {
715  SMLoc Loc;
716  SmallString<128> Message;
717  };
718 
719  const char *getCustomOperandDiag(ARMMatchResultTy MatchError);
720 
721  void FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
722  SmallVectorImpl<NearMissMessage> &NearMissesOut,
723  SMLoc IDLoc, OperandVector &Operands);
724  void ReportNearMisses(SmallVectorImpl<NearMissInfo> &NearMisses, SMLoc IDLoc,
726 
727  void doBeforeLabelEmit(MCSymbol *Symbol) override;
728 
729  void onLabelParsed(MCSymbol *Symbol) override;
730 };
731 
732 /// ARMOperand - Instances of this class represent a parsed ARM machine
733 /// operand.
734 class ARMOperand : public MCParsedAsmOperand {
735  enum KindTy {
736  k_CondCode,
737  k_VPTPred,
738  k_CCOut,
739  k_ITCondMask,
740  k_CoprocNum,
741  k_CoprocReg,
742  k_CoprocOption,
743  k_Immediate,
744  k_MemBarrierOpt,
745  k_InstSyncBarrierOpt,
746  k_TraceSyncBarrierOpt,
747  k_Memory,
748  k_PostIndexRegister,
749  k_MSRMask,
750  k_BankedReg,
751  k_ProcIFlags,
752  k_VectorIndex,
753  k_Register,
754  k_RegisterList,
755  k_RegisterListWithAPSR,
756  k_DPRRegisterList,
757  k_SPRRegisterList,
758  k_FPSRegisterListWithVPR,
759  k_FPDRegisterListWithVPR,
760  k_VectorList,
761  k_VectorListAllLanes,
762  k_VectorListIndexed,
763  k_ShiftedRegister,
764  k_ShiftedImmediate,
765  k_ShifterImmediate,
766  k_RotateImmediate,
767  k_ModifiedImmediate,
768  k_ConstantPoolImmediate,
769  k_BitfieldDescriptor,
770  k_Token,
771  } Kind;
772 
773  SMLoc StartLoc, EndLoc, AlignmentLoc;
775 
776  struct CCOp {
777  ARMCC::CondCodes Val;
778  };
779 
780  struct VCCOp {
781  ARMVCC::VPTCodes Val;
782  };
783 
784  struct CopOp {
785  unsigned Val;
786  };
787 
788  struct CoprocOptionOp {
789  unsigned Val;
790  };
791 
792  struct ITMaskOp {
793  unsigned Mask:4;
794  };
795 
796  struct MBOptOp {
797  ARM_MB::MemBOpt Val;
798  };
799 
800  struct ISBOptOp {
802  };
803 
804  struct TSBOptOp {
806  };
807 
808  struct IFlagsOp {
809  ARM_PROC::IFlags Val;
810  };
811 
812  struct MMaskOp {
813  unsigned Val;
814  };
815 
816  struct BankedRegOp {
817  unsigned Val;
818  };
819 
820  struct TokOp {
821  const char *Data;
822  unsigned Length;
823  };
824 
825  struct RegOp {
826  unsigned RegNum;
827  };
828 
829  // A vector register list is a sequential list of 1 to 4 registers.
830  struct VectorListOp {
831  unsigned RegNum;
832  unsigned Count;
833  unsigned LaneIndex;
834  bool isDoubleSpaced;
835  };
836 
837  struct VectorIndexOp {
838  unsigned Val;
839  };
840 
841  struct ImmOp {
842  const MCExpr *Val;
843  };
844 
845  /// Combined record for all forms of ARM address expressions.
846  struct MemoryOp {
847  unsigned BaseRegNum;
848  // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
849  // was specified.
850  const MCExpr *OffsetImm; // Offset immediate value
851  unsigned OffsetRegNum; // Offset register num, when OffsetImm == NULL
852  ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
853  unsigned ShiftImm; // shift for OffsetReg.
854  unsigned Alignment; // 0 = no alignment specified
855  // n = alignment in bytes (2, 4, 8, 16, or 32)
856  unsigned isNegative : 1; // Negated OffsetReg? (~'U' bit)
857  };
858 
859  struct PostIdxRegOp {
860  unsigned RegNum;
861  bool isAdd;
862  ARM_AM::ShiftOpc ShiftTy;
863  unsigned ShiftImm;
864  };
865 
866  struct ShifterImmOp {
867  bool isASR;
868  unsigned Imm;
869  };
870 
871  struct RegShiftedRegOp {
872  ARM_AM::ShiftOpc ShiftTy;
873  unsigned SrcReg;
874  unsigned ShiftReg;
875  unsigned ShiftImm;
876  };
877 
878  struct RegShiftedImmOp {
879  ARM_AM::ShiftOpc ShiftTy;
880  unsigned SrcReg;
881  unsigned ShiftImm;
882  };
883 
884  struct RotImmOp {
885  unsigned Imm;
886  };
887 
888  struct ModImmOp {
889  unsigned Bits;
890  unsigned Rot;
891  };
892 
893  struct BitfieldOp {
894  unsigned LSB;
895  unsigned Width;
896  };
897 
898  union {
899  struct CCOp CC;
900  struct VCCOp VCC;
901  struct CopOp Cop;
902  struct CoprocOptionOp CoprocOption;
903  struct MBOptOp MBOpt;
904  struct ISBOptOp ISBOpt;
905  struct TSBOptOp TSBOpt;
906  struct ITMaskOp ITMask;
907  struct IFlagsOp IFlags;
908  struct MMaskOp MMask;
909  struct BankedRegOp BankedReg;
910  struct TokOp Tok;
911  struct RegOp Reg;
912  struct VectorListOp VectorList;
913  struct VectorIndexOp VectorIndex;
914  struct ImmOp Imm;
915  struct MemoryOp Memory;
916  struct PostIdxRegOp PostIdxReg;
917  struct ShifterImmOp ShifterImm;
918  struct RegShiftedRegOp RegShiftedReg;
919  struct RegShiftedImmOp RegShiftedImm;
920  struct RotImmOp RotImm;
921  struct ModImmOp ModImm;
922  struct BitfieldOp Bitfield;
923  };
924 
925 public:
  // Construct an operand of the given kind; the caller fills in the
  // corresponding union member and source locations afterwards.
  ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}

  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }

  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  /// getLocRange - Get the range between the first and last token of this
  /// operand.
  SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }

  /// getAlignmentLoc - Get the location of the Alignment token of this operand.
  SMLoc getAlignmentLoc() const {
    assert(Kind == k_Memory && "Invalid access!");
    return AlignmentLoc;
  }

  // Each typed accessor below asserts that the operand's discriminator
  // matches before reading the corresponding union member.

  ARMCC::CondCodes getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CC.Val;
  }

  ARMVCC::VPTCodes getVPTPred() const {
    assert(isVPTPred() && "Invalid access!");
    return VCC.Val;
  }

  unsigned getCoproc() const {
    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
    return Cop.Val;
  }

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  unsigned getReg() const override {
    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
    return Reg.RegNum;
  }

  // Valid for any of the register-list kinds (GPR, DPR, SPR, and the
  // VPR-carrying FP lists).
  const SmallVectorImpl<unsigned> &getRegList() const {
    assert((Kind == k_RegisterList || Kind == k_RegisterListWithAPSR ||
            Kind == k_DPRRegisterList || Kind == k_SPRRegisterList ||
            Kind == k_FPSRegisterListWithVPR ||
            Kind == k_FPDRegisterListWithVPR) &&
           "Invalid access!");
    return Registers;
  }

  const MCExpr *getImm() const {
    assert(isImm() && "Invalid access!");
    return Imm.Val;
  }

  // Constant-pool immediates share the Imm storage but use a distinct kind.
  const MCExpr *getConstantPoolImm() const {
    assert(isConstantPoolImm() && "Invalid access!");
    return Imm.Val;
  }

  unsigned getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  ARM_MB::MemBOpt getMemBarrierOpt() const {
    assert(Kind == k_MemBarrierOpt && "Invalid access!");
    return MBOpt.Val;
  }

  ARM_ISB::InstSyncBOpt getInstSyncBarrierOpt() const {
    assert(Kind == k_InstSyncBarrierOpt && "Invalid access!");
    return ISBOpt.Val;
  }

  ARM_TSB::TraceSyncBOpt getTraceSyncBarrierOpt() const {
    assert(Kind == k_TraceSyncBarrierOpt && "Invalid access!");
    return TSBOpt.Val;
  }

  ARM_PROC::IFlags getProcIFlags() const {
    assert(Kind == k_ProcIFlags && "Invalid access!");
    return IFlags.Val;
  }

  unsigned getMSRMask() const {
    assert(Kind == k_MSRMask && "Invalid access!");
    return MMask.Val;
  }

  unsigned getBankedReg() const {
    assert(Kind == k_BankedReg && "Invalid access!");
    return BankedReg.Val;
  }
1022 
  // Simple discriminator checks used by the auto-generated matcher.
  bool isCoprocNum() const { return Kind == k_CoprocNum; }
  bool isCoprocReg() const { return Kind == k_CoprocReg; }
  bool isCoprocOption() const { return Kind == k_CoprocOption; }
  bool isCondCode() const { return Kind == k_CondCode; }
  bool isVPTPred() const { return Kind == k_VPTPred; }
  bool isCCOut() const { return Kind == k_CCOut; }
  bool isITMask() const { return Kind == k_ITCondMask; }
  // An IT condition code is an ordinary condition code operand.
  bool isITCondCode() const { return Kind == k_CondCode; }
  bool isImm() const override {
    return Kind == k_Immediate;
  }
1034 
1035  bool isARMBranchTarget() const {
1036  if (!isImm()) return false;
1037 
1038  if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
1039  return CE->getValue() % 4 == 0;
1040  return true;
1041  }
1042 
1043 
1044  bool isThumbBranchTarget() const {
1045  if (!isImm()) return false;
1046 
1047  if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
1048  return CE->getValue() % 2 == 0;
1049  return true;
1050  }
1051 
  // checks whether this operand is an unsigned offset which fits in a field
  // of specified width and scaled by a specific number of bits
  template<unsigned width, unsigned scale>
  bool isUnsignedOffset() const {
    if (!isImm()) return false;
    // Symbolic offsets are always accepted; they become fixups.
    if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Align = 1LL << scale;
      // Largest encodable value: all 'width' bits set, shifted by 'scale'.
      int64_t Max = Align * ((1LL << width) - 1);
      return ((Val % Align) == 0) && (Val >= 0) && (Val <= Max);
    }
    return false;
  }

  // checks whether this operand is a signed offset which fits in a field
  // of specified width and scaled by a specific number of bits
  template<unsigned width, unsigned scale>
  bool isSignedOffset() const {
    if (!isImm()) return false;
    // Symbolic offsets are always accepted; they become fixups.
    if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Align = 1LL << scale;
      // Two's-complement range of a 'width'-bit field, scaled.
      int64_t Max = Align * ((1LL << (width-1)) - 1);
      int64_t Min = -Align * (1LL << (width-1));
      return ((Val % Align) == 0) && (Val >= Min) && (Val <= Max);
    }
    return false;
  }

  // checks whether this operand is an offset suitable for the LE /
  // LETP instructions in Arm v8.1M
  bool isLEOffset() const {
    if (!isImm()) return false;
    if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      // LE branches backwards only: even offset in [-4094, 0).
      return Val < 0 && Val >= -4094 && (Val & 1) == 0;
    }
    return false;
  }
1094 
  // checks whether this operand is a memory operand computed as an offset
  // applied to PC. the offset may have 8 bits of magnitude and is represented
  // with two bits of shift. textually it may be either [pc, #imm], #imm or
  // relocable expression...
  bool isThumbMemPC() const {
    int64_t Val = 0;
    if (isImm()) {
      // Symbolic expression: accepted, resolved via fixup later.
      if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
      if (!CE) return false;
      Val = CE->getValue();
    }
    else if (isGPRMem()) {
      // Must be a pure [pc, #imm] form: constant offset, no offset register.
      if(!Memory.OffsetImm || Memory.OffsetRegNum) return false;
      if(Memory.BaseRegNum != ARM::PC) return false;
      if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
        Val = CE->getValue();
      else
        return false;
    }
    else return false;
    // 8-bit magnitude scaled by 4: multiples of 4 in [0, 1020].
    return ((Val % 4) == 0) && (Val >= 0) && (Val <= 1020);
  }
1118 
1119  bool isFPImm() const {
1120  if (!isImm()) return false;
1121  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1122  if (!CE) return false;
1123  int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
1124  return Val != -1;
1125  }
1126 
  // True if the operand is a constant immediate in the inclusive range [N, M].
  template<int64_t N, int64_t M>
  bool isImmediate() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= N && Value <= M;
  }

  // As isImmediate, but additionally requires the value be a multiple of 4.
  template<int64_t N, int64_t M>
  bool isImmediateS4() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ((Value & 3) == 0) && Value >= N && Value <= M;
  }
  // As isImmediate, but additionally requires the value be a multiple of 2.
  template<int64_t N, int64_t M>
  bool isImmediateS2() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ((Value & 1) == 0) && Value >= N && Value <= M;
  }
  // Named wrappers over the range templates, matching tablegen operand names.
  bool isFBits16() const {
    return isImmediate<0, 17>();
  }
  bool isFBits32() const {
    return isImmediate<1, 33>();
  }
  bool isImm8s4() const {
    return isImmediateS4<-1020, 1020>();
  }
  bool isImm7s4() const {
    return isImmediateS4<-508, 508>();
  }
  bool isImm7Shift0() const {
    return isImmediate<-127, 127>();
  }
  bool isImm7Shift1() const {
    return isImmediateS2<-255, 255>();
  }
  bool isImm7Shift2() const {
    return isImmediateS4<-511, 511>();
  }
  bool isImm7() const {
    return isImmediate<-127, 127>();
  }
  bool isImm0_1020s4() const {
    return isImmediateS4<0, 1020>();
  }
  bool isImm0_508s4() const {
    return isImmediateS4<0, 508>();
  }
1182  bool isImm0_508s4Neg() const {
1183  if (!isImm()) return false;
1184  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1185  if (!CE) return false;
1186  int64_t Value = -CE->getValue();
1187  // explicitly exclude zero. we want that to use the normal 0_508 version.
1188  return ((Value & 3) == 0) && Value > 0 && Value <= 508;
1189  }
1190 
  // True for immediates whose 32-bit negation lies in (0, 4095].
  bool isImm0_4095Neg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    // isImm0_4095Neg is used with 32-bit immediates only.
    // 32-bit immediates are zero extended to 64-bit when parsed,
    // thus simple -CE->getValue() results in a big negative number,
    // not a small positive number as intended
    if ((CE->getValue() >> 32) > 0) return false;
    // Negate in 32-bit arithmetic so the wrap-around is well defined.
    uint32_t Value = -static_cast<uint32_t>(CE->getValue());
    return Value > 0 && Value < 4096;
  }
1203 
  // Named range predicates used by the auto-generated matcher.
  bool isImm0_7() const {
    return isImmediate<0, 7>();
  }

  bool isImm1_16() const {
    return isImmediate<1, 16>();
  }

  bool isImm1_32() const {
    return isImmediate<1, 32>();
  }

  bool isImm8_255() const {
    return isImmediate<8, 255>();
  }

  bool isImm256_65535Expr() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // If it's not a constant expression, it'll generate a fixup and be
    // handled later.
    if (!CE) return true;
    int64_t Value = CE->getValue();
    return Value >= 256 && Value < 65536;
  }

  bool isImm0_65535Expr() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // If it's not a constant expression, it'll generate a fixup and be
    // handled later.
    if (!CE) return true;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 65536;
  }

  bool isImm24bit() const {
    return isImmediate<0, 0xffffff + 1>();
  }

  bool isImmThumbSR() const {
    return isImmediate<1, 33>();
  }

  // True if Value is representable as an 8-bit payload shifted left by
  // 'shift' bits (low bits clear, remainder fits in a byte).
  // NOTE(review): 1 << shift uses int arithmetic — assumes shift < 31.
  template<int shift>
  bool isExpImmValue(uint64_t Value) const {
    uint64_t mask = (1 << shift) - 1;
    if ((Value & mask) != 0 || (Value >> shift) > 0xff)
      return false;
    return true;
  }

  // Constant-immediate wrapper over isExpImmValue.
  template<int shift>
  bool isExpImm() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    return isExpImmValue<shift>(CE->getValue());
  }

  // As isExpImm, but tests the value with its low 'size' bits inverted.
  template<int shift, int size>
  bool isInvertedExpImm() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    uint64_t OriginalValue = CE->getValue();
    uint64_t InvertedValue = OriginalValue ^ (((uint64_t)1 << size) - 1);
    return isExpImmValue<shift>(InvertedValue);
  }

  // PKH instruction shift-amount ranges (LSL 0-32, ASR 0-33 as parsed).
  bool isPKHLSLImm() const {
    return isImmediate<0, 32>();
  }

  bool isPKHASRImm() const {
    return isImmediate<0, 33>();
  }
1283 
  bool isAdrLabel() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;

    // If it is a constant, it must fit into a modified immediate encoding.
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // Either the value itself or its negation must be a valid so_imm, so
    // the label can be materialized with ADD or SUB from PC.
    return (ARM_AM::getSOImmVal(Value) != -1 ||
            ARM_AM::getSOImmVal(-Value) != -1);
  }
1298 
  bool isT2SOImm() const {
    // If we have an immediate that's not a constant, treat it as an expression
    // needing a fixup.
    if (isImm() && !isa<MCConstantExpr>(getImm())) {
      // We want to avoid matching :upper16: and :lower16: as we want these
      // expressions to match in isImm0_65535Expr()
      const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(getImm());
      return (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 &&
                             ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16));
    }
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    // Constant case: must be encodable as a Thumb2 modified immediate.
    int64_t Value = CE->getValue();
    return ARM_AM::getT2SOImmVal(Value) != -1;
  }
1315 
  // Encodable as a T2 modified immediate only after bitwise NOT (used to
  // flip e.g. AND <-> BIC). Rejects values the plain form already handles.
  bool isT2SOImmNot() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getT2SOImmVal(Value) == -1 &&
      ARM_AM::getT2SOImmVal(~Value) != -1;
  }

  // Encodable as a T2 modified immediate only after negation (ADD <-> SUB).
  bool isT2SOImmNeg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // Only use this when not representable as a plain so_imm.
    return ARM_AM::getT2SOImmVal(Value) == -1 &&
      ARM_AM::getT2SOImmVal(-Value) != -1;
  }
1334 
1335  bool isSetEndImm() const {
1336  if (!isImm()) return false;
1337  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1338  if (!CE) return false;
1339  int64_t Value = CE->getValue();
1340  return Value == 1 || Value == 0;
1341  }
1342 
  bool isReg() const override { return Kind == k_Register; }
  bool isRegList() const { return Kind == k_RegisterList; }
  // A plain GPR list also satisfies the APSR-allowing list class.
  bool isRegListWithAPSR() const {
    return Kind == k_RegisterListWithAPSR || Kind == k_RegisterList;
  }
  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
  bool isFPSRegListWithVPR() const { return Kind == k_FPSRegisterListWithVPR; }
  bool isFPDRegListWithVPR() const { return Kind == k_FPDRegisterListWithVPR; }
  bool isToken() const override { return Kind == k_Token; }
  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
  bool isInstSyncBarrierOpt() const { return Kind == k_InstSyncBarrierOpt; }
  bool isTraceSyncBarrierOpt() const { return Kind == k_TraceSyncBarrierOpt; }
  bool isMem() const override {
    return isGPRMem() || isMVEMem();
  }
  // Memory operand whose base is a GPR or MVE Q register and whose offset
  // register, if any, is an MVE Q register.
  bool isMVEMem() const {
    if (Kind != k_Memory)
      return false;
    if (Memory.BaseRegNum &&
        !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.BaseRegNum) &&
        !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Memory.BaseRegNum))
      return false;
    if (Memory.OffsetRegNum &&
        !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
            Memory.OffsetRegNum))
      return false;
    return true;
  }
  // Memory operand whose base and offset registers (when present) are GPRs.
  bool isGPRMem() const {
    if (Kind != k_Memory)
      return false;
    if (Memory.BaseRegNum &&
        !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.BaseRegNum))
      return false;
    if (Memory.OffsetRegNum &&
        !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.OffsetRegNum))
      return false;
    return true;
  }
  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
  // Register shifted by a register; both registers must be GPRs.
  bool isRegShiftedReg() const {
    return Kind == k_ShiftedRegister &&
           ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
               RegShiftedReg.SrcReg) &&
           ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
               RegShiftedReg.ShiftReg);
  }
  // Register shifted by an immediate; the source must be a GPR.
  bool isRegShiftedImm() const {
    return Kind == k_ShiftedImmediate &&
           ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
               RegShiftedImm.SrcReg);
  }
  bool isRotImm() const { return Kind == k_RotateImmediate; }
1397 
  // True if the immediate is a power of two within [Min, Max].
  template<unsigned Min, unsigned Max>
  bool isPowerTwoInRange() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // Exactly one bit set => power of two.
    return Value > 0 && countPopulation((uint64_t)Value) == 1 &&
           Value >= Min && Value <= Max;
  }
  bool isModImm() const { return Kind == k_ModifiedImmediate; }

  // Encodable as an ARM modified immediate after bitwise NOT (MOV <-> MVN).
  bool isModImmNot() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getSOImmVal(~Value) != -1;
  }

  // Encodable as an ARM modified immediate only after negation (ADD <-> SUB).
  bool isModImmNeg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getSOImmVal(Value) == -1 &&
      ARM_AM::getSOImmVal(-Value) != -1;
  }
1425 
  // Negated Thumb immediate in (0, 8); 32-bit negation keeps wrap-around
  // semantics well defined.
  bool isThumbModImmNeg1_7() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int32_t Value = -(int32_t)CE->getValue();
    return 0 < Value && Value < 8;
  }

  // Negated Thumb immediate in (7, 256).
  bool isThumbModImmNeg8_255() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int32_t Value = -(int32_t)CE->getValue();
    return 7 < Value && Value < 256;
  }

  bool isConstantPoolImm() const { return Kind == k_ConstantPoolImmediate; }
  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
  // Post-indexed register, possibly with a shift; the register must be a GPR.
  bool isPostIdxRegShifted() const {
    return Kind == k_PostIndexRegister &&
           ARMMCRegisterClasses[ARM::GPRRegClassID].contains(PostIdxReg.RegNum);
  }
  // Post-indexed register with no shift applied.
  bool isPostIdxReg() const {
    return isPostIdxRegShifted() && PostIdxReg.ShiftTy == ARM_AM::no_shift;
  }
1451  bool isMemNoOffset(bool alignOK = false, unsigned Alignment = 0) const {
1452  if (!isGPRMem())
1453  return false;
1454  // No offset of any kind.
1455  return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
1456  (alignOK || Memory.Alignment == Alignment);
1457  }
  // As isMemNoOffset, but the base must be a GPR other than PC.
  bool isMemNoOffsetT2(bool alignOK = false, unsigned Alignment = 0) const {
    if (!isGPRMem())
      return false;

    if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
            Memory.BaseRegNum))
      return false;

    // No offset of any kind.
    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
           (alignOK || Memory.Alignment == Alignment);
  }
  // As isMemNoOffset, but the base must be a GPR other than PC and SP.
  bool isMemNoOffsetT2NoSp(bool alignOK = false, unsigned Alignment = 0) const {
    if (!isGPRMem())
      return false;

    if (!ARMMCRegisterClasses[ARM::rGPRRegClassID].contains(
            Memory.BaseRegNum))
      return false;

    // No offset of any kind.
    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
           (alignOK || Memory.Alignment == Alignment);
  }
  // As isMemNoOffset, but the base must be a low (Thumb) GPR.
  bool isMemNoOffsetT(bool alignOK = false, unsigned Alignment = 0) const {
    if (!isGPRMem())
      return false;

    if (!ARMMCRegisterClasses[ARM::tGPRRegClassID].contains(
            Memory.BaseRegNum))
      return false;

    // No offset of any kind.
    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
           (alignOK || Memory.Alignment == Alignment);
  }
1494  bool isMemPCRelImm12() const {
1495  if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1496  return false;
1497  // Base register must be PC.
1498  if (Memory.BaseRegNum != ARM::PC)
1499  return false;
1500  // Immediate offset in range [-4095, 4095].
1501  if (!Memory.OffsetImm) return true;
1502  if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1503  int64_t Val = CE->getValue();
1504  return (Val > -4096 && Val < 4096) ||
1506  }
1507  return false;
1508  }
1509 
  // Any alignment is acceptable.
  bool isAlignedMemory() const {
    return isMemNoOffset(true);
  }

  // No explicit alignment qualifier allowed.
  bool isAlignedMemoryNone() const {
    return isMemNoOffset(false, 0);
  }

  bool isDupAlignedMemoryNone() const {
    return isMemNoOffset(false, 0);
  }

  // Each isAlignedMemoryN / isDupAlignedMemoryN accepts either the exact
  // byte alignment for N bits or no alignment qualifier at all.
  bool isAlignedMemory16() const {
    if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
      return true;
    return isMemNoOffset(false, 0);
  }

  bool isDupAlignedMemory16() const {
    if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
      return true;
    return isMemNoOffset(false, 0);
  }

  bool isAlignedMemory32() const {
    if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
      return true;
    return isMemNoOffset(false, 0);
  }

  bool isDupAlignedMemory32() const {
    if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
      return true;
    return isMemNoOffset(false, 0);
  }

  bool isAlignedMemory64() const {
    if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
      return true;
    return isMemNoOffset(false, 0);
  }

  bool isDupAlignedMemory64() const {
    if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
      return true;
    return isMemNoOffset(false, 0);
  }

  bool isAlignedMemory64or128() const {
    if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
      return true;
    if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
      return true;
    return isMemNoOffset(false, 0);
  }

  bool isDupAlignedMemory64or128() const {
    if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
      return true;
    if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
      return true;
    return isMemNoOffset(false, 0);
  }

  bool isAlignedMemory64or128or256() const {
    if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
      return true;
    if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
      return true;
    if (isMemNoOffset(false, 32)) // alignment in bytes for 256-bits is 32.
      return true;
    return isMemNoOffset(false, 0);
  }
1583 
  // ARM addressing mode 2: [Rn, Rm] or [Rn, #imm] with imm in (-4096, 4096).
  bool isAddrMode2() const {
    if (!isGPRMem() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return true;
    // Immediate offset in range [-4095, 4095].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return Val > -4096 && Val < 4096;
    }
    return false;
  }
1596 
1597  bool isAM2OffsetImm() const {
1598  if (!isImm()) return false;
1599  // Immediate offset in range [-4095, 4095].
1600  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1601  if (!CE) return false;
1602  int64_t Val = CE->getValue();
1603  return (Val == std::numeric_limits<int32_t>::min()) ||
1604  (Val > -4096 && Val < 4096);
1605  }
1606 
1607  bool isAddrMode3() const {
1608  // If we have an immediate that's not a constant, treat it as a label
1609  // reference needing a fixup. If it is a constant, it's something else
1610  // and we reject it.
1611  if (isImm() && !isa<MCConstantExpr>(getImm()))
1612  return true;
1613  if (!isGPRMem() || Memory.Alignment != 0) return false;
1614  // No shifts are legal for AM3.
1615  if (Memory.ShiftType != ARM_AM::no_shift) return false;
1616  // Check for register offset.
1617  if (Memory.OffsetRegNum) return true;
1618  // Immediate offset in range [-255, 255].
1619  if (!Memory.OffsetImm) return true;
1620  if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1621  int64_t Val = CE->getValue();
1622  // The #-0 offset is encoded as std::numeric_limits<int32_t>::min(), and
1623  // we have to check for this too.
1624  return (Val > -256 && Val < 256) ||
1626  }
1627  return false;
1628  }
1629 
1630  bool isAM3Offset() const {
1631  if (isPostIdxReg())
1632  return true;
1633  if (!isImm())
1634  return false;
1635  // Immediate offset in range [-255, 255].
1636  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1637  if (!CE) return false;
1638  int64_t Val = CE->getValue();
1639  // Special case, #-0 is std::numeric_limits<int32_t>::min().
1640  return (Val > -256 && Val < 256) ||
1642  }
1643 
1644  bool isAddrMode5() const {
1645  // If we have an immediate that's not a constant, treat it as a label
1646  // reference needing a fixup. If it is a constant, it's something else
1647  // and we reject it.
1648  if (isImm() && !isa<MCConstantExpr>(getImm()))
1649  return true;
1650  if (!isGPRMem() || Memory.Alignment != 0) return false;
1651  // Check for register offset.
1652  if (Memory.OffsetRegNum) return false;
1653  // Immediate offset in range [-1020, 1020] and a multiple of 4.
1654  if (!Memory.OffsetImm) return true;
1655  if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1656  int64_t Val = CE->getValue();
1657  return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
1659  }
1660  return false;
1661  }
1662 
1663  bool isAddrMode5FP16() const {
1664  // If we have an immediate that's not a constant, treat it as a label
1665  // reference needing a fixup. If it is a constant, it's something else
1666  // and we reject it.
1667  if (isImm() && !isa<MCConstantExpr>(getImm()))
1668  return true;
1669  if (!isGPRMem() || Memory.Alignment != 0) return false;
1670  // Check for register offset.
1671  if (Memory.OffsetRegNum) return false;
1672  // Immediate offset in range [-510, 510] and a multiple of 2.
1673  if (!Memory.OffsetImm) return true;
1674  if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1675  int64_t Val = CE->getValue();
1676  return (Val >= -510 && Val <= 510 && ((Val & 1) == 0)) ||
1678  }
1679  return false;
1680  }
1681 
1682  bool isMemTBB() const {
1683  if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1684  Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1685  return false;
1686  return true;
1687  }
1688 
1689  bool isMemTBH() const {
1690  if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1691  Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
1692  Memory.Alignment != 0 )
1693  return false;
1694  return true;
1695  }
1696 
1697  bool isMemRegOffset() const {
1698  if (!isGPRMem() || !Memory.OffsetRegNum || Memory.Alignment != 0)
1699  return false;
1700  return true;
1701  }
1702 
  // Thumb2 register-offset form: positive offset, non-PC base, and at most
  // an LSL #0-3 shift.
  bool isT2MemRegOffset() const {
    if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.Alignment != 0 || Memory.BaseRegNum == ARM::PC)
      return false;
    // Only lsl #{0, 1, 2, 3} allowed.
    if (Memory.ShiftType == ARM_AM::no_shift)
      return true;
    if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
      return false;
    return true;
  }
1714 
  bool isMemThumbRR() const {
    // Thumb reg+reg addressing is simple. Just two registers, a base and
    // an offset. No shifts, negations or any other complicating factors.
    if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
      return false;
    // Both registers must be low (r0-r7). The OffsetRegNum null-check is
    // redundant after the guard above but kept for clarity.
    return isARMLowRegister(Memory.BaseRegNum) &&
      (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
  }
1724 
  // Thumb [Rn, #imm] with low base register; imm a multiple of 4 in [0, 124].
  bool isMemThumbRIs4() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 124].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return Val >= 0 && Val <= 124 && (Val % 4) == 0;
    }
    return false;
  }

  // Thumb [Rn, #imm] with low base register; imm a multiple of 2 in [0, 62].
  bool isMemThumbRIs2() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 2 in range [0, 62].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return Val >= 0 && Val <= 62 && (Val % 2) == 0;
    }
    return false;
  }

  // Thumb [Rn, #imm] with low base register; imm in [0, 31].
  bool isMemThumbRIs1() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 31].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return Val >= 0 && Val <= 31;
    }
    return false;
  }
1763 
  // Thumb SP-relative form: [sp, #imm] with imm a multiple of 4 in [0, 1020].
  bool isMemThumbSPI() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
    }
    return false;
  }
1776 
1777  bool isMemImm8s4Offset() const {
1778  // If we have an immediate that's not a constant, treat it as a label
1779  // reference needing a fixup. If it is a constant, it's something else
1780  // and we reject it.
1781  if (isImm() && !isa<MCConstantExpr>(getImm()))
1782  return true;
1783  if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1784  return false;
1785  // Immediate offset a multiple of 4 in range [-1020, 1020].
1786  if (!Memory.OffsetImm) return true;
1787  if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1788  int64_t Val = CE->getValue();
1789  // Special case, #-0 is std::numeric_limits<int32_t>::min().
1790  return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) ||
1792  }
1793  return false;
1794  }
1795 
  // [Rn, #imm] with non-PC base and imm a multiple of 4 in [-508, 508].
  bool isMemImm7s4Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0 ||
        !ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
            Memory.BaseRegNum))
      return false;
    // Immediate offset a multiple of 4 in range [-508, 508].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      // Special case, #-0 is INT32_MIN.
      return (Val >= -508 && Val <= 508 && (Val & 3) == 0) || Val == INT32_MIN;
    }
    return false;
  }
1815 
  // [Rn, #imm] with imm a multiple of 4 in [0, 1020].
  bool isMemImm0_1020s4Offset() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
    }
    return false;
  }

  // [Rn, #imm] with non-PC base and imm in (-256, 256); INT32_MIN encodes #-0.
  bool isMemImm8Offset() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Base reg of PC isn't allowed for these encodings.
    if (Memory.BaseRegNum == ARM::PC) return false;
    // Immediate offset in range [-255, 255].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return (Val == std::numeric_limits<int32_t>::min()) ||
             (Val > -256 && Val < 256);
    }
    return false;
  }
1842 
  // [Rn, #imm] where the base is in RegClassID and imm is a member of
  // [-127, 127] shifted left by Bits; INT32_MIN encodes #-0.
  template<unsigned Bits, unsigned RegClassID>
  bool isMemImm7ShiftedOffset() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0 ||
        !ARMMCRegisterClasses[RegClassID].contains(Memory.BaseRegNum))
      return false;

    // Expect an immediate offset equal to an element of the range
    // [-127, 127], shifted left by Bits.

    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();

      // INT32_MIN is a special-case value (indicating the encoding with
      // zero offset and the subtract bit set)
      if (Val == INT32_MIN)
        return true;

      unsigned Divisor = 1U << Bits;

      // Check that the low bits are zero
      if (Val % Divisor != 0)
        return false;

      // Check that the remaining offset is within range.
      Val /= Divisor;
      return (Val >= -127 && Val <= 127);
    }
    return false;
  }
1873 
  // MVE scatter/gather form [Rn, Qm] (optionally with uxtw #shift): GPR base
  // (not PC), MVE Q-register offset, and a shift that matches the template
  // parameter exactly.
  template <int shift> bool isMemRegRQOffset() const {
    if (!isMVEMem() || Memory.OffsetImm != 0 || Memory.Alignment != 0)
      return false;

    if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
            Memory.BaseRegNum))
      return false;
    if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
            Memory.OffsetRegNum))
      return false;

    // shift == 0 means no shift qualifier may be present.
    if (shift == 0 && Memory.ShiftType != ARM_AM::no_shift)
      return false;

    // Otherwise, require exactly 'uxtw #shift'.
    if (shift > 0 &&
        (Memory.ShiftType != ARM_AM::uxtw || Memory.ShiftImm != shift))
      return false;

    return true;
  }
1894 
1895  template <int shift> bool isMemRegQOffset() const {
1896  if (!isMVEMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1897  return false;
1898 
1899  if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
1900  Memory.BaseRegNum))
1901  return false;
1902 
1903  if (!Memory.OffsetImm)
1904  return true;
1905  static_assert(shift < 56,
1906  "Such that we dont shift by a value higher than 62");
1907  if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1908  int64_t Val = CE->getValue();
1909 
1910  // The value must be a multiple of (1 << shift)
1911  if ((Val & ((1U << shift) - 1)) != 0)
1912  return false;
1913 
1914  // And be in the right range, depending on the amount that it is shifted
1915  // by. Shift 0, is equal to 7 unsigned bits, the sign bit is set
1916  // separately.
1917  int64_t Range = (1U << (7 + shift)) - 1;
1918  return (Val == INT32_MIN) || (Val > -Range && Val < Range);
1919  }
1920  return false;
1921  }
1922 
1923  bool isMemPosImm8Offset() const {
1924  if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1925  return false;
1926  // Immediate offset in range [0, 255].
1927  if (!Memory.OffsetImm) return true;
1928  if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1929  int64_t Val = CE->getValue();
1930  return Val >= 0 && Val < 256;
1931  }
1932  return false;
1933  }
1934 
1935  bool isMemNegImm8Offset() const {
1936  if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1937  return false;
1938  // Base reg of PC isn't allowed for these encodings.
1939  if (Memory.BaseRegNum == ARM::PC) return false;
1940  // Immediate offset in range [-255, -1].
1941  if (!Memory.OffsetImm) return false;
1942  if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1943  int64_t Val = CE->getValue();
1944  return (Val == std::numeric_limits<int32_t>::min()) ||
1945  (Val > -256 && Val < 0);
1946  }
1947  return false;
1948  }
1949 
1950  bool isMemUImm12Offset() const {
1951  if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1952  return false;
1953  // Immediate offset in range [0, 4095].
1954  if (!Memory.OffsetImm) return true;
1955  if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1956  int64_t Val = CE->getValue();
1957  return (Val >= 0 && Val < 4096);
1958  }
1959  return false;
1960  }
1961 
1962  bool isMemImm12Offset() const {
1963  // If we have an immediate that's not a constant, treat it as a label
1964  // reference needing a fixup. If it is a constant, it's something else
1965  // and we reject it.
1966 
1967  if (isImm() && !isa<MCConstantExpr>(getImm()))
1968  return true;
1969 
1970  if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1971  return false;
1972  // Immediate offset in range [-4095, 4095].
1973  if (!Memory.OffsetImm) return true;
1974  if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1975  int64_t Val = CE->getValue();
1976  return (Val > -4096 && Val < 4096) ||
1978  }
1979  // If we have an immediate that's not a constant, treat it as a
1980  // symbolic expression needing a fixup.
1981  return true;
1982  }
1983 
1984  bool isConstPoolAsmImm() const {
1985  // Delay processing of Constant Pool Immediate, this will turn into
1986  // a constant. Match no other operand
1987  return (isConstantPoolImm());
1988  }
1989 
1990  bool isPostIdxImm8() const {
1991  if (!isImm()) return false;
1992  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1993  if (!CE) return false;
1994  int64_t Val = CE->getValue();
1995  return (Val > -256 && Val < 256) ||
1997  }
1998 
1999  bool isPostIdxImm8s4() const {
2000  if (!isImm()) return false;
2001  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2002  if (!CE) return false;
2003  int64_t Val = CE->getValue();
2004  return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
2006  }
2007 
  // Operand kind predicate: MSR mask operand.
  bool isMSRMask() const { return Kind == k_MSRMask; }
  // Operand kind predicate: banked register operand.
  bool isBankedReg() const { return Kind == k_BankedReg; }
  // Operand kind predicate: processor interrupt-flags (CPS iflags) operand.
  bool isProcIFlags() const { return Kind == k_ProcIFlags; }
2011 
2012  // NEON operands.
2013  bool isSingleSpacedVectorList() const {
2014  return Kind == k_VectorList && !VectorList.isDoubleSpaced;
2015  }
2016 
2017  bool isDoubleSpacedVectorList() const {
2018  return Kind == k_VectorList && VectorList.isDoubleSpaced;
2019  }
2020 
2021  bool isVecListOneD() const {
2022  if (!isSingleSpacedVectorList()) return false;
2023  return VectorList.Count == 1;
2024  }
2025 
2026  bool isVecListTwoMQ() const {
2027  return isSingleSpacedVectorList() && VectorList.Count == 2 &&
2028  ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
2029  VectorList.RegNum);
2030  }
2031 
2032  bool isVecListDPair() const {
2033  if (!isSingleSpacedVectorList()) return false;
2034  return (ARMMCRegisterClasses[ARM::DPairRegClassID]
2035  .contains(VectorList.RegNum));
2036  }
2037 
2038  bool isVecListThreeD() const {
2039  if (!isSingleSpacedVectorList()) return false;
2040  return VectorList.Count == 3;
2041  }
2042 
2043  bool isVecListFourD() const {
2044  if (!isSingleSpacedVectorList()) return false;
2045  return VectorList.Count == 4;
2046  }
2047 
2048  bool isVecListDPairSpaced() const {
2049  if (Kind != k_VectorList) return false;
2050  if (isSingleSpacedVectorList()) return false;
2051  return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
2052  .contains(VectorList.RegNum));
2053  }
2054 
2055  bool isVecListThreeQ() const {
2056  if (!isDoubleSpacedVectorList()) return false;
2057  return VectorList.Count == 3;
2058  }
2059 
2060  bool isVecListFourQ() const {
2061  if (!isDoubleSpacedVectorList()) return false;
2062  return VectorList.Count == 4;
2063  }
2064 
2065  bool isVecListFourMQ() const {
2066  return isSingleSpacedVectorList() && VectorList.Count == 4 &&
2067  ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
2068  VectorList.RegNum);
2069  }
2070 
2071  bool isSingleSpacedVectorAllLanes() const {
2072  return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
2073  }
2074 
2075  bool isDoubleSpacedVectorAllLanes() const {
2076  return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
2077  }
2078 
2079  bool isVecListOneDAllLanes() const {
2080  if (!isSingleSpacedVectorAllLanes()) return false;
2081  return VectorList.Count == 1;
2082  }
2083 
2084  bool isVecListDPairAllLanes() const {
2085  if (!isSingleSpacedVectorAllLanes()) return false;
2086  return (ARMMCRegisterClasses[ARM::DPairRegClassID]
2087  .contains(VectorList.RegNum));
2088  }
2089 
2090  bool isVecListDPairSpacedAllLanes() const {
2091  if (!isDoubleSpacedVectorAllLanes()) return false;
2092  return VectorList.Count == 2;
2093  }
2094 
2095  bool isVecListThreeDAllLanes() const {
2096  if (!isSingleSpacedVectorAllLanes()) return false;
2097  return VectorList.Count == 3;
2098  }
2099 
2100  bool isVecListThreeQAllLanes() const {
2101  if (!isDoubleSpacedVectorAllLanes()) return false;
2102  return VectorList.Count == 3;
2103  }
2104 
2105  bool isVecListFourDAllLanes() const {
2106  if (!isSingleSpacedVectorAllLanes()) return false;
2107  return VectorList.Count == 4;
2108  }
2109 
2110  bool isVecListFourQAllLanes() const {
2111  if (!isDoubleSpacedVectorAllLanes()) return false;
2112  return VectorList.Count == 4;
2113  }
2114 
2115  bool isSingleSpacedVectorIndexed() const {
2116  return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
2117  }
2118 
2119  bool isDoubleSpacedVectorIndexed() const {
2120  return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
2121  }
2122 
2123  bool isVecListOneDByteIndexed() const {
2124  if (!isSingleSpacedVectorIndexed()) return false;
2125  return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
2126  }
2127 
2128  bool isVecListOneDHWordIndexed() const {
2129  if (!isSingleSpacedVectorIndexed()) return false;
2130  return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
2131  }
2132 
2133  bool isVecListOneDWordIndexed() const {
2134  if (!isSingleSpacedVectorIndexed()) return false;
2135  return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
2136  }
2137 
2138  bool isVecListTwoDByteIndexed() const {
2139  if (!isSingleSpacedVectorIndexed()) return false;
2140  return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
2141  }
2142 
2143  bool isVecListTwoDHWordIndexed() const {
2144  if (!isSingleSpacedVectorIndexed()) return false;
2145  return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
2146  }
2147 
2148  bool isVecListTwoQWordIndexed() const {
2149  if (!isDoubleSpacedVectorIndexed()) return false;
2150  return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
2151  }
2152 
2153  bool isVecListTwoQHWordIndexed() const {
2154  if (!isDoubleSpacedVectorIndexed()) return false;
2155  return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
2156  }
2157 
2158  bool isVecListTwoDWordIndexed() const {
2159  if (!isSingleSpacedVectorIndexed()) return false;
2160  return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
2161  }
2162 
2163  bool isVecListThreeDByteIndexed() const {
2164  if (!isSingleSpacedVectorIndexed()) return false;
2165  return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
2166  }
2167 
2168  bool isVecListThreeDHWordIndexed() const {
2169  if (!isSingleSpacedVectorIndexed()) return false;
2170  return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
2171  }
2172 
2173  bool isVecListThreeQWordIndexed() const {
2174  if (!isDoubleSpacedVectorIndexed()) return false;
2175  return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
2176  }
2177 
2178  bool isVecListThreeQHWordIndexed() const {
2179  if (!isDoubleSpacedVectorIndexed()) return false;
2180  return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
2181  }
2182 
2183  bool isVecListThreeDWordIndexed() const {
2184  if (!isSingleSpacedVectorIndexed()) return false;
2185  return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
2186  }
2187 
2188  bool isVecListFourDByteIndexed() const {
2189  if (!isSingleSpacedVectorIndexed()) return false;
2190  return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
2191  }
2192 
2193  bool isVecListFourDHWordIndexed() const {
2194  if (!isSingleSpacedVectorIndexed()) return false;
2195  return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
2196  }
2197 
2198  bool isVecListFourQWordIndexed() const {
2199  if (!isDoubleSpacedVectorIndexed()) return false;
2200  return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
2201  }
2202 
2203  bool isVecListFourQHWordIndexed() const {
2204  if (!isDoubleSpacedVectorIndexed()) return false;
2205  return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
2206  }
2207 
2208  bool isVecListFourDWordIndexed() const {
2209  if (!isSingleSpacedVectorIndexed()) return false;
2210  return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
2211  }
2212 
  // Operand kind predicate: vector lane-index operand.
  bool isVectorIndex() const { return Kind == k_VectorIndex; }
2214 
2215  template <unsigned NumLanes>
2216  bool isVectorIndexInRange() const {
2217  if (Kind != k_VectorIndex) return false;
2218  return VectorIndex.Val < NumLanes;
2219  }
2220 
  // 8-bit elements: lanes 0-7.
  bool isVectorIndex8() const { return isVectorIndexInRange<8>(); }
  // 16-bit elements: lanes 0-3.
  bool isVectorIndex16() const { return isVectorIndexInRange<4>(); }
  // 32-bit elements: lanes 0-1.
  bool isVectorIndex32() const { return isVectorIndexInRange<2>(); }
  // 64-bit elements: lane 0 only.
  bool isVectorIndex64() const { return isVectorIndexInRange<1>(); }
2225 
2226  template<int PermittedValue, int OtherPermittedValue>
2227  bool isMVEPairVectorIndex() const {
2228  if (Kind != k_VectorIndex) return false;
2229  return VectorIndex.Val == PermittedValue ||
2230  VectorIndex.Val == OtherPermittedValue;
2231  }
2232 
2233  bool isNEONi8splat() const {
2234  if (!isImm()) return false;
2235  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2236  // Must be a constant.
2237  if (!CE) return false;
2238  int64_t Value = CE->getValue();
2239  // i8 value splatted across 8 bytes. The immediate is just the 8 byte
2240  // value.
2241  return Value >= 0 && Value < 256;
2242  }
2243 
2244  bool isNEONi16splat() const {
2245  if (isNEONByteReplicate(2))
2246  return false; // Leave that for bytes replication and forbid by default.
2247  if (!isImm())
2248  return false;
2249  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2250  // Must be a constant.
2251  if (!CE) return false;
2252  unsigned Value = CE->getValue();
2253  return ARM_AM::isNEONi16splat(Value);
2254  }
2255 
2256  bool isNEONi16splatNot() const {
2257  if (!isImm())
2258  return false;
2259  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2260  // Must be a constant.
2261  if (!CE) return false;
2262  unsigned Value = CE->getValue();
2263  return ARM_AM::isNEONi16splat(~Value & 0xffff);
2264  }
2265 
2266  bool isNEONi32splat() const {
2267  if (isNEONByteReplicate(4))
2268  return false; // Leave that for bytes replication and forbid by default.
2269  if (!isImm())
2270  return false;
2271  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2272  // Must be a constant.
2273  if (!CE) return false;
2274  unsigned Value = CE->getValue();
2275  return ARM_AM::isNEONi32splat(Value);
2276  }
2277 
2278  bool isNEONi32splatNot() const {
2279  if (!isImm())
2280  return false;
2281  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2282  // Must be a constant.
2283  if (!CE) return false;
2284  unsigned Value = CE->getValue();
2285  return ARM_AM::isNEONi32splat(~Value);
2286  }
2287 
2288  static bool isValidNEONi32vmovImm(int64_t Value) {
2289  // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
2290  // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
2291  return ((Value & 0xffffffffffffff00) == 0) ||
2292  ((Value & 0xffffffffffff00ff) == 0) ||
2293  ((Value & 0xffffffffff00ffff) == 0) ||
2294  ((Value & 0xffffffff00ffffff) == 0) ||
2295  ((Value & 0xffffffffffff00ff) == 0xff) ||
2296  ((Value & 0xffffffffff00ffff) == 0xffff);
2297  }
2298 
2299  bool isNEONReplicate(unsigned Width, unsigned NumElems, bool Inv) const {
2300  assert((Width == 8 || Width == 16 || Width == 32) &&
2301  "Invalid element width");
2302  assert(NumElems * Width <= 64 && "Invalid result width");
2303 
2304  if (!isImm())
2305  return false;
2306  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2307  // Must be a constant.
2308  if (!CE)
2309  return false;
2310  int64_t Value = CE->getValue();
2311  if (!Value)
2312  return false; // Don't bother with zero.
2313  if (Inv)
2314  Value = ~Value;
2315 
2316  uint64_t Mask = (1ull << Width) - 1;
2317  uint64_t Elem = Value & Mask;
2318  if (Width == 16 && (Elem & 0x00ff) != 0 && (Elem & 0xff00) != 0)
2319  return false;
2320  if (Width == 32 && !isValidNEONi32vmovImm(Elem))
2321  return false;
2322 
2323  for (unsigned i = 1; i < NumElems; ++i) {
2324  Value >>= Width;
2325  if ((Value & Mask) != Elem)
2326  return false;
2327  }
2328  return true;
2329  }
2330 
2331  bool isNEONByteReplicate(unsigned NumBytes) const {
2332  return isNEONReplicate(8, NumBytes, false);
2333  }
2334 
2335  static void checkNeonReplicateArgs(unsigned FromW, unsigned ToW) {
2336  assert((FromW == 8 || FromW == 16 || FromW == 32) &&
2337  "Invalid source width");
2338  assert((ToW == 16 || ToW == 32 || ToW == 64) &&
2339  "Invalid destination width");
2340  assert(FromW < ToW && "ToW is not less than FromW");
2341  }
2342 
2343  template<unsigned FromW, unsigned ToW>
2344  bool isNEONmovReplicate() const {
2345  checkNeonReplicateArgs(FromW, ToW);
2346  if (ToW == 64 && isNEONi64splat())
2347  return false;
2348  return isNEONReplicate(FromW, ToW / FromW, false);
2349  }
2350 
2351  template<unsigned FromW, unsigned ToW>
2352  bool isNEONinvReplicate() const {
2353  checkNeonReplicateArgs(FromW, ToW);
2354  return isNEONReplicate(FromW, ToW / FromW, true);
2355  }
2356 
2357  bool isNEONi32vmov() const {
2358  if (isNEONByteReplicate(4))
2359  return false; // Let it to be classified as byte-replicate case.
2360  if (!isImm())
2361  return false;
2362  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2363  // Must be a constant.
2364  if (!CE)
2365  return false;
2366  return isValidNEONi32vmovImm(CE->getValue());
2367  }
2368 
2369  bool isNEONi32vmovNeg() const {
2370  if (!isImm()) return false;
2371  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2372  // Must be a constant.
2373  if (!CE) return false;
2374  return isValidNEONi32vmovImm(~CE->getValue());
2375  }
2376 
2377  bool isNEONi64splat() const {
2378  if (!isImm()) return false;
2379  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2380  // Must be a constant.
2381  if (!CE) return false;
2382  uint64_t Value = CE->getValue();
2383  // i64 value with each byte being either 0 or 0xff.
2384  for (unsigned i = 0; i < 8; ++i, Value >>= 8)
2385  if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
2386  return true;
2387  }
2388 
2389  template<int64_t Angle, int64_t Remainder>
2390  bool isComplexRotation() const {
2391  if (!isImm()) return false;
2392 
2393  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2394  if (!CE) return false;
2395  uint64_t Value = CE->getValue();
2396 
2397  return (Value % Angle == Remainder && Value <= 270);
2398  }
2399 
2400  bool isMVELongShift() const {
2401  if (!isImm()) return false;
2402  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2403  // Must be a constant.
2404  if (!CE) return false;
2405  uint64_t Value = CE->getValue();
2406  return Value >= 1 && Value <= 32;
2407  }
2408 
2409  bool isMveSaturateOp() const {
2410  if (!isImm()) return false;
2411  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2412  if (!CE) return false;
2413  uint64_t Value = CE->getValue();
2414  return Value == 48 || Value == 64;
2415  }
2416 
2417  bool isITCondCodeNoAL() const {
2418  if (!isITCondCode()) return false;
2420  return CC != ARMCC::AL;
2421  }
2422 
2423  bool isITCondCodeRestrictedI() const {
2424  if (!isITCondCode())
2425  return false;
2427  return CC == ARMCC::EQ || CC == ARMCC::NE;
2428  }
2429 
2430  bool isITCondCodeRestrictedS() const {
2431  if (!isITCondCode())
2432  return false;
2434  return CC == ARMCC::LT || CC == ARMCC::GT || CC == ARMCC::LE ||
2435  CC == ARMCC::GE;
2436  }
2437 
2438  bool isITCondCodeRestrictedU() const {
2439  if (!isITCondCode())
2440  return false;
2442  return CC == ARMCC::HS || CC == ARMCC::HI;
2443  }
2444 
2445  bool isITCondCodeRestrictedFP() const {
2446  if (!isITCondCode())
2447  return false;
2449  return CC == ARMCC::EQ || CC == ARMCC::NE || CC == ARMCC::LT ||
2450  CC == ARMCC::GT || CC == ARMCC::LE || CC == ARMCC::GE;
2451  }
2452 
2453  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
2454  // Add as immediates when possible. Null MCExpr = 0.
2455  if (!Expr)
2457  else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
2458  Inst.addOperand(MCOperand::createImm(CE->getValue()));
2459  else
2460  Inst.addOperand(MCOperand::createExpr(Expr));
2461  }
2462 
  // ARM branch target: an immediate or an expression needing a fixup.
  void addARMBranchTargetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }
2467 
  // Thumb branch target: an immediate or an expression needing a fixup.
  void addThumbBranchTargetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }
2472 
2473  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
2474  assert(N == 2 && "Invalid number of operands!");
2475  Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
2476  unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
2477  Inst.addOperand(MCOperand::createReg(RegNum));
2478  }
2479 
2480  void addVPTPredNOperands(MCInst &Inst, unsigned N) const {
2481  assert(N == 2 && "Invalid number of operands!");
2482  Inst.addOperand(MCOperand::createImm(unsigned(getVPTPred())));
2483  unsigned RegNum = getVPTPred() == ARMVCC::None ? 0: ARM::P0;
2484  Inst.addOperand(MCOperand::createReg(RegNum));
2485  }
2486 
2487  void addVPTPredROperands(MCInst &Inst, unsigned N) const {
2488  assert(N == 3 && "Invalid number of operands!");
2489  addVPTPredNOperands(Inst, N-1);
2490  unsigned RegNum;
2491  if (getVPTPred() == ARMVCC::None) {
2492  RegNum = 0;
2493  } else {
2494  unsigned NextOpIndex = Inst.getNumOperands();
2495  const MCInstrDesc &MCID = ARMInsts[Inst.getOpcode()];
2496  int TiedOp = MCID.getOperandConstraint(NextOpIndex, MCOI::TIED_TO);
2497  assert(TiedOp >= 0 &&
2498  "Inactive register in vpred_r is not tied to an output!");
2499  RegNum = Inst.getOperand(TiedOp).getReg();
2500  }
2501  Inst.addOperand(MCOperand::createReg(RegNum));
2502  }
2503 
  // Coprocessor number (p0-p15) as an immediate.
  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getCoproc()));
  }
2508 
  // Coprocessor register (c0-c15) as an immediate.
  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getCoproc()));
  }
2513 
  // Coprocessor option ({...} operand) as an immediate.
  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(CoprocOption.Val));
  }
2518 
  // IT-block mask bits as an immediate.
  void addITMaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(ITMask.Mask));
  }
2523 
  // IT condition code as an immediate (no CPSR operand here).
  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
  }
2528 
2529  void addITCondCodeInvOperands(MCInst &Inst, unsigned N) const {
2530  assert(N == 1 && "Invalid number of operands!");
2532  }
2533 
2534  void addCCOutOperands(MCInst &Inst, unsigned N) const {
2535  assert(N == 1 && "Invalid number of operands!");
2537  }
2538 
2539  void addRegOperands(MCInst &Inst, unsigned N) const {
2540  assert(N == 1 && "Invalid number of operands!");
2542  }
2543 
2544  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
2545  assert(N == 3 && "Invalid number of operands!");
2546  assert(isRegShiftedReg() &&
2547  "addRegShiftedRegOperands() on non-RegShiftedReg!");
2548  Inst.addOperand(MCOperand::createReg(RegShiftedReg.SrcReg));
2549  Inst.addOperand(MCOperand::createReg(RegShiftedReg.ShiftReg));
2551  ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
2552  }
2553 
2554  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
2555  assert(N == 2 && "Invalid number of operands!");
2556  assert(isRegShiftedImm() &&
2557  "addRegShiftedImmOperands() on non-RegShiftedImm!");
2558  Inst.addOperand(MCOperand::createReg(RegShiftedImm.SrcReg));
2559  // Shift of #32 is encoded as 0 where permitted
2560  unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm);
2562  ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, Imm)));
2563  }
2564 
2565  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
2566  assert(N == 1 && "Invalid number of operands!");
2567  Inst.addOperand(MCOperand::createImm((ShifterImm.isASR << 5) |
2568  ShifterImm.Imm));
2569  }
2570 
2571  void addRegListOperands(MCInst &Inst, unsigned N) const {
2572  assert(N == 1 && "Invalid number of operands!");
2573  const SmallVectorImpl<unsigned> &RegList = getRegList();
2575  I = RegList.begin(), E = RegList.end(); I != E; ++I)
2577  }
2578 
2579  void addRegListWithAPSROperands(MCInst &Inst, unsigned N) const {
2580  assert(N == 1 && "Invalid number of operands!");
2581  const SmallVectorImpl<unsigned> &RegList = getRegList();
2583  I = RegList.begin(), E = RegList.end(); I != E; ++I)
2585  }
2586 
  // D-register lists use the generic register-list emission.
  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }
2590 
  // S-register lists use the generic register-list emission.
  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }
2594 
  // S-register lists with VPR use the generic register-list emission.
  void addFPSRegListWithVPROperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }
2598 
  // D-register lists with VPR use the generic register-list emission.
  void addFPDRegListWithVPROperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }
2602 
2603  void addRotImmOperands(MCInst &Inst, unsigned N) const {
2604  assert(N == 1 && "Invalid number of operands!");
2605  // Encoded as val>>3. The printer handles display as 8, 16, 24.
2606  Inst.addOperand(MCOperand::createImm(RotImm.Imm >> 3));
2607  }
2608 
2609  void addModImmOperands(MCInst &Inst, unsigned N) const {
2610  assert(N == 1 && "Invalid number of operands!");
2611 
2612  // Support for fixups (MCFixup)
2613  if (isImm())
2614  return addImmOperands(Inst, N);
2615 
2616  Inst.addOperand(MCOperand::createImm(ModImm.Bits | (ModImm.Rot << 7)));
2617  }
2618 
2619  void addModImmNotOperands(MCInst &Inst, unsigned N) const {
2620  assert(N == 1 && "Invalid number of operands!");
2621  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2622  uint32_t Enc = ARM_AM::getSOImmVal(~CE->getValue());
2623  Inst.addOperand(MCOperand::createImm(Enc));
2624  }
2625 
2626  void addModImmNegOperands(MCInst &Inst, unsigned N) const {
2627  assert(N == 1 && "Invalid number of operands!");
2628  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2629  uint32_t Enc = ARM_AM::getSOImmVal(-CE->getValue());
2630  Inst.addOperand(MCOperand::createImm(Enc));
2631  }
2632 
2633  void addThumbModImmNeg8_255Operands(MCInst &Inst, unsigned N) const {
2634  assert(N == 1 && "Invalid number of operands!");
2635  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2636  uint32_t Val = -CE->getValue();
2637  Inst.addOperand(MCOperand::createImm(Val));
2638  }
2639 
2640  void addThumbModImmNeg1_7Operands(MCInst &Inst, unsigned N) const {
2641  assert(N == 1 && "Invalid number of operands!");
2642  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2643  uint32_t Val = -CE->getValue();
2644  Inst.addOperand(MCOperand::createImm(Val));
2645  }
2646 
2647  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
2648  assert(N == 1 && "Invalid number of operands!");
2649  // Munge the lsb/width into a bitfield mask.
2650  unsigned lsb = Bitfield.LSB;
2651  unsigned width = Bitfield.Width;
2652  // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
2653  uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
2654  (32 - (lsb + width)));
2656  }
2657 
  // Generic immediate: constant or fixup expression.
  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }
2662 
2663  void addFBits16Operands(MCInst &Inst, unsigned N) const {
2664  assert(N == 1 && "Invalid number of operands!");
2665  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2666  Inst.addOperand(MCOperand::createImm(16 - CE->getValue()));
2667  }
2668 
2669  void addFBits32Operands(MCInst &Inst, unsigned N) const {
2670  assert(N == 1 && "Invalid number of operands!");
2671  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2672  Inst.addOperand(MCOperand::createImm(32 - CE->getValue()));
2673  }
2674 
2675  void addFPImmOperands(MCInst &Inst, unsigned N) const {
2676  assert(N == 1 && "Invalid number of operands!");
2677  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2678  int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
2679  Inst.addOperand(MCOperand::createImm(Val));
2680  }
2681 
2682  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
2683  assert(N == 1 && "Invalid number of operands!");
2684  // FIXME: We really want to scale the value here, but the LDRD/STRD
2685  // instruction don't encode operands that way yet.
2686  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2687  Inst.addOperand(MCOperand::createImm(CE->getValue()));
2688  }
2689 
2690  void addImm7s4Operands(MCInst &Inst, unsigned N) const {
2691  assert(N == 1 && "Invalid number of operands!");
2692  // FIXME: We really want to scale the value here, but the VSTR/VLDR_VSYSR
2693  // instruction don't encode operands that way yet.
2694  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2695  Inst.addOperand(MCOperand::createImm(CE->getValue()));
2696  }
2697 
2698  void addImm7Shift0Operands(MCInst &Inst, unsigned N) const {
2699  assert(N == 1 && "Invalid number of operands!");
2700  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2701  Inst.addOperand(MCOperand::createImm(CE->getValue()));
2702  }
2703 
2704  void addImm7Shift1Operands(MCInst &Inst, unsigned N) const {
2705  assert(N == 1 && "Invalid number of operands!");
2706  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2707  Inst.addOperand(MCOperand::createImm(CE->getValue()));
2708  }
2709 
2710  void addImm7Shift2Operands(MCInst &Inst, unsigned N) const {
2711  assert(N == 1 && "Invalid number of operands!");
2712  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2713  Inst.addOperand(MCOperand::createImm(CE->getValue()));
2714  }
2715 
2716  void addImm7Operands(MCInst &Inst, unsigned N) const {
2717  assert(N == 1 && "Invalid number of operands!");
2718  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2719  Inst.addOperand(MCOperand::createImm(CE->getValue()));
2720  }
2721 
2722  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
2723  assert(N == 1 && "Invalid number of operands!");
2724  // The immediate is scaled by four in the encoding and is stored
2725  // in the MCInst as such. Lop off the low two bits here.
2726  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2727  Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
2728  }
2729 
2730  void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const {
2731  assert(N == 1 && "Invalid number of operands!");
2732  // The immediate is scaled by four in the encoding and is stored
2733  // in the MCInst as such. Lop off the low two bits here.
2734  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2735  Inst.addOperand(MCOperand::createImm(-(CE->getValue() / 4)));
2736  }
2737 
2738  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
2739  assert(N == 1 && "Invalid number of operands!");
2740  // The immediate is scaled by four in the encoding and is stored
2741  // in the MCInst as such. Lop off the low two bits here.
2742  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2743  Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
2744  }
2745 
2746  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
2747  assert(N == 1 && "Invalid number of operands!");
2748  // The constant encodes as the immediate-1, and we store in the instruction
2749  // the bits as encoded, so subtract off one here.
2750  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2751  Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
2752  }
2753 
2754  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
2755  assert(N == 1 && "Invalid number of operands!");
2756  // The constant encodes as the immediate-1, and we store in the instruction
2757  // the bits as encoded, so subtract off one here.
2758  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2759  Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
2760  }
2761 
2762  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
2763  assert(N == 1 && "Invalid number of operands!");
2764  // The constant encodes as the immediate, except for 32, which encodes as
2765  // zero.
2766  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2767  unsigned Imm = CE->getValue();
2768  Inst.addOperand(MCOperand::createImm((Imm == 32 ? 0 : Imm)));
2769  }
2770 
2771  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
2772  assert(N == 1 && "Invalid number of operands!");
2773  // An ASR value of 32 encodes as 0, so that's how we want to add it to
2774  // the instruction as well.
2775  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2776  int Val = CE->getValue();
2777  Inst.addOperand(MCOperand::createImm(Val == 32 ? 0 : Val));
2778  }
2779 
2780  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
2781  assert(N == 1 && "Invalid number of operands!");
2782  // The operand is actually a t2_so_imm, but we have its bitwise
2783  // negation in the assembly source, so twiddle it here.
2784  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2785  Inst.addOperand(MCOperand::createImm(~(uint32_t)CE->getValue()));
2786  }
2787 
2788  void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
2789  assert(N == 1 && "Invalid number of operands!");
2790  // The operand is actually a t2_so_imm, but we have its
2791  // negation in the assembly source, so twiddle it here.
2792  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2793  Inst.addOperand(MCOperand::createImm(-(uint32_t)CE->getValue()));
2794  }
2795 
2796  void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const {
2797  assert(N == 1 && "Invalid number of operands!");
2798  // The operand is actually an imm0_4095, but we have its
2799  // negation in the assembly source, so twiddle it here.
2800  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2801  Inst.addOperand(MCOperand::createImm(-(uint32_t)CE->getValue()));
2802  }
2803 
2804  void addUnsignedOffset_b8s2Operands(MCInst &Inst, unsigned N) const {
2805  if(const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
2806  Inst.addOperand(MCOperand::createImm(CE->getValue() >> 2));
2807  return;
2808  }
2809  const MCSymbolRefExpr *SR = cast<MCSymbolRefExpr>(Imm.Val);
2811  }
2812 
2813  void addThumbMemPCOperands(MCInst &Inst, unsigned N) const {
2814  assert(N == 1 && "Invalid number of operands!");
2815  if (isImm()) {
2816  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2817  if (CE) {
2818  Inst.addOperand(MCOperand::createImm(CE->getValue()));
2819  return;
2820  }
2821  const MCSymbolRefExpr *SR = cast<MCSymbolRefExpr>(Imm.Val);
2823  return;
2824  }
2825 
2826  assert(isGPRMem() && "Unknown value type!");
2827  assert(isa<MCConstantExpr>(Memory.OffsetImm) && "Unknown value type!");
2828  if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
2829  Inst.addOperand(MCOperand::createImm(CE->getValue()));
2830  else
2831  Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
2832  }
2833 
2834  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
2835  assert(N == 1 && "Invalid number of operands!");
2836  Inst.addOperand(MCOperand::createImm(unsigned(getMemBarrierOpt())));
2837  }
2838 
2839  void addInstSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
2840  assert(N == 1 && "Invalid number of operands!");
2841  Inst.addOperand(MCOperand::createImm(unsigned(getInstSyncBarrierOpt())));
2842  }
2843 
2844  void addTraceSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
2845  assert(N == 1 && "Invalid number of operands!");
2846  Inst.addOperand(MCOperand::createImm(unsigned(getTraceSyncBarrierOpt())));
2847  }
2848 
2849  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
2850  assert(N == 1 && "Invalid number of operands!");
2851  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2852  }
2853 
2854  void addMemNoOffsetT2Operands(MCInst &Inst, unsigned N) const {
2855  assert(N == 1 && "Invalid number of operands!");
2856  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2857  }
2858 
2859  void addMemNoOffsetT2NoSpOperands(MCInst &Inst, unsigned N) const {
2860  assert(N == 1 && "Invalid number of operands!");
2861  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2862  }
2863 
2864  void addMemNoOffsetTOperands(MCInst &Inst, unsigned N) const {
2865  assert(N == 1 && "Invalid number of operands!");
2866  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2867  }
2868 
2869  void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
2870  assert(N == 1 && "Invalid number of operands!");
2871  if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
2872  Inst.addOperand(MCOperand::createImm(CE->getValue()));
2873  else
2874  Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
2875  }
2876 
2877  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
2878  assert(N == 1 && "Invalid number of operands!");
2879  assert(isImm() && "Not an immediate!");
2880 
2881  // If we have an immediate that's not a constant, treat it as a label
2882  // reference needing a fixup.
2883  if (!isa<MCConstantExpr>(getImm())) {
2884  Inst.addOperand(MCOperand::createExpr(getImm()));
2885  return;
2886  }
2887 
2888  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2889  int Val = CE->getValue();
2890  Inst.addOperand(MCOperand::createImm(Val));
2891  }
2892 
2893  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
2894  assert(N == 2 && "Invalid number of operands!");
2895  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2896  Inst.addOperand(MCOperand::createImm(Memory.Alignment));
2897  }
2898 
2899  void addDupAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
2900  addAlignedMemoryOperands(Inst, N);
2901  }
2902 
2903  void addAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
2904  addAlignedMemoryOperands(Inst, N);
2905  }
2906 
2907  void addAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
2908  addAlignedMemoryOperands(Inst, N);
2909  }
2910 
2911  void addDupAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
2912  addAlignedMemoryOperands(Inst, N);
2913  }
2914 
2915  void addAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
2916  addAlignedMemoryOperands(Inst, N);
2917  }
2918 
2919  void addDupAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
2920  addAlignedMemoryOperands(Inst, N);
2921  }
2922 
2923  void addAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
2924  addAlignedMemoryOperands(Inst, N);
2925  }
2926 
2927  void addDupAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
2928  addAlignedMemoryOperands(Inst, N);
2929  }
2930 
2931  void addAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
2932  addAlignedMemoryOperands(Inst, N);
2933  }
2934 
2935  void addDupAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
2936  addAlignedMemoryOperands(Inst, N);
2937  }
2938 
2939  void addAlignedMemory64or128or256Operands(MCInst &Inst, unsigned N) const {
2940  addAlignedMemoryOperands(Inst, N);
2941  }
2942 
2943  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
2944  assert(N == 3 && "Invalid number of operands!");
2945  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2946  Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2947  if (!Memory.OffsetRegNum) {
2948  if (!Memory.OffsetImm)
2950  else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
2951  int32_t Val = CE->getValue();
2953  // Special case for #-0
2954  if (Val == std::numeric_limits<int32_t>::min())
2955  Val = 0;
2956  if (Val < 0)
2957  Val = -Val;
2958  Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
2959  Inst.addOperand(MCOperand::createImm(Val));
2960  } else
2961  Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
2962  } else {
2963  // For register offset, we encode the shift type and negation flag
2964  // here.
2965  int32_t Val =
2967  Memory.ShiftImm, Memory.ShiftType);
2968  Inst.addOperand(MCOperand::createImm(Val));
2969  }
2970  }
2971 
2972  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
2973  assert(N == 2 && "Invalid number of operands!");
2974  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2975  assert(CE && "non-constant AM2OffsetImm operand!");
2976  int32_t Val = CE->getValue();
2978  // Special case for #-0
2979  if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
2980  if (Val < 0) Val = -Val;
2981  Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
2983  Inst.addOperand(MCOperand::createImm(Val));
2984  }
2985 
2986  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
2987  assert(N == 3 && "Invalid number of operands!");
2988  // If we have an immediate that's not a constant, treat it as a label
2989  // reference needing a fixup. If it is a constant, it's something else
2990  // and we reject it.
2991  if (isImm()) {
2992  Inst.addOperand(MCOperand::createExpr(getImm()));
2995  return;
2996  }
2997 
2998  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2999  Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3000  if (!Memory.OffsetRegNum) {
3001  if (!Memory.OffsetImm)
3003  else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
3004  int32_t Val = CE->getValue();
3006  // Special case for #-0
3007  if (Val == std::numeric_limits<int32_t>::min())
3008  Val = 0;
3009  if (Val < 0)
3010  Val = -Val;
3011  Val = ARM_AM::getAM3Opc(AddSub, Val);
3012  Inst.addOperand(MCOperand::createImm(Val));
3013  } else
3014  Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3015  } else {
3016  // For register offset, we encode the shift type and negation flag
3017  // here.
3018  int32_t Val =
3019  ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
3020  Inst.addOperand(MCOperand::createImm(Val));
3021  }
3022  }
3023 
3024  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
3025  assert(N == 2 && "Invalid number of operands!");
3026  if (Kind == k_PostIndexRegister) {
3027  int32_t Val =
3028  ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
3029  Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
3030  Inst.addOperand(MCOperand::createImm(Val));
3031  return;
3032  }
3033 
3034  // Constant offset.
3035  const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
3036  int32_t Val = CE->getValue();
3038  // Special case for #-0
3039  if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
3040  if (Val < 0) Val = -Val;
3041  Val = ARM_AM::getAM3Opc(AddSub, Val);
3043  Inst.addOperand(MCOperand::createImm(Val));
3044  }
3045 
3046  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
3047  assert(N == 2 && "Invalid number of operands!");
3048  // If we have an immediate that's not a constant, treat it as a label
3049  // reference needing a fixup. If it is a constant, it's something else
3050  // and we reject it.
3051  if (isImm()) {
3052  Inst.addOperand(MCOperand::createExpr(getImm()));
3054  return;
3055  }
3056 
3057  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3058  if (!Memory.OffsetImm)
3060  else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
3061  // The lower two bits are always zero and as such are not encoded.
3062  int32_t Val = CE->getValue() / 4;
3064  // Special case for #-0
3065  if (Val == std::numeric_limits<int32_t>::min())
3066  Val = 0;
3067  if (Val < 0)
3068  Val = -Val;
3069  Val = ARM_AM::getAM5Opc(AddSub, Val);
3070  Inst.addOperand(MCOperand::createImm(Val));
3071  } else
3072  Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3073  }
3074 
3075  void addAddrMode5FP16Operands(MCInst &Inst, unsigned N) const {
3076  assert(N == 2 && "Invalid number of operands!");
3077  // If we have an immediate that's not a constant, treat it as a label
3078  // reference needing a fixup. If it is a constant, it's something else
3079  // and we reject it.
3080  if (isImm()) {
3081  Inst.addOperand(MCOperand::createExpr(getImm()));
3083  return;
3084  }
3085 
3086  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3087  // The lower bit is always zero and as such is not encoded.
3088  if (!Memory.OffsetImm)
3090  else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
3091  int32_t Val = CE->getValue() / 2;
3093  // Special case for #-0
3094  if (Val == std::numeric_limits<int32_t>::min())
3095  Val = 0;
3096  if (Val < 0)
3097  Val = -Val;
3098  Val = ARM_AM::getAM5FP16Opc(AddSub, Val);
3099  Inst.addOperand(MCOperand::createImm(Val));
3100  } else
3101  Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3102  }
3103 
3104  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
3105  assert(N == 2 && "Invalid number of operands!");
3106  // If we have an immediate that's not a constant, treat it as a label
3107  // reference needing a fixup. If it is a constant, it's something else
3108  // and we reject it.
3109  if (isImm()) {
3110  Inst.addOperand(MCOperand::createExpr(getImm()));
3112  return;
3113  }
3114 
3115  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3116  addExpr(Inst, Memory.OffsetImm);
3117  }
3118 
3119  void addMemImm7s4OffsetOperands(MCInst &Inst, unsigned N) const {
3120  assert(N == 2 && "Invalid number of operands!");
3121  // If we have an immediate that's not a constant, treat it as a label
3122  // reference needing a fixup. If it is a constant, it's something else
3123  // and we reject it.
3124  if (isImm()) {
3125  Inst.addOperand(MCOperand::createExpr(getImm()));
3127  return;
3128  }
3129 
3130  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3131  addExpr(Inst, Memory.OffsetImm);
3132  }
3133 
3134  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
3135  assert(N == 2 && "Invalid number of operands!");
3136  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3137  if (!Memory.OffsetImm)
3139  else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
3140  // The lower two bits are always zero and as such are not encoded.
3141  Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
3142  else
3143  Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3144  }
3145 
3146  void addMemImmOffsetOperands(MCInst &Inst, unsigned N) const {
3147  assert(N == 2 && "Invalid number of operands!");
3148  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3149  addExpr(Inst, Memory.OffsetImm);
3150  }
3151 
3152  void addMemRegRQOffsetOperands(MCInst &Inst, unsigned N) const {
3153  assert(N == 2 && "Invalid number of operands!");
3154  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3155  Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3156  }
3157 
3158  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
3159  assert(N == 2 && "Invalid number of operands!");
3160  // If this is an immediate, it's a label reference.
3161  if (isImm()) {
3162  addExpr(Inst, getImm());
3164  return;
3165  }
3166 
3167  // Otherwise, it's a normal memory reg+offset.
3168  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3169  addExpr(Inst, Memory.OffsetImm);
3170  }
3171 
3172  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
3173  assert(N == 2 && "Invalid number of operands!");
3174  // If this is an immediate, it's a label reference.
3175  if (isImm()) {
3176  addExpr(Inst, getImm());
3178  return;
3179  }
3180 
3181  // Otherwise, it's a normal memory reg+offset.
3182  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3183  addExpr(Inst, Memory.OffsetImm);
3184  }
3185 
3186  void addConstPoolAsmImmOperands(MCInst &Inst, unsigned N) const {
3187  assert(N == 1 && "Invalid number of operands!");
3188  // This is container for the immediate that we will create the constant
3189  // pool from
3190  addExpr(Inst, getConstantPoolImm());
3191  }
3192 
3193  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
3194  assert(N == 2 && "Invalid number of operands!");
3195  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3196  Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3197  }
3198 
3199  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
3200  assert(N == 2 && "Invalid number of operands!");
3201  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3202  Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3203  }
3204 
3205  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
3206  assert(N == 3 && "Invalid number of operands!");
3207  unsigned Val =
3209  Memory.ShiftImm, Memory.ShiftType);
3210  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3211  Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3212  Inst.addOperand(MCOperand::createImm(Val));
3213  }
3214 
3215  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
3216  assert(N == 3 && "Invalid number of operands!");
3217  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3218  Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3219  Inst.addOperand(MCOperand::createImm(Memory.ShiftImm));
3220  }
3221 
3222  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
3223  assert(N == 2 && "Invalid number of operands!");
3224  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3225  Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3226  }
3227 
3228  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
3229  assert(N == 2 && "Invalid number of operands!");
3230  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3231  if (!Memory.OffsetImm)
3233  else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
3234  // The lower two bits are always zero and as such are not encoded.
3235  Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
3236  else
3237  Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3238  }
3239 
3240  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
3241  assert(N == 2 && "Invalid number of operands!");
3242  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3243  if (!Memory.OffsetImm)
3245  else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
3246  Inst.addOperand(MCOperand::createImm(CE->getValue() / 2));
3247  else
3248  Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3249  }
3250 
3251  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
3252  assert(N == 2 && "Invalid number of operands!");
3253  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3254  addExpr(Inst, Memory.OffsetImm);
3255  }
3256 
3257  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
3258  assert(N == 2 && "Invalid number of operands!");
3259  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3260  if (!Memory.OffsetImm)
3262  else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
3263  // The lower two bits are always zero and as such are not encoded.
3264  Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
3265  else
3266  Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3267  }
3268 
3269  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
3270  assert(N == 1 && "Invalid number of operands!");
3271  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
3272  assert(CE && "non-constant post-idx-imm8 operand!");
3273  int Imm = CE->getValue();
3274  bool isAdd = Imm >= 0;
3275  if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0;
3276  Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
3277  Inst.addOperand(MCOperand::createImm(Imm));
3278  }
3279 
3280  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
3281  assert(N == 1 && "Invalid number of operands!");
3282  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
3283  assert(CE && "non-constant post-idx-imm8s4 operand!");
3284  int Imm = CE->getValue();
3285  bool isAdd = Imm >= 0;
3286  if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0;
3287  // Immediate is scaled by 4.
3288  Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
3289  Inst.addOperand(MCOperand::createImm(Imm));
3290  }
3291 
3292  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
3293  assert(N == 2 && "Invalid number of operands!");
3294  Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
3295  Inst.addOperand(MCOperand::createImm(PostIdxReg.isAdd));
3296  }
3297 
3298  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
3299  assert(N == 2 && "Invalid number of operands!");
3300  Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
3301  // The sign, shift type, and shift amount are encoded in a single operand
3302  // using the AM2 encoding helpers.
3303  ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
3304  unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
3305  PostIdxReg.ShiftTy);
3306  Inst.addOperand(MCOperand::createImm(Imm));
3307  }
3308 
3309  void addPowerTwoOperands(MCInst &Inst, unsigned N) const {
3310  assert(N == 1 && "Invalid number of operands!");
3311  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3312  Inst.addOperand(MCOperand::createImm(CE->getValue()));
3313  }
3314 
3315  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
3316  assert(N == 1 && "Invalid number of operands!");
3317  Inst.addOperand(MCOperand::createImm(unsigned(getMSRMask())));
3318  }
3319 
3320  void addBankedRegOperands(MCInst &Inst, unsigned N) const {
3321  assert(N == 1 && "Invalid number of operands!");
3322  Inst.addOperand(MCOperand::createImm(unsigned(getBankedReg())));
3323  }
3324 
3325  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
3326  assert(N == 1 && "Invalid number of operands!");
3327  Inst.addOperand(MCOperand::createImm(unsigned(getProcIFlags())));
3328  }
3329 
3330  void addVecListOperands(MCInst &Inst, unsigned N) const {
3331  assert(N == 1 && "Invalid number of operands!");
3332  Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
3333  }
3334 
3335  void addMVEVecListOperands(MCInst &Inst, unsigned N) const {
3336  assert(N == 1 && "Invalid number of operands!");
3337 
3338  // When we come here, the VectorList field will identify a range
3339  // of q-registers by its base register and length, and it will
3340  // have already been error-checked to be the expected length of
3341  // range and contain only q-regs in the range q0-q7. So we can
3342  // count on the base register being in the range q0-q6 (for 2
3343  // regs) or q0-q4 (for 4)
3344  //
3345  // The MVE instructions taking a register range of this kind will
3346  // need an operand in the QQPR or QQQQPR class, representing the
3347  // entire range as a unit. So we must translate into that class,
3348  // by finding the index of the base register in the MQPR reg
3349  // class, and returning the super-register at the corresponding
3350  // index in the target class.
3351 
3352  const MCRegisterClass *RC_in = &ARMMCRegisterClasses[ARM::MQPRRegClassID];
3353  const MCRegisterClass *RC_out = (VectorList.Count == 2) ?
3354  &ARMMCRegisterClasses[ARM::QQPRRegClassID] :
3355  &ARMMCRegisterClasses[ARM::QQQQPRRegClassID];
3356 
3357  unsigned I, E = RC_out->getNumRegs();
3358  for (I = 0; I < E; I++)
3359  if (RC_in->getRegister(I) == VectorList.RegNum)
3360  break;
3361  assert(I < E && "Invalid vector list start register!");
3362 
3363  Inst.addOperand(MCOperand::createReg(RC_out->getRegister(I)));
3364  }
3365 
3366  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
3367  assert(N == 2 && "Invalid number of operands!");
3368  Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
3369  Inst.addOperand(MCOperand::createImm(VectorList.LaneIndex));
3370  }
3371 
3372  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
3373  assert(N == 1 && "Invalid number of operands!");
3374  Inst.addOperand(MCOperand::createImm(getVectorIndex()));
3375  }
3376 
3377  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
3378  assert(N == 1 && "Invalid number of operands!");
3379  Inst.addOperand(MCOperand::createImm(getVectorIndex()));
3380  }
3381 
3382  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
3383  assert(N == 1 && "Invalid number of operands!");
3384  Inst.addOperand(MCOperand::createImm(getVectorIndex()));
3385  }
3386 
3387  void addVectorIndex64Operands(MCInst &Inst, unsigned N) const {
3388  assert(N == 1 && "Invalid number of operands!");
3389  Inst.addOperand(MCOperand::createImm(getVectorIndex()));
3390  }
3391 
3392  void addMVEVectorIndexOperands(MCInst &Inst, unsigned N) const {
3393  assert(N == 1 && "Invalid number of operands!");
3394  Inst.addOperand(MCOperand::createImm(getVectorIndex()));
3395  }
3396 
3397  void addMVEPairVectorIndexOperands(MCInst &Inst, unsigned N) const {
3398  assert(N == 1 && "Invalid number of operands!");
3399  Inst.addOperand(MCOperand::createImm(getVectorIndex()));
3400  }
3401 
3402  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
3403  assert(N == 1 && "Invalid number of operands!");
3404  // The immediate encodes the type of constant as well as the value.
3405  // Mask in that this is an i8 splat.
3406  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3407  Inst.addOperand(MCOperand::createImm(CE->getValue() | 0xe00));
3408  }
3409 
3410  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
3411  assert(N == 1 && "Invalid number of operands!");
3412  // The immediate encodes the type of constant as well as the value.
3413  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3414  unsigned Value = CE->getValue();
3417  }
3418 
3419  void addNEONi16splatNotOperands(MCInst &Inst, unsigned N) const {
3420  assert(N == 1 && "Invalid number of operands!");
3421  // The immediate encodes the type of constant as well as the value.
3422  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3423  unsigned Value = CE->getValue();
3424  Value = ARM_AM::encodeNEONi16splat(~Value & 0xffff);
3426  }
3427 
3428  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
3429  assert(N == 1 && "Invalid number of operands!");
3430  // The immediate encodes the type of constant as well as the value.
3431  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3432  unsigned Value = CE->getValue();
3435  }
3436 
3437  void addNEONi32splatNotOperands(MCInst &Inst, unsigned N) const {
3438  assert(N == 1 && "Invalid number of operands!");
3439  // The immediate encodes the type of constant as well as the value.
3440  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3441  unsigned Value = CE->getValue();
3444  }
3445 
3446  void addNEONi8ReplicateOperands(MCInst &Inst, bool Inv) const {
3447  // The immediate encodes the type of constant as well as the value.
3448  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3449  assert((Inst.getOpcode() == ARM::VMOVv8i8 ||
3450  Inst.getOpcode() == ARM::VMOVv16i8) &&
3451  "All instructions that wants to replicate non-zero byte "
3452  "always must be replaced with VMOVv8i8 or VMOVv16i8.");
3453  unsigned Value = CE->getValue();
3454  if (Inv)
3455  Value = ~Value;
3456  unsigned B = Value & 0xff;
3457  B |= 0xe00; // cmode = 0b1110
3459  }
3460 
3461  void addNEONinvi8ReplicateOperands(MCInst &Inst, unsigned N) const {
3462  assert(N == 1 && "Invalid number of operands!");
3463  addNEONi8ReplicateOperands(Inst, true);
3464  }
3465 
3466  static unsigned encodeNeonVMOVImmediate(unsigned Value) {
3467  if (Value >= 256 && Value <= 0xffff)
3468  Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
3469  else if (Value > 0xffff && Value <= 0xffffff)
3470  Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
3471  else if (Value > 0xffffff)
3472  Value = (Value >> 24) | 0x600;
3473  return Value;
3474  }
3475 
3476  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
3477  assert(N == 1 && "Invalid number of operands!");
3478  // The immediate encodes the type of constant as well as the value.
3479  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3480  unsigned Value = encodeNeonVMOVImmediate(CE->getValue());
3482  }
3483 
3484  void addNEONvmovi8ReplicateOperands(MCInst &Inst, unsigned N) const {
3485  assert(N == 1 && "Invalid number of operands!");
3486  addNEONi8ReplicateOperands(Inst, false);
3487  }
3488 
3489  void addNEONvmovi16ReplicateOperands(MCInst &Inst, unsigned N) const {
3490  assert(N == 1 && "Invalid number of operands!");
3491  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3492  assert((Inst.getOpcode() == ARM::VMOVv4i16 ||
3493  Inst.getOpcode() == ARM::VMOVv8i16 ||
3494  Inst.getOpcode() == ARM::VMVNv4i16 ||
3495  Inst.getOpcode() == ARM::VMVNv8i16) &&
3496  "All instructions that want to replicate non-zero half-word "
3497  "always must be replaced with V{MOV,MVN}v{4,8}i16.");
3498  uint64_t Value = CE->getValue();
3499  unsigned Elem = Value & 0xffff;
3500  if (Elem >= 256)
3501  Elem = (Elem >> 8) | 0x200;
3502  Inst.addOperand(MCOperand::createImm(Elem));
3503  }
3504 
3505  void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
3506  assert(N == 1 && "Invalid number of operands!");
3507  // The immediate encodes the type of constant as well as the value.
3508  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3509  unsigned Value = encodeNeonVMOVImmediate(~CE->getValue());
3511  }
3512 
3513  void addNEONvmovi32ReplicateOperands(MCInst &Inst, unsigned N) const {
3514  assert(N == 1 && "Invalid number of operands!");
3515  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3516  assert((Inst.getOpcode() == ARM::VMOVv2i32 ||
3517  Inst.getOpcode() == ARM::VMOVv4i32 ||
3518  Inst.getOpcode() == ARM::VMVNv2i32 ||
3519  Inst.getOpcode() == ARM::VMVNv4i32) &&
3520  "All instructions that want to replicate non-zero word "
3521  "always must be replaced with V{MOV,MVN}v{2,4}i32.");
3522  uint64_t Value = CE->getValue();
3523  unsigned Elem = encodeNeonVMOVImmediate(Value & 0xffffffff);
3524  Inst.addOperand(MCOperand::createImm(Elem));
3525  }
3526 
3527  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
3528  assert(N == 1 && "Invalid number of operands!");
3529  // The immediate encodes the type of constant as well as the value.
3530  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3531  uint64_t Value = CE->getValue();
3532  unsigned Imm = 0;
3533  for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
3534  Imm |= (Value & 1) << i;
3535  }
3536  Inst.addOperand(MCOperand::createImm(Imm | 0x1e00));
3537  }
3538 
3539  void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
3540  assert(N == 1 && "Invalid number of operands!");
3541  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3542  Inst.addOperand(MCOperand::createImm(CE->getValue() / 90));
3543  }
3544 
3545  void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
3546  assert(N == 1 && "Invalid number of operands!");
3547  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3548  Inst.addOperand(MCOperand::createImm((CE->getValue() - 90) / 180));
3549  }
3550 
3551  void addMveSaturateOperands(MCInst &Inst, unsigned N) const {
3552  assert(N == 1 && "Invalid number of operands!");
3553  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3554  unsigned Imm = CE->getValue();
3555  assert((Imm == 48 || Imm == 64) && "Invalid saturate operand");
3556  Inst.addOperand(MCOperand::createImm(Imm == 48 ? 1 : 0));
3557  }
3558 
3559  void print(raw_ostream &OS) const override;
3560 
3561  static std::unique_ptr<ARMOperand> CreateITMask(unsigned Mask, SMLoc S) {
3562  auto Op = std::make_unique<ARMOperand>(k_ITCondMask);
3563  Op->ITMask.Mask = Mask;
3564  Op->StartLoc = S;
3565  Op->EndLoc = S;
3566  return Op;
3567  }
3568 
3569  static std::unique_ptr<ARMOperand> CreateCondCode(ARMCC::CondCodes CC,
3570  SMLoc S) {
3571  auto Op = std::make_unique<ARMOperand>(k_CondCode);
3572  Op->CC.Val = CC;
3573  Op->StartLoc = S;
3574  Op->EndLoc = S;
3575  return Op;
3576  }
3577 
3578  static std::unique_ptr<ARMOperand> CreateVPTPred(ARMVCC::VPTCodes CC,
3579  SMLoc S) {
3580  auto Op = std::make_unique<ARMOperand>(k_VPTPred);
3581  Op->VCC.Val = CC;
3582  Op->StartLoc = S;
3583  Op->EndLoc = S;
3584  return Op;
3585  }
3586 
3587  static std::unique_ptr<ARMOperand> CreateCoprocNum(unsigned CopVal, SMLoc S) {
3588  auto Op = std::make_unique<ARMOperand>(k_CoprocNum);
3589  Op->Cop.Val = CopVal;
3590  Op->StartLoc = S;
3591  Op->EndLoc = S;
3592  return Op;
3593  }
3594 
3595  static std::unique_ptr<ARMOperand> CreateCoprocReg(unsigned CopVal, SMLoc S) {
3596  auto Op = std::make_unique<ARMOperand>(k_CoprocReg);
3597  Op->Cop.Val = CopVal;
3598  Op->StartLoc = S;
3599  Op->EndLoc = S;
3600  return Op;
3601  }
3602 
3603  static std::unique_ptr<ARMOperand> CreateCoprocOption(unsigned Val, SMLoc S,
3604  SMLoc E) {
3605  auto Op = std::make_unique<ARMOperand>(k_CoprocOption);
3606  Op->Cop.Val = Val;
3607  Op->StartLoc = S;
3608  Op->EndLoc = E;
3609  return Op;
3610  }
3611 
3612  static std::unique_ptr<ARMOperand> CreateCCOut(unsigned RegNum, SMLoc S) {
3613  auto Op = std::make_unique<ARMOperand>(k_CCOut);
3614  Op->Reg.RegNum = RegNum;
3615  Op->StartLoc = S;
3616  Op->EndLoc = S;
3617  return Op;
3618  }
3619 
3620  static std::unique_ptr<ARMOperand> CreateToken(StringRef Str, SMLoc S) {
3621  auto Op = std::make_unique<ARMOperand>(k_Token);
3622  Op->Tok.Data = Str.data();
3623  Op->Tok.Length = Str.size();
3624  Op->StartLoc = S;
3625  Op->EndLoc = S;
3626  return Op;
3627  }
3628 
3629  static std::unique_ptr<ARMOperand> CreateReg(unsigned RegNum, SMLoc S,
3630  SMLoc E) {
3631  auto Op = std::make_unique<ARMOperand>(k_Register);
3632  Op->Reg.RegNum = RegNum;
3633  Op->StartLoc = S;
3634  Op->EndLoc = E;
3635  return Op;
3636  }
3637 
3638  static std::unique_ptr<ARMOperand>
3639  CreateShiftedRegister(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
3640  unsigned ShiftReg, unsigned ShiftImm, SMLoc S,
3641  SMLoc E) {
3642  auto Op = std::make_unique<ARMOperand>(k_ShiftedRegister);
3643  Op->RegShiftedReg.ShiftTy = ShTy;
3644  Op->RegShiftedReg.SrcReg = SrcReg;
3645  Op->RegShiftedReg.ShiftReg = ShiftReg;
3646  Op->RegShiftedReg.ShiftImm = ShiftImm;
3647  Op->StartLoc = S;
3648  Op->EndLoc = E;
3649  return Op;
3650  }
3651 
3652  static std::unique_ptr<ARMOperand>
3653  CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
3654  unsigned ShiftImm, SMLoc S, SMLoc E) {
3655  auto Op = std::make_unique<ARMOperand>(k_ShiftedImmediate);
3656  Op->RegShiftedImm.ShiftTy = ShTy;
3657  Op->RegShiftedImm.SrcReg = SrcReg;
3658  Op->RegShiftedImm.ShiftImm = ShiftImm;
3659  Op->StartLoc = S;
3660  Op->EndLoc = E;
3661  return Op;
3662  }
3663 
3664  static std::unique_ptr<ARMOperand> CreateShifterImm(bool isASR, unsigned Imm,
3665  SMLoc S, SMLoc E) {
3666  auto Op = std::make_unique<ARMOperand>(k_ShifterImmediate);
3667  Op->ShifterImm.isASR = isASR;
3668  Op->ShifterImm.Imm = Imm;
3669  Op->StartLoc = S;
3670  Op->EndLoc = E;
3671  return Op;
3672  }
3673 
3674  static std::unique_ptr<ARMOperand> CreateRotImm(unsigned Imm, SMLoc S,
3675  SMLoc E) {
3676  auto Op = std::make_unique<ARMOperand>(k_RotateImmediate);
3677  Op->RotImm.Imm = Imm;
3678  Op->StartLoc = S;
3679  Op->EndLoc = E;
3680  return Op;
3681  }
3682 
3683  static std::unique_ptr<ARMOperand> CreateModImm(unsigned Bits, unsigned Rot,
3684  SMLoc S, SMLoc E) {
3685  auto Op = std::make_unique<ARMOperand>(k_ModifiedImmediate);
3686  Op->ModImm.Bits = Bits;
3687  Op->ModImm.Rot = Rot;
3688  Op->StartLoc = S;
3689  Op->EndLoc = E;
3690  return Op;
3691  }
3692 
3693  static std::unique_ptr<ARMOperand>
3694  CreateConstantPoolImm(const MCExpr *Val, SMLoc S, SMLoc E) {
3695  auto Op = std::make_unique<ARMOperand>(k_ConstantPoolImmediate);
3696  Op->Imm.Val = Val;
3697  Op->StartLoc = S;
3698  Op->EndLoc = E;
3699  return Op;
3700  }
3701 
3702  static std::unique_ptr<ARMOperand>
3703  CreateBitfield(unsigned LSB, unsigned Width, SMLoc S, SMLoc E) {
3704  auto Op = std::make_unique<ARMOperand>(k_BitfieldDescriptor);
3705  Op->Bitfield.LSB = LSB;
3706  Op->Bitfield.Width = Width;
3707  Op->StartLoc = S;
3708  Op->EndLoc = E;
3709  return Op;
3710  }
3711 
3712  static std::unique_ptr<ARMOperand>
3713  CreateRegList(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
3714  SMLoc StartLoc, SMLoc EndLoc) {
3715  assert(Regs.size() > 0 && "RegList contains no registers?");
3716  KindTy Kind = k_RegisterList;
3717 
3718  if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
3719  Regs.front().second)) {
3720  if (Regs.back().second == ARM::VPR)
3721  Kind = k_FPDRegisterListWithVPR;
3722  else
3723  Kind = k_DPRRegisterList;
3724  } else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(
3725  Regs.front().second)) {
3726  if (Regs.back().second == ARM::VPR)
3727  Kind = k_FPSRegisterListWithVPR;
3728  else
3729  Kind = k_SPRRegisterList;
3730  }
3731 
3732  if (Kind == k_RegisterList && Regs.back().second == ARM::APSR)
3733  Kind = k_RegisterListWithAPSR;
3734 
3735  assert(llvm::is_sorted(Regs) && "Register list must be sorted by encoding");
3736 
3737  auto Op = std::make_unique<ARMOperand>(Kind);
3738  for (const auto &P : Regs)
3739  Op->Registers.push_back(P.second);
3740 
3741  Op->StartLoc = StartLoc;
3742  Op->EndLoc = EndLoc;
3743  return Op;
3744  }
3745 
3746  static std::unique_ptr<ARMOperand> CreateVectorList(unsigned RegNum,
3747  unsigned Count,
3748  bool isDoubleSpaced,
3749  SMLoc S, SMLoc E) {
3750  auto Op = std::make_unique<ARMOperand>(k_VectorList);
3751  Op->VectorList.RegNum = RegNum;
3752  Op->VectorList.Count = Count;
3753  Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3754  Op->StartLoc = S;
3755  Op->EndLoc = E;
3756  return Op;
3757  }
3758 
3759  static std::unique_ptr<ARMOperand>
3760  CreateVectorListAllLanes(unsigned RegNum, unsigned Count, bool isDoubleSpaced,
3761  SMLoc S, SMLoc E) {
3762  auto Op = std::make_unique<ARMOperand>(k_VectorListAllLanes);
3763  Op->VectorList.RegNum = RegNum;
3764  Op->VectorList.Count = Count;
3765  Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3766  Op->StartLoc = S;
3767  Op->EndLoc = E;
3768  return Op;
3769  }
3770 
3771  static std::unique_ptr<ARMOperand>
3772  CreateVectorListIndexed(unsigned RegNum, unsigned Count, unsigned Index,
3773  bool isDoubleSpaced, SMLoc S, SMLoc E) {
3774  auto Op = std::make_unique<ARMOperand>(k_VectorListIndexed);
3775  Op->VectorList.RegNum = RegNum;
3776  Op->VectorList.Count = Count;
3777  Op->VectorList.LaneIndex = Index;
3778  Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3779  Op->StartLoc = S;
3780  Op->EndLoc = E;
3781  return Op;
3782  }
3783 
3784  static std::unique_ptr<ARMOperand>
3785  CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
3786  auto Op = std::make_unique<ARMOperand>(k_VectorIndex);
3787  Op->VectorIndex.Val = Idx;
3788  Op->StartLoc = S;
3789  Op->EndLoc = E;
3790  return Op;
3791  }
3792 
3793  static std::unique_ptr<ARMOperand> CreateImm(const MCExpr *Val, SMLoc S,
3794  SMLoc E) {
3795  auto Op = std::make_unique<ARMOperand>(k_Immediate);
3796  Op->Imm.Val = Val;
3797  Op->StartLoc = S;
3798  Op->EndLoc = E;
3799  return Op;
3800  }
3801 
3802  static std::unique_ptr<ARMOperand>
3803  CreateMem(unsigned BaseRegNum, const MCExpr *OffsetImm, unsigned OffsetRegNum,
3804  ARM_AM::ShiftOpc ShiftType, unsigned ShiftImm, unsigned Alignment,
3805  bool isNegative, SMLoc S, SMLoc E, SMLoc AlignmentLoc = SMLoc()) {
3806  auto Op = std::make_unique<ARMOperand>(k_Memory);
3807  Op->Memory.BaseRegNum = BaseRegNum;
3808  Op->Memory.OffsetImm = OffsetImm;
3809  Op->Memory.OffsetRegNum = OffsetRegNum;
3810  Op->Memory.ShiftType = ShiftType;
3811  Op->Memory.ShiftImm = ShiftImm;
3812  Op->Memory.Alignment = Alignment;
3813  Op->Memory.isNegative = isNegative;
3814  Op->StartLoc = S;
3815  Op->EndLoc = E;
3816  Op->AlignmentLoc = AlignmentLoc;
3817  return Op;
3818  }
3819 
3820  static std::unique_ptr<ARMOperand>
3821  CreatePostIdxReg(unsigned RegNum, bool isAdd, ARM_AM::ShiftOpc ShiftTy,
3822  unsigned ShiftImm, SMLoc S, SMLoc E) {
3823  auto Op = std::make_unique<ARMOperand>(k_PostIndexRegister);
3824  Op->PostIdxReg.RegNum = RegNum;
3825  Op->PostIdxReg.isAdd = isAdd;
3826  Op->PostIdxReg.ShiftTy = ShiftTy;
3827  Op->PostIdxReg.ShiftImm = ShiftImm;
3828  Op->StartLoc = S;
3829  Op->EndLoc = E;
3830  return Op;
3831  }
3832 
3833  static std::unique_ptr<ARMOperand> CreateMemBarrierOpt(ARM_MB::MemBOpt Opt,
3834  SMLoc S) {
3835  auto Op = std::make_unique<ARMOperand>(k_MemBarrierOpt);
3836  Op->MBOpt.Val = Opt;
3837  Op->StartLoc = S;
3838  Op->EndLoc = S;
3839  return Op;
3840  }
3841 
3842  static std::unique_ptr<ARMOperand>
3843  CreateInstSyncBarrierOpt(ARM_ISB::InstSyncBOpt Opt, SMLoc S) {
3844  auto Op = std::make_unique<ARMOperand>(k_InstSyncBarrierOpt);
3845  Op->ISBOpt.Val = Opt;
3846  Op->StartLoc = S;
3847  Op->EndLoc = S;
3848  return Op;
3849  }
3850 
3851  static std::unique_ptr<ARMOperand>
3852  CreateTraceSyncBarrierOpt(ARM_TSB::TraceSyncBOpt Opt, SMLoc S) {
3853  auto Op = std::make_unique<ARMOperand>(k_TraceSyncBarrierOpt);
3854  Op->TSBOpt.Val = Opt;
3855  Op->StartLoc = S;
3856  Op->EndLoc = S;
3857  return Op;
3858  }
3859 
3860  static std::unique_ptr<ARMOperand> CreateProcIFlags(ARM_PROC::IFlags IFlags,
3861  SMLoc S) {
3862  auto Op = std::make_unique<ARMOperand>(k_ProcIFlags);
3863  Op->IFlags.Val = IFlags;
3864  Op->StartLoc = S;
3865  Op->EndLoc = S;
3866  return Op;
3867  }
3868 
3869  static std::unique_ptr<ARMOperand> CreateMSRMask(unsigned MMask, SMLoc S) {
3870  auto Op = std::make_unique<ARMOperand>(k_MSRMask);
3871  Op->MMask.Val = MMask;
3872  Op->StartLoc = S;
3873  Op->EndLoc = S;
3874  return Op;
3875  }
3876 
3877  static std::unique_ptr<ARMOperand> CreateBankedReg(unsigned Reg, SMLoc S) {
3878  auto Op = std::make_unique<ARMOperand>(k_BankedReg);
3879  Op->BankedReg.Val = Reg;
3880  Op->StartLoc = S;
3881  Op->EndLoc = S;
3882  return Op;
3883  }
3884 };
3885 
3886 } // end anonymous namespace.
3887 
3888 void ARMOperand::print(raw_ostream &OS) const {
3889  auto RegName = [](unsigned Reg) {
3890  if (Reg)
3892  else
3893  return "noreg";
3894  };
3895 
3896  switch (Kind) {
3897  case k_CondCode:
3898  OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
3899  break;
3900  case k_VPTPred:
3901  OS << "<ARMVCC::" << ARMVPTPredToString(getVPTPred()) << ">";
3902  break;
3903  case k_CCOut:
3904  OS << "<ccout " << RegName(getReg()) << ">";
3905  break;
3906  case k_ITCondMask: {
3907  static const char *const MaskStr[] = {
3908  "(invalid)", "(tttt)", "(ttt)", "(ttte)",
3909  "(tt)", "(ttet)", "(tte)", "(ttee)",
3910  "(t)", "(tett)", "(tet)", "(tete)",
3911  "(te)", "(teet)", "(tee)", "(teee)",
3912  };
3913  assert((ITMask.Mask & 0xf) == ITMask.Mask);
3914  OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
3915  break;
3916  }
3917  case k_CoprocNum:
3918  OS << "<coprocessor number: " << getCoproc() << ">";
3919  break;
3920  case k_CoprocReg:
3921  OS << "<coprocessor register: " << getCoproc() << ">";
3922  break;
3923  case k_CoprocOption:
3924  OS << "<coprocessor option: " << CoprocOption.Val << ">";
3925  break;
3926  case k_MSRMask:
3927  OS << "<mask: " << getMSRMask() << ">";
3928  break;
3929  case k_BankedReg:
3930  OS << "<banked reg: " << getBankedReg() << ">";
3931  break;
3932  case k_Immediate:
3933  OS << *getImm();
3934  break;
3935  case k_MemBarrierOpt:
3936  OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt(), false) << ">";
3937  break;
3938  case k_InstSyncBarrierOpt:
3939  OS << "<ARM_ISB::" << InstSyncBOptToString(getInstSyncBarrierOpt()) << ">";
3940  break;
3941  case k_TraceSyncBarrierOpt:
3942  OS << "<ARM_TSB::" << TraceSyncBOptToString(getTraceSyncBarrierOpt()) << ">";
3943  break;
3944  case k_Memory:
3945  OS << "<memory";
3946  if (Memory.BaseRegNum)
3947  OS << " base:" << RegName(Memory.BaseRegNum);
3948  if (Memory.OffsetImm)
3949  OS << " offset-imm:" << *Memory.OffsetImm;
3950  if (Memory.OffsetRegNum)
3951  OS << " offset-reg:" << (Memory.isNegative ? "-" : "")
3952  << RegName(Memory.OffsetRegNum);
3953  if (Memory.ShiftType != ARM_AM::no_shift) {
3954  OS << " shift-type:" << ARM_AM::getShiftOpcStr(Memory.ShiftType);
3955  OS << " shift-imm:" << Memory.ShiftImm;
3956  }
3957  if (Memory.Alignment)
3958  OS << " alignment:" << Memory.Alignment;
3959  OS << ">";
3960  break;
3961  case k_PostIndexRegister:
3962  OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
3963  << RegName(PostIdxReg.RegNum);
3964  if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
3965  OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
3966  << PostIdxReg.ShiftImm;
3967  OS << ">";
3968  break;
3969  case k_ProcIFlags: {
3970  OS << "<ARM_PROC::";
3971  unsigned IFlags = getProcIFlags();
3972  for (int i=2; i >= 0; --i)
3973  if (IFlags & (1 << i))
3974  OS << ARM_PROC::IFlagsToString(1 << i);
3975  OS << ">";
3976  break;
3977  }
3978  case k_Register:
3979  OS << "<register " << RegName(getReg()) << ">";
3980  break;
3981  case k_ShifterImmediate:
3982  OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
3983  << " #" << ShifterImm.Imm << ">";
3984  break;
3985  case k_ShiftedRegister:
3986  OS << "<so_reg_reg " << RegName(RegShiftedReg.SrcReg) << " "
3987  << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy) << " "
3988  << RegName(RegShiftedReg.ShiftReg) << ">";
3989  break;
3990  case k_ShiftedImmediate:
3991  OS << "<so_reg_imm " << RegName(RegShiftedImm.SrcReg) << " "
3992  << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy) << " #"
3993  << RegShiftedImm.ShiftImm << ">";
3994  break;
3995  case k_RotateImmediate:
3996  OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
3997  break;
3998  case k_ModifiedImmediate:
3999  OS << "<mod_imm #" << ModImm.Bits << ", #"
4000  << ModImm.Rot << ")>";
4001  break;
4002  case k_ConstantPoolImmediate:
4003  OS << "<constant_pool_imm #" << *getConstantPoolImm();
4004  break;
4005  case k_BitfieldDescriptor:
4006  OS << "<bitfield " << "lsb: " << Bitfield.LSB
4007  << ", width: " << Bitfield.Width << ">";
4008  break;
4009  case k_RegisterList:
4010  case k_RegisterListWithAPSR:
4011  case k_DPRRegisterList:
4012  case k_SPRRegisterList:
4013  case k_FPSRegisterListWithVPR:
4014  case k_FPDRegisterListWithVPR: {
4015  OS << "<register_list ";
4016 
4017  const SmallVectorImpl<unsigned> &RegList = getRegList();
4019  I = RegList.begin(), E = RegList.end(); I != E; ) {
4020  OS << RegName(*I);
4021  if (++I < E) OS << ", ";
4022  }
4023 
4024  OS << ">";
4025  break;
4026  }
4027  case k_VectorList:
4028  OS << "<vector_list " << VectorList.Count << " * "
4029  << RegName(VectorList.RegNum) << ">";
4030  break;
4031  case k_VectorListAllLanes:
4032  OS << "<vector_list(all lanes) " << VectorList.Count << " * "
4033  << RegName(VectorList.RegNum) << ">";
4034  break;
4035  case k_VectorListIndexed:
4036  OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
4037  << VectorList.Count << " * " << RegName(VectorList.RegNum) << ">";
4038  break;
4039  case k_Token:
4040  OS << "'" << getToken() << "'";
4041  break;
4042  case k_VectorIndex:
4043  OS << "<vectorindex " << getVectorIndex() << ">";
4044  break;
4045  }
4046 }
4047 
4048 /// @name Auto-generated Match Functions
4049 /// {
4050 
4051 static unsigned MatchRegisterName(StringRef Name);
4052 
4053 /// }
4054 
4055 bool ARMAsmParser::ParseRegister(unsigned &RegNo,
4056  SMLoc &StartLoc, SMLoc &EndLoc) {
4057  const AsmToken &Tok = getParser().getTok();
4058  StartLoc = Tok.getLoc();
4059  EndLoc = Tok.getEndLoc();
4060  RegNo = tryParseRegister();
4061 
4062  return (RegNo == (unsigned)-1);
4063 }
4064 
4065 OperandMatchResultTy ARMAsmParser::tryParseRegister(unsigned &RegNo,
4066  SMLoc &StartLoc,
4067  SMLoc &EndLoc) {
4068  if (ParseRegister(RegNo, StartLoc, EndLoc))
4069  return MatchOperand_NoMatch;
4070  return MatchOperand_Success;
4071 }
4072 
/// Try to parse a register name. The token must be an Identifier when called,
/// and if it is a register name the token is eaten and the register number is
/// returned. Otherwise return -1.
int ARMAsmParser::tryParseRegister() {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier)) return -1;

  // Register names are case insensitive; canonicalize before matching.
  std::string lowerCase = Tok.getString().lower();
  unsigned RegNum = MatchRegisterName(lowerCase);
  if (!RegNum) {
    // Not a canonical name; try the numeric/ABI alias spellings.
    RegNum = StringSwitch<unsigned>(lowerCase)
      .Case("r13", ARM::SP)
      .Case("r14", ARM::LR)
      .Case("r15", ARM::PC)
      .Case("ip", ARM::R12)
      // Additional register name aliases for 'gas' compatibility.
      .Case("a1", ARM::R0)
      .Case("a2", ARM::R1)
      .Case("a3", ARM::R2)
      .Case("a4", ARM::R3)
      .Case("v1", ARM::R4)
      .Case("v2", ARM::R5)
      .Case("v3", ARM::R6)
      .Case("v4", ARM::R7)
      .Case("v5", ARM::R8)
      .Case("v6", ARM::R9)
      .Case("v7", ARM::R10)
      .Case("v8", ARM::R11)
      .Case("sb", ARM::R9)
      .Case("sl", ARM::R10)
      .Case("fp", ARM::R11)
      .Default(0);
  }
  if (!RegNum) {
    // Check for aliases registered via .req. Canonicalize to lower case.
    // That's more consistent since register names are case insensitive, and
    // it's how the original entry was passed in from MC/MCParser/AsmParser.
    StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
    // If no match, return failure.
    if (Entry == RegisterReqs.end())
      return -1;
    Parser.Lex(); // Eat identifier token.
    return Entry->getValue();
  }

  // Some FPUs only have 16 D registers, so D16-D31 are invalid
  if (!hasD32() && RegNum >= ARM::D16 && RegNum <= ARM::D31)
    return -1;

  Parser.Lex(); // Eat identifier token.

  return RegNum;
}
4127 
4128 // Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
4129 // If a recoverable error occurs, return 1. If an irrecoverable error
4130 // occurs, return -1. An irrecoverable error is one where tokens have been
4131 // consumed in the process of trying to parse the shifter (i.e., when it is
4132 // indeed a shifter operand, but malformed).
4133 int ARMAsmParser::tryParseShiftRegister(OperandVector &Operands) {
4134  MCAsmParser &Parser = getParser();
4135  SMLoc S = Parser.getTok().getLoc();
4136  const AsmToken &Tok = Parser.getTok();
4137  if (Tok.isNot(AsmToken::Identifier))
4138  return -1;
4139 
4140  std::string lowerCase = Tok.getString().lower();
4142  .Case("asl", ARM_AM::lsl)
4143  .Case("lsl", ARM_AM::lsl)
4144  .Case("lsr", ARM_AM::lsr)
4145  .Case("asr", ARM_AM::asr)
4146  .Case("ror", ARM_AM::ror)
4147  .Case("rrx", ARM_AM::rrx)
4149 
4150  if (ShiftTy == ARM_AM::no_shift)
4151  return 1;
4152 
4153  Parser.Lex(); // Eat the operator.
4154 
4155  // The source register for the shift has already been added to the
4156  // operand list, so we need to pop it off and combine it into the shifted
4157  // register operand instead.
4158  std::unique_ptr<ARMOperand> PrevOp(
4159  (ARMOperand *)Operands.pop_back_val().release());
4160  if (!PrevOp->isReg())
4161  return Error(PrevOp->getStartLoc(), "shift must be of a register");
4162  int SrcReg = PrevOp->getReg();
4163 
4164  SMLoc EndLoc;
4165  int64_t Imm = 0;
4166  int ShiftReg = 0;
4167  if (ShiftTy == ARM_AM::rrx) {
4168  // RRX Doesn't have an explicit shift amount. The encoder expects
4169  // the shift register to be the same as the source register. Seems odd,
4170  // but OK.
4171  ShiftReg = SrcReg;
4172  } else {
4173  // Figure out if this is shifted by a constant or a register (for non-RRX).
4174  if (Parser.getTok().is(AsmToken::Hash) ||
4175  Parser.getTok().is(AsmToken::Dollar)) {
4176  Parser.Lex(); // Eat hash.
4177  SMLoc ImmLoc = Parser.getTok().getLoc();
4178  const MCExpr *ShiftExpr = nullptr;
4179  if (getParser().parseExpression(ShiftExpr, EndLoc)) {
4180  Error(ImmLoc, "invalid immediate shift value");
4181  return -1;
4182  }
4183  // The expression must be evaluatable as an immediate.
4184  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
4185  if (!CE) {
4186  Error(ImmLoc, "invalid immediate shift value");
4187  return -1;
4188  }
4189  // Range check the immediate.
4190  // lsl, ror: 0 <= imm <= 31
4191  // lsr, asr: 0 <= imm <= 32
4192  Imm = CE->getValue();
4193  if (Imm < 0 ||
4194  ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
4195  ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
4196  Error(ImmLoc, "immediate shift value out of range");
4197  return -1;
4198  }
4199  // shift by zero is a nop. Always send it through as lsl.
4200  // ('as' compatibility)
4201  if (Imm == 0)
4202  ShiftTy = ARM_AM::lsl;
4203  } else if (Parser.getTok().is(AsmToken::Identifier)) {
4204  SMLoc L = Parser.getTok().getLoc();
4205  EndLoc = Parser.getTok().getEndLoc();
4206  ShiftReg = tryParseRegister();
4207  if (ShiftReg == -1) {
4208  Error(L, "expected immediate or register in shift operand");
4209  return -1;
4210  }
4211  } else {
4212  Error(Parser.getTok().getLoc(),
4213  "expected immediate or register in shift operand");
4214  return -1;
4215  }
4216  }
4217 
4218  if (ShiftReg && ShiftTy != ARM_AM::rrx)
4219  Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
4220  ShiftReg, Imm,
4221  S, EndLoc));
4222  else
4223  Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
4224  S, EndLoc));
4225 
4226  return 0;
4227 }
4228 
/// Try to parse a register name. The token must be an Identifier when called.
/// If it's a register, an AsmOperand is created. Another AsmOperand is created
/// if there is a "writeback". 'true' if it's not a register.
///
/// TODO this is likely to change to allow different register types and or to
/// parse for a specific register type.
bool ARMAsmParser::tryParseRegisterWithWriteBack(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc RegStartLoc = Parser.getTok().getLoc();
  SMLoc RegEndLoc = Parser.getTok().getEndLoc();
  int RegNo = tryParseRegister();
  if (RegNo == -1)
    return true;

  Operands.push_back(ARMOperand::CreateReg(RegNo, RegStartLoc, RegEndLoc));

  // A trailing '!' requests writeback; it is kept as a separate token
  // operand so instruction matching can key off it.
  const AsmToken &ExclaimTok = Parser.getTok();
  if (ExclaimTok.is(AsmToken::Exclaim)) {
    Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
                                               ExclaimTok.getLoc()));
    Parser.Lex(); // Eat exclaim token
    return false;
  }

  // Also check for an index operand. This is only legal for vector registers,
  // but that'll get caught OK in operand matching, so we don't need to
  // explicitly filter everything else out here.
  if (Parser.getTok().is(AsmToken::LBrac)) {
    SMLoc SIdx = Parser.getTok().getLoc();
    Parser.Lex(); // Eat left bracket token.

    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return true;
    // The lane index must fold to a constant at parse time.
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE)
      return TokError("immediate value expected for vector index");

    if (Parser.getTok().isNot(AsmToken::RBrac))
      return Error(Parser.getTok().getLoc(), "']' expected");

    SMLoc E = Parser.getTok().getEndLoc();
    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
                                                     SIdx, E,
                                                     getContext()));
  }

  return false;
}
4280 
4281 /// MatchCoprocessorOperandName - Try to parse an coprocessor related
4282 /// instruction with a symbolic operand name.
4283 /// We accept "crN" syntax for GAS compatibility.
4284 /// <operand-name> ::= <prefix><number>
4285 /// If CoprocOp is 'c', then:
4286 /// <prefix> ::= c | cr
4287 /// If CoprocOp is 'p', then :
4288 /// <prefix> ::= p
4289 /// <number> ::= integer in range [0, 15]
4290 static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
4291  // Use the same layout as the tablegen'erated register name matcher. Ugly,
4292  // but efficient.
4293  if (Name.size() < 2 || Name[0] != CoprocOp)
4294  return -1;
4295  Name = (Name[1] == 'r') ? Name.drop_front(2) : Name.drop_front();
4296 
4297  switch (Name.size()) {
4298  default: return -1;
4299  case 1:
4300  switch (Name[0]) {
4301  default: return -1;
4302  case '0': return 0;
4303  case '1': return 1;
4304  case '2': return 2;
4305  case '3': return 3;
4306  case '4': return 4;
4307  case '5': return 5;
4308  case '6': return 6;
4309  case '7': return 7;
4310  case '8': return 8;
4311  case '9': return 9;
4312  }
4313  case 2:
4314  if (Name[0] != '1')
4315  return -1;
4316  switch (Name[1]) {
4317  default: return -1;
4318  // CP10 and CP11 are VFP/NEON and so vector instructions should be used.
4319  // However, old cores (v5/v6) did use them in that way.
4320  case '0': return 10;
4321  case '1': return 11;
4322  case '2': return 12;
4323  case '3': return 13;
4324  case '4': return 14;
4325  case '5': return 15;
4326  }
4327  }
4328 }
4329 
4330 /// parseITCondCode - Try to parse a condition code for an IT instruction.
4332 ARMAsmParser::parseITCondCode(OperandVector &Operands) {
4333  MCAsmParser &Parser = getParser();
4334  SMLoc S = Parser.getTok().getLoc();
4335  const AsmToken &Tok = Parser.getTok();
4336  if (!Tok.is(AsmToken::Identifier))
4337  return MatchOperand_NoMatch;
4338  unsigned CC = ARMCondCodeFromString(Tok.getString());
4339  if (CC == ~0U)
4340  return MatchOperand_NoMatch;
4341  Parser.Lex(); // Eat the token.
4342 
4343  Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
4344 
4345  return MatchOperand_Success;
4346 }
4347 
4348 /// parseCoprocNumOperand - Try to parse an coprocessor number operand. The
4349 /// token must be an Identifier when called, and if it is a coprocessor
4350 /// number, the token is eaten and the operand is added to the operand list.
4352 ARMAsmParser::parseCoprocNumOperand(OperandVector &Operands) {
4353  MCAsmParser &Parser = getParser();
4354  SMLoc S = Parser.getTok().getLoc();
4355  const AsmToken &Tok = Parser.getTok();
4356  if (Tok.isNot(AsmToken::Identifier))
4357  return MatchOperand_NoMatch;
4358 
4359  int Num = MatchCoprocessorOperandName(Tok.getString().lower(), 'p');
4360  if (Num == -1)
4361  return MatchOperand_NoMatch;
4362  if (!isValidCoprocessorNumber(Num, getSTI().getFeatureBits()))
4363  return MatchOperand_NoMatch;
4364 
4365  Parser.Lex(); // Eat identifier token.
4366  Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
4367  return MatchOperand_Success;
4368 }
4369 
4370 /// parseCoprocRegOperand - Try to parse an coprocessor register operand. The
4371 /// token must be an Identifier when called, and if it is a coprocessor
4372 /// number, the token is eaten and the operand is added to the operand list.
4374 ARMAsmParser::parseCoprocRegOperand(OperandVector &Operands) {
4375  MCAsmParser &Parser = getParser();
4376  SMLoc S = Parser.getTok().getLoc();
4377  const AsmToken &Tok = Parser.getTok();
4378  if (Tok.isNot(AsmToken::Identifier))
4379  return MatchOperand_NoMatch;
4380 
4381  int Reg = MatchCoprocessorOperandName(Tok.getString().lower(), 'c');
4382  if (Reg == -1)
4383  return MatchOperand_NoMatch;
4384 
4385  Parser.Lex(); // Eat identifier token.
4386  Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
4387  return MatchOperand_Success;
4388 }
4389 
4390 /// parseCoprocOptionOperand - Try to parse an coprocessor option operand.
4391 /// coproc_option : '{' imm0_255 '}'
4393 ARMAsmParser::parseCoprocOptionOperand(OperandVector &Operands) {
4394  MCAsmParser &Parser = getParser();
4395  SMLoc S = Parser.getTok().getLoc();
4396 
4397  // If this isn't a '{', this isn't a coprocessor immediate operand.
4398  if (Parser.getTok().isNot(AsmToken::LCurly))
4399  return MatchOperand_NoMatch;
4400  Parser.Lex(); // Eat the '{'
4401 
4402  const MCExpr *Expr;
4403  SMLoc Loc = Parser.getTok().getLoc();
4404  if (getParser().parseExpression(Expr)) {
4405  Error(Loc, "illegal expression");
4406  return MatchOperand_ParseFail;
4407  }
4408  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4409  if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
4410  Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
4411  return MatchOperand_ParseFail;
4412  }
4413  int Val = CE->getValue();
4414 
4415  // Check for and consume the closing '}'
4416  if (Parser.getTok().isNot(AsmToken::RCurly))
4417  return MatchOperand_ParseFail;
4418  SMLoc E = Parser.getTok().getEndLoc();
4419  Parser.Lex(); // Eat the '}'
4420 
4421  Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
4422  return MatchOperand_Success;
4423 }
4424 
4425 // For register list parsing, we need to map from raw GPR register numbering
4426 // to the enumeration values. The enumeration values aren't sorted by
4427 // register number due to our using "sp", "lr" and "pc" as canonical names.
4428 static unsigned getNextRegister(unsigned Reg) {
4429  // If this is a GPR, we need to do it manually, otherwise we can rely
4430  // on the sort ordering of the enumeration since the other reg-classes
4431  // are sane.
4432  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
4433  return Reg + 1;
4434  switch(Reg) {
4435  default: llvm_unreachable("Invalid GPR number!");
4436  case ARM::R0: return ARM::R1; case ARM::R1: return ARM::R2;
4437  case ARM::R2: return ARM::R3; case ARM::R3: return ARM::R4;
4438  case ARM::R4: return ARM::R5; case ARM::R5: return ARM::R6;
4439  case ARM::R6: return ARM::R7; case ARM::R7: return ARM::R8;
4440  case ARM::R8: return ARM::R9; case ARM::R9: return ARM::R10;
4441  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
4442  case ARM::R12: return ARM::SP; case ARM::SP: return ARM::LR;
4443  case ARM::LR: return ARM::PC; case ARM::PC: return ARM::R0;
4444  }
4445 }
4446 
4447 // Insert an <Encoding, Register> pair in an ordered vector. Return true on
4448 // success, or false, if duplicate encoding found.
4449 static bool
4450 insertNoDuplicates(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
4451  unsigned Enc, unsigned Reg) {
4452  Regs.emplace_back(Enc, Reg);
4453  for (auto I = Regs.rbegin(), J = I + 1, E = Regs.rend(); J != E; ++I, ++J) {
4454  if (J->first == Enc) {
4455  Regs.erase(J.base());
4456  return false;
4457  }
4458  if (J->first < Enc)
4459  break;
4460  std::swap(*I, *J);
4461  }
4462  return true;
4463 }
4464 
4465 /// Parse a register list.
4466 bool ARMAsmParser::parseRegisterList(OperandVector &Operands,
4467  bool EnforceOrder) {
4468  MCAsmParser &Parser = getParser();
4469  if (Parser.getTok().isNot(AsmToken::LCurly))
4470  return TokError("Token is not a Left Curly Brace");
4471  SMLoc S = Parser.getTok().getLoc();
4472  Parser.Lex(); // Eat '{' token.
4473  SMLoc RegLoc = Parser.getTok().getLoc();
4474 
4475  // Check the first register in the list to see what register class
4476  // this is a list of.
4477  int Reg = tryParseRegister();
4478  if (Reg == -1)
4479  return Error(RegLoc, "register expected");
4480 
4481  // The reglist instructions have at most 16 registers, so reserve
4482  // space for that many.
4483  int EReg = 0;
4485 
4486  // Allow Q regs and just interpret them as the two D sub-registers.
4487  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4488  Reg = getDRegFromQReg(Reg);
4489  EReg = MRI->getEncodingValue(Reg);
4490  Registers.emplace_back(EReg, Reg);
4491  ++Reg;
4492  }
4493  const MCRegisterClass *RC;
4494  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
4495  RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
4496  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
4497  RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
4498  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
4499  RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
4500  else if (ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg))
4501  RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
4502  else
4503  return Error(RegLoc, "invalid register in register list");
4504 
4505  // Store the register.
4506  EReg = MRI->getEncodingValue(Reg);
4507  Registers.emplace_back(EReg, Reg);
4508 
4509  // This starts immediately after the first register token in the list,
4510  // so we can see either a comma or a minus (range separator) as a legal
4511  // next token.
4512  while (Parser.getTok().is(AsmToken::Comma) ||
4513  Parser.getTok().is(AsmToken::Minus)) {
4514  if (Parser.getTok().is(AsmToken::Minus)) {
4515  Parser.Lex(); // Eat the minus.
4516  SMLoc AfterMinusLoc = Parser.getTok().getLoc();
4517  int EndReg = tryParseRegister();
4518  if (EndReg == -1)
4519  return Error(AfterMinusLoc, "register expected");
4520  // Allow Q regs and just interpret them as the two D sub-registers.
4521  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
4522  EndReg = getDRegFromQReg(EndReg) + 1;
4523  // If the register is the same as the start reg, there's nothing
4524  // more to do.
4525  if (Reg == EndReg)
4526  continue;
4527  // The register must be in the same register class as the first.
4528  if (!RC->contains(EndReg))
4529  return Error(AfterMinusLoc, "invalid register in register list");
4530  // Ranges must go from low to high.
4531  if (MRI->getEncodingValue(Reg) > MRI->getEncodingValue(EndReg))
4532  return Error(AfterMinusLoc, "bad range in register list");
4533 
4534  // Add all the registers in the range to the register list.
4535  while (Reg != EndReg) {
4536  Reg = getNextRegister(Reg);
4537  EReg = MRI->getEncodingValue(Reg);
4538  if (!insertNoDuplicates(Registers, EReg, Reg)) {
4539  Warning(AfterMinusLoc, StringRef("duplicated register (") +
4541  ") in register list");
4542  }
4543  }
4544  continue;
4545  }
4546  Parser.Lex(); // Eat the comma.
4547  RegLoc = Parser.getTok().getLoc();
4548  int OldReg = Reg;
4549  const AsmToken RegTok = Parser.getTok();
4550  Reg = tryParseRegister();
4551  if (Reg == -1)
4552  return Error(RegLoc, "register expected");
4553  // Allow Q regs and just interpret them as the two D sub-registers.
4554  bool isQReg = false;
4555  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4556  Reg = getDRegFromQReg(Reg);
4557  isQReg = true;
4558  }
4559  if (!RC->contains(Reg) &&
4560  RC->getID() == ARMMCRegisterClasses[ARM::GPRRegClassID].getID() &&
4561  ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg)) {
4562  // switch the register classes, as GPRwithAPSRnospRegClassID is a partial
4563  // subset of GPRRegClassId except it contains APSR as well.
4564  RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
4565  }
4566  if (Reg == ARM::VPR &&
4567  (RC == &ARMMCRegisterClasses[ARM::SPRRegClassID] ||
4568  RC == &ARMMCRegisterClasses[ARM::DPRRegClassID] ||
4569  RC == &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID])) {
4570  RC = &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID];
4571  EReg = MRI->getEncodingValue(Reg);
4572  if (!insertNoDuplicates(Registers, EReg, Reg)) {
4573  Warning(RegLoc, "duplicated register (" + RegTok.getString() +
4574  ") in register list");
4575  }
4576  continue;
4577  }
4578  // The register must be in the same register class as the first.
4579  if (!RC->contains(Reg))
4580  return Error(RegLoc, "invalid register in register list");
4581  // In most cases, the list must be monotonically increasing. An
4582  // exception is CLRM, which is order-independent anyway, so
4583  // there's no potential for confusion if you write clrm {r2,r1}
4584  // instead of clrm {r1,r2}.
4585  if (EnforceOrder &&
4586  MRI->getEncodingValue(Reg) < MRI->getEncodingValue(OldReg)) {
4587  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
4588  Warning(RegLoc, "register list not in ascending order");
4589  else if (!ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg))
4590  return Error(RegLoc, "register list not in ascending order");
4591  }
4592  // VFP register lists must also be contiguous.
4593  if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
4594  RC != &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID] &&
4595  Reg != OldReg + 1)
4596  return Error(RegLoc, "non-contiguous register range");
4597  EReg = MRI->getEncodingValue(Reg);
4598  if (!insertNoDuplicates(Registers, EReg, Reg)) {
4599  Warning(RegLoc, "duplicated register (" + RegTok.getString() +
4600  ") in register list");
4601  }
4602  if (isQReg) {
4603  EReg = MRI->getEncodingValue(++Reg);
4604  Registers.emplace_back(EReg, Reg);
4605  }
4606  }
4607 
4608  if (Parser.getTok().isNot(AsmToken::RCurly))
4609  return Error(Parser.getTok().getLoc(), "'}' expected");
4610  SMLoc E = Parser.getTok().getEndLoc();
4611  Parser.Lex(); // Eat '}' token.
4612 
4613  // Push the register list operand.
4614  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
4615 
4616  // The ARM system instruction variants for LDM/STM have a '^' token here.
4617  if (Parser.getTok().is(AsmToken::Caret)) {
4618  Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
4619  Parser.Lex(); // Eat '^' token.
4620  }
4621 
4622  return false;
4623 }
4624 
4625 // Helper function to parse the lane index for vector lists.
4626 OperandMatchResultTy ARMAsmParser::
4627 parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index, SMLoc &EndLoc) {
4628  MCAsmParser &Parser = getParser();
4629  Index = 0; // Always return a defined index value.
4630  if (Parser.getTok().is(AsmToken::LBrac)) {
4631  Parser.Lex(); // Eat the '['.
4632  if (Parser.getTok().is(AsmToken::RBrac)) {
4633  // "Dn[]" is the 'all lanes' syntax.
4634  LaneKind = AllLanes;
4635  EndLoc = Parser.getTok().getEndLoc();
4636  Parser.Lex(); // Eat the ']'.
4637  return MatchOperand_Success;
4638  }
4639 
4640  // There's an optional '#' token here. Normally there wouldn't be, but
4641  // inline assemble puts one in, and it's friendly to accept that.
4642  if (Parser.getTok().is(AsmToken::Hash))
4643  Parser.Lex(); // Eat '#' or '$'.
4644 
4645  const MCExpr *LaneIndex;
4646  SMLoc Loc = Parser.getTok().getLoc();
4647  if (getParser().parseExpression(LaneIndex)) {
4648  Error(Loc, "illegal expression");
4649  return MatchOperand_ParseFail;
4650  }
4651  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
4652  if (!CE) {
4653  Error(Loc, "lane index must be empty or an integer");
4654  return MatchOperand_ParseFail;
4655  }
4656  if (Parser.getTok().isNot(AsmToken::RBrac)) {
4657  Error(Parser.getTok().getLoc(), "']' expected");
4658  return MatchOperand_ParseFail;
4659  }
4660  EndLoc = Parser.getTok().getEndLoc();
4661  Parser.Lex(); // Eat the ']'.
4662  int64_t Val = CE->getValue();
4663 
4664  // FIXME: Make this range check context sensitive for .8, .16, .32.
4665  if (Val < 0 || Val > 7) {
4666  Error(Parser.getTok().getLoc(), "lane index out of range");
4667  return MatchOperand_ParseFail;
4668  }
4669  Index = Val;
4670  LaneKind = IndexedLane;
4671  return MatchOperand_Success;
4672  }
4673  LaneKind = NoLanes;
4674  return MatchOperand_Success;
4675 }
4676 
4677 // parse a vector register list
4679 ARMAsmParser::parseVectorList(OperandVector &Operands) {
4680  MCAsmParser &Parser = getParser();
4681  VectorLaneTy LaneKind;
4682  unsigned LaneIndex;
4683  SMLoc S = Parser.getTok().getLoc();
4684  // As an extension (to match gas), support a plain D register or Q register
4685  // (without encosing curly braces) as a single or double entry list,
4686  // respectively.
4687  if (!hasMVE() && Parser.getTok().is(AsmToken::Identifier)) {
4688  SMLoc E = Parser.getTok().getEndLoc();
4689  int Reg = tryParseRegister();
4690  if (Reg == -1)
4691  return MatchOperand_NoMatch;
4692  if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
4693  OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
4694  if (Res != MatchOperand_Success)
4695  return Res;
4696  switch (LaneKind) {
4697  case NoLanes:
4698  Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
4699  break;
4700  case AllLanes:
4701  Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
4702  S, E));
4703  break;
4704  case IndexedLane:
4705  Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
4706  LaneIndex,
4707  false, S, E));
4708  break;
4709  }
4710  return MatchOperand_Success;
4711  }
4712  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4713  Reg = getDRegFromQReg(Reg);
4714  OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
4715  if (Res != MatchOperand_Success)
4716  return Res;
4717  switch (LaneKind) {
4718  case NoLanes:
4719  Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
4720  &ARMMCRegisterClasses[ARM::DPairRegClassID]);
4721  Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
4722  break;
4723  case AllLanes:
4724  Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
4725  &ARMMCRegisterClasses[ARM::DPairRegClassID]);
4726  Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
4727  S, E));
4728  break;
4729  case IndexedLane:
4730  Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
4731  LaneIndex,
4732  false, S, E));
4733  break;
4734  }
4735  return MatchOperand_Success;
4736  }
4737  Error(S, "vector register expected");
4738  return MatchOperand_ParseFail;
4739  }
4740 
4741  if (Parser.getTok().isNot(AsmToken::LCurly))
4742  return MatchOperand_NoMatch;
4743 
4744  Parser.Lex(); // Eat '{' token.
4745  SMLoc RegLoc = Parser.getTok().getLoc();
4746 
4747  int Reg = tryParseRegister();
4748  if (Reg == -1) {
4749  Error(RegLoc, "register expected");
4750  return MatchOperand_ParseFail;
4751  }
4752  unsigned Count = 1;
4753  int Spacing = 0;
4754  unsigned FirstReg = Reg;
4755 
4756  if (hasMVE() && !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Reg)) {
4757  Error(Parser.getTok().getLoc(), "vector register in range Q0-Q7 expected");
4758  return MatchOperand_ParseFail;
4759  }
4760  // The list is of D registers, but we also allow Q regs and just interpret
4761  // them as the two D sub-registers.
4762  else if (!hasMVE() && ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4763  FirstReg = Reg = getDRegFromQReg(Reg);
4764  Spacing = 1; // double-spacing requires explicit D registers, otherwise
4765  // it's ambiguous with four-register single spaced.
4766  ++Reg;
4767  ++Count;
4768  }
4769 
4770  SMLoc E;
4771  if (parseVectorLane(LaneKind, LaneIndex, E) != MatchOperand_Success)
4772  return MatchOperand_ParseFail;
4773 
4774  while (Parser.getTok().is(AsmToken::Comma) ||
4775  Parser.getTok().is(AsmToken::Minus)) {
4776  if (Parser.getTok().is(AsmToken::Minus)) {
4777  if (!Spacing)
4778  Spacing = 1; // Register range implies a single spaced list.
4779  else if (Spacing == 2) {
4780  Error(Parser.getTok().getLoc(),
4781  "sequential registers in double spaced list");
4782  return MatchOperand_ParseFail;
4783  }
4784  Parser.Lex(); // Eat the minus.
4785  SMLoc AfterMinusLoc = Parser.getTok().getLoc();
4786  int EndReg = tryParseRegister();
4787  if (EndReg == -1) {
4788  Error(AfterMinusLoc, "register expected");
4789  return MatchOperand_ParseFail;
4790  }
4791  // Allow Q regs and just interpret them as the two D sub-registers.
4792  if (!hasMVE() && ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
4793  EndReg = getDRegFromQReg(EndReg) + 1;
4794  // If the register is the same as the start reg, there's nothing
4795  // more to do.
4796  if (Reg == EndReg)
4797  continue;
4798  // The register must be in the same register class as the first.
4799  if ((hasMVE() &&
4800  !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(EndReg)) ||
4801  (!hasMVE() &&
4802  !ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg))) {
4803  Error(AfterMinusLoc, "invalid register in register list");
4804  return MatchOperand_ParseFail;
4805  }
4806  // Ranges must go from low to high.
4807  if (Reg > EndReg) {
4808  Error(AfterMinusLoc, "bad range in register list");
4809  return MatchOperand_ParseFail;
4810  }
4811  // Parse the lane specifier if present.
4812  VectorLaneTy NextLaneKind;
4813  unsigned NextLaneIndex;
4814  if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
4816  return MatchOperand_ParseFail;
4817  if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
4818  Error(AfterMinusLoc, "mismatched lane index in register list");
4819  return MatchOperand_ParseFail;
4820  }
4821 
4822  // Add all the registers in the range to the register list.
4823  Count += EndReg - Reg;
4824  Reg = EndReg;
4825  continue;
4826  }
4827  Parser.Lex(); // Eat the comma.
4828  RegLoc = Parser.getTok().getLoc();
4829  int OldReg = Reg;
4830  Reg = tryParseRegister();
4831  if (Reg == -1) {
4832  Error(RegLoc, "register expected");
4833  return MatchOperand_ParseFail;
4834  }
4835 
4836  if (hasMVE()) {
4837  if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Reg)) {
4838  Error(RegLoc, "vector register in range Q0-Q7 expected");
4839  return MatchOperand_ParseFail;
4840  }
4841  Spacing = 1;
4842  }
4843  // vector register lists must be contiguous.
4844  // It's OK to use the enumeration values directly here rather, as the
4845  // VFP register classes have the enum sorted properly.
4846  //
4847  // The list is of D registers, but we also allow Q regs and just interpret
4848  // them as the two D sub-registers.
4849  else if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4850  if (!Spacing)
4851  Spacing = 1; // Register range implies a single spaced list.
4852  else if (Spacing == 2) {
4853  Error(RegLoc,
4854  "invalid register in double-spaced list (must be 'D' register')");
4855  return MatchOperand_ParseFail;
4856  }
4857  Reg = getDRegFromQReg(Reg);
4858  if (Reg != OldReg + 1) {
4859  Error(RegLoc, "non-contiguous register range");
4860  return MatchOperand_ParseFail;
4861  }
4862  ++Reg;
4863  Count += 2;
4864  // Parse the lane specifier if present.
4865  VectorLaneTy NextLaneKind;
4866  unsigned NextLaneIndex;
4867  SMLoc LaneLoc = Parser.getTok().getLoc();
4868  if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
4870  return MatchOperand_ParseFail;
4871  if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
4872  Error(LaneLoc, "mismatched lane index in register list");
4873  return MatchOperand_ParseFail;
4874  }
4875  continue;
4876  }
4877  // Normal D register.
4878  // Figure out the register spacing (single or double) of the list if
4879  // we don't know it already.
4880  if (!Spacing)
4881  Spacing = 1 + (Reg == OldReg + 2);
4882 
4883  // Just check that it's contiguous and keep going.
4884  if (Reg != OldReg + Spacing) {
4885  Error(RegLoc, "non-contiguous register range");
4886  return MatchOperand_ParseFail;
4887  }
4888  ++Count;
4889  // Parse the lane specifier if present.
4890  VectorLaneTy NextLaneKind;
4891  unsigned NextLaneIndex;
4892  SMLoc EndLoc = Parser.getTok().getLoc();
4893  if (parseVectorLane(NextLaneKind, NextLaneIndex, E) != MatchOperand_Success)
4894  return MatchOperand_ParseFail;
4895  if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
4896  Error(EndLoc, "mismatched lane index in register list");
4897  return MatchOperand_ParseFail;
4898  }
4899  }
4900 
4901  if (Parser.getTok().isNot(AsmToken::RCurly)) {
4902  Error(Parser.getTok().getLoc(), "'}' expected");
4903  return MatchOperand_ParseFail;
4904  }
4905  E = Parser.getTok().getEndLoc();
4906  Parser.Lex(); // Eat '}' token.
4907 
4908  switch (LaneKind) {
4909  case NoLanes:
4910  case AllLanes: {
4911  // Two-register operands have been converted to the
4912  // composite register classes.
4913  if (Count == 2 && !hasMVE()) {
4914  const MCRegisterClass *RC = (Spacing == 1) ?
4915  &ARMMCRegisterClasses[ARM::DPairRegClassID] :
4916  &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
4917  FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
4918  }
4919  auto Create = (LaneKind == NoLanes ? ARMOperand::CreateVectorList :
4920  ARMOperand::CreateVectorListAllLanes);
4921  Operands.push_back(Create(FirstReg, Count, (Spacing == 2), S, E));
4922  break;
4923  }
4924  case IndexedLane:
4925  Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
4926  LaneIndex,
4927  (Spacing == 2),
4928  S, E));
4929  break;
4930  }
4931  return MatchOperand_Success;
4932 }
4933 
4934 /// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
4936 ARMAsmParser::parseMemBarrierOptOperand(OperandVector &Operands) {
4937  MCAsmParser &Parser = getParser();
4938  SMLoc S = Parser.getTok().getLoc();
4939  const AsmToken &Tok = Parser.getTok();
4940  unsigned Opt;
4941 
4942  if (Tok.is(AsmToken::Identifier)) {
4943  StringRef OptStr = Tok.getString();
4944 
4945  Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()).lower())
4946  .Case("sy", ARM_MB::SY)
4947  .Case("st", ARM_MB::ST)
4948  .Case("ld", ARM_MB::LD)
4949  .Case("sh", ARM_MB::ISH)
4950  .Case("ish", ARM_MB::ISH)
4951  .Case("shst", ARM_MB::ISHST)
4952  .Case("ishst", ARM_MB::ISHST)
4953  .Case("ishld", ARM_MB::ISHLD)
4954  .Case("nsh", ARM_MB::NSH)
4955  .Case("un", ARM_MB::NSH)
4956  .Case("nshst", ARM_MB::NSHST)
4957  .Case("nshld", ARM_MB::NSHLD)
4958  .Case("unst", ARM_MB::NSHST)
4959  .Case("osh", ARM_MB::OSH)
4960  .Case("oshst", ARM_MB::OSHST)
4961  .Case("oshld", ARM_MB::OSHLD)
4962  .Default(~0U);
4963 
4964  // ishld, oshld, nshld and ld are only available from ARMv8.
4965  if (!hasV8Ops() && (Opt == ARM_MB::ISHLD || Opt == ARM_MB::OSHLD ||
4966  Opt == ARM_MB::NSHLD || Opt == ARM_MB::LD))
4967  Opt = ~0U;
4968 
4969  if (Opt == ~0U)
4970  return MatchOperand_NoMatch;
4971 
4972  Parser.Lex(); // Eat identifier token.
4973  } else if (Tok.is(AsmToken::Hash) ||
4974  Tok.is(AsmToken::Dollar) ||
4975  Tok.is(AsmToken::Integer)) {
4976  if (Parser.getTok().isNot(AsmToken::Integer))
4977  Parser.Lex(); // Eat '#' or '$'.
4978  SMLoc Loc = Parser.getTok().getLoc();
4979 
4980  const MCExpr *MemBarrierID;
4981  if (getParser().parseExpression(MemBarrierID)) {
4982  Error(Loc, "illegal expression");
4983  return MatchOperand_ParseFail;
4984  }
4985 
4986  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(MemBarrierID);
4987  if (!CE) {
4988  Error(Loc, "constant expression expected");
4989  return MatchOperand_ParseFail;
4990  }
4991 
4992  int Val = CE->getValue();
4993  if (Val & ~0xf) {
4994  Error(Loc, "immediate value out of range");
4995  return MatchOperand_ParseFail;
4996  }
4997 
4998  Opt = ARM_MB::RESERVED_0 + Val;
4999  } else
5000  return MatchOperand_ParseFail;
5001 
5002  Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
5003  return MatchOperand_Success;
5004 }
5005 
5007 ARMAsmParser::parseTraceSyncBarrierOptOperand(OperandVector &Operands) {
5008  MCAsmParser &Parser = getParser();
5009  SMLoc S = Parser.getTok().getLoc();
5010  const AsmToken &Tok = Parser.getTok();
5011 
5012  if (Tok.isNot(AsmToken::Identifier))
5013  return