LLVM 20.0.0git
ARMAsmParser.cpp
Go to the documentation of this file.
1//===- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions -------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "ARMBaseInstrInfo.h"
10#include "ARMFeatures.h"
17#include "Utils/ARMBaseInfo.h"
18#include "llvm/ADT/APFloat.h"
19#include "llvm/ADT/APInt.h"
20#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/SmallSet.h"
24#include "llvm/ADT/StringMap.h"
25#include "llvm/ADT/StringRef.h"
26#include "llvm/ADT/StringSet.h"
28#include "llvm/ADT/Twine.h"
29#include "llvm/MC/MCContext.h"
30#include "llvm/MC/MCExpr.h"
31#include "llvm/MC/MCInst.h"
32#include "llvm/MC/MCInstrDesc.h"
33#include "llvm/MC/MCInstrInfo.h"
41#include "llvm/MC/MCSection.h"
42#include "llvm/MC/MCStreamer.h"
44#include "llvm/MC/MCSymbol.h"
51#include "llvm/Support/Debug.h"
54#include "llvm/Support/SMLoc.h"
59#include <algorithm>
60#include <cassert>
61#include <cstddef>
62#include <cstdint>
63#include <iterator>
64#include <limits>
65#include <memory>
66#include <optional>
67#include <string>
68#include <utility>
69#include <vector>
70
71#define DEBUG_TYPE "asm-parser"
72
73using namespace llvm;
74
75namespace {
76class ARMOperand;
77
78enum class ImplicitItModeTy { Always, Never, ARMOnly, ThumbOnly };
79
80static cl::opt<ImplicitItModeTy> ImplicitItMode(
81 "arm-implicit-it", cl::init(ImplicitItModeTy::ARMOnly),
82 cl::desc("Allow conditional instructions outside of an IT block"),
83 cl::values(clEnumValN(ImplicitItModeTy::Always, "always",
84 "Accept in both ISAs, emit implicit ITs in Thumb"),
85 clEnumValN(ImplicitItModeTy::Never, "never",
86 "Warn in ARM, reject in Thumb"),
87 clEnumValN(ImplicitItModeTy::ARMOnly, "arm",
88 "Accept in ARM, reject in Thumb"),
89 clEnumValN(ImplicitItModeTy::ThumbOnly, "thumb",
90 "Warn in ARM, emit implicit ITs in Thumb")));
91
92static cl::opt<bool> AddBuildAttributes("arm-add-build-attributes",
93 cl::init(false));
94
95enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
96
// Return the state bit (0 = Then, 1 = Else) for slot `Position` of an IT
// block mask. Position==0 means we're not in an IT block at all; Position==1
// selects the first state bit, which is always 0 (Then). The mask stores the
// mandatory always-zero initial Then at bit 4, with later slots at bits
// 3..0, so shifting right by (5 - Position) brings the requested slot's bit
// down to bit 0.
static inline unsigned extractITMaskBit(unsigned Mask, unsigned Position) {
  unsigned Shift = 5 - Position;
  return (Mask >> Shift) & 1u;
}
106
// Tracks the state of ARM EHABI unwind-table directives (.fnstart,
// .cantunwind, .personality, ...) so the parser can diagnose conflicting or
// misplaced directives and point back at where each one appeared.
class UnwindContext {
  using Locs = SmallVector<SMLoc, 4>;

  MCAsmParser &Parser;
  Locs FnStartLocs;          // Locations of .fnstart directives seen.
  Locs CantUnwindLocs;       // Locations of .cantunwind directives seen.
  Locs PersonalityLocs;      // Locations of .personality directives seen.
  Locs PersonalityIndexLocs; // Locations of .personalityindex directives seen.
  Locs HandlerDataLocs;      // Locations of .handlerdata directives seen.
  MCRegister FPReg;          // Frame pointer register saved via .setfp.

public:
  UnwindContext(MCAsmParser &P) : Parser(P), FPReg(ARM::SP) {}

  bool hasFnStart() const { return !FnStartLocs.empty(); }
  bool cantUnwind() const { return !CantUnwindLocs.empty(); }
  bool hasHandlerData() const { return !HandlerDataLocs.empty(); }

  // True if either form of personality directive has been seen.
  bool hasPersonality() const {
    return !(PersonalityLocs.empty() && PersonalityIndexLocs.empty());
  }

  void recordFnStart(SMLoc L) { FnStartLocs.push_back(L); }
  void recordCantUnwind(SMLoc L) { CantUnwindLocs.push_back(L); }
  void recordPersonality(SMLoc L) { PersonalityLocs.push_back(L); }
  void recordHandlerData(SMLoc L) { HandlerDataLocs.push_back(L); }
  void recordPersonalityIndex(SMLoc L) { PersonalityIndexLocs.push_back(L); }

  void saveFPReg(MCRegister Reg) { FPReg = Reg; }
  MCRegister getFPReg() const { return FPReg; }

  // The emit*LocNotes helpers attach "was specified here" notes to a
  // diagnostic for every recorded occurrence of the directive.
  void emitFnStartLocNotes() const {
    for (const SMLoc &Loc : FnStartLocs)
      Parser.Note(Loc, ".fnstart was specified here");
  }

  void emitCantUnwindLocNotes() const {
    for (const SMLoc &Loc : CantUnwindLocs)
      Parser.Note(Loc, ".cantunwind was specified here");
  }

  void emitHandlerDataLocNotes() const {
    for (const SMLoc &Loc : HandlerDataLocs)
      Parser.Note(Loc, ".handlerdata was specified here");
  }

  // Emit notes for .personality and .personalityindex interleaved in source
  // order: the two lists are each in file order, so this merges them by
  // comparing the raw location pointers, advancing whichever list holds the
  // earlier location.
  void emitPersonalityLocNotes() const {
    for (Locs::const_iterator PI = PersonalityLocs.begin(),
                              PE = PersonalityLocs.end(),
                              PII = PersonalityIndexLocs.begin(),
                              PIE = PersonalityIndexLocs.end();
         PI != PE || PII != PIE;) {
      if (PI != PE && (PII == PIE || PI->getPointer() < PII->getPointer()))
        Parser.Note(*PI++, ".personality was specified here");
      else if (PII != PIE && (PI == PE || PII->getPointer() < PI->getPointer()))
        Parser.Note(*PII++, ".personalityindex was specified here");
      else
        llvm_unreachable(".personality and .personalityindex cannot be "
                         "at the same location");
    }
  }

  // Forget all recorded directives, e.g. after .fnend closes a region.
  void reset() {
    FnStartLocs = Locs();
    CantUnwindLocs = Locs();
    PersonalityLocs = Locs();
    HandlerDataLocs = Locs();
    PersonalityIndexLocs = Locs();
    FPReg = ARM::SP;
  }
};
178
179// Various sets of ARM instruction mnemonics which are used by the asm parser
180class ARMMnemonicSets {
181 StringSet<> CDE;
182 StringSet<> CDEWithVPTSuffix;
183public:
184 ARMMnemonicSets(const MCSubtargetInfo &STI);
185
186 /// Returns true iff a given mnemonic is a CDE instruction
187 bool isCDEInstr(StringRef Mnemonic) {
188 // Quick check before searching the set
189 if (!Mnemonic.starts_with("cx") && !Mnemonic.starts_with("vcx"))
190 return false;
191 return CDE.count(Mnemonic);
192 }
193
194 /// Returns true iff a given mnemonic is a VPT-predicable CDE instruction
195 /// (possibly with a predication suffix "e" or "t")
196 bool isVPTPredicableCDEInstr(StringRef Mnemonic) {
197 if (!Mnemonic.starts_with("vcx"))
198 return false;
199 return CDEWithVPTSuffix.count(Mnemonic);
200 }
201
202 /// Returns true iff a given mnemonic is an IT-predicable CDE instruction
203 /// (possibly with a condition suffix)
204 bool isITPredicableCDEInstr(StringRef Mnemonic) {
205 if (!Mnemonic.starts_with("cx"))
206 return false;
207 return Mnemonic.starts_with("cx1a") || Mnemonic.starts_with("cx1da") ||
208 Mnemonic.starts_with("cx2a") || Mnemonic.starts_with("cx2da") ||
209 Mnemonic.starts_with("cx3a") || Mnemonic.starts_with("cx3da");
210 }
211
212 /// Return true iff a given mnemonic is an integer CDE instruction with
213 /// dual-register destination
214 bool isCDEDualRegInstr(StringRef Mnemonic) {
215 if (!Mnemonic.starts_with("cx"))
216 return false;
217 return Mnemonic == "cx1d" || Mnemonic == "cx1da" ||
218 Mnemonic == "cx2d" || Mnemonic == "cx2da" ||
219 Mnemonic == "cx3d" || Mnemonic == "cx3da";
220 }
221};
222
223ARMMnemonicSets::ARMMnemonicSets(const MCSubtargetInfo &STI) {
224 for (StringRef Mnemonic: { "cx1", "cx1a", "cx1d", "cx1da",
225 "cx2", "cx2a", "cx2d", "cx2da",
226 "cx3", "cx3a", "cx3d", "cx3da", })
227 CDE.insert(Mnemonic);
228 for (StringRef Mnemonic :
229 {"vcx1", "vcx1a", "vcx2", "vcx2a", "vcx3", "vcx3a"}) {
230 CDE.insert(Mnemonic);
231 CDEWithVPTSuffix.insert(Mnemonic);
232 CDEWithVPTSuffix.insert(std::string(Mnemonic) + "t");
233 CDEWithVPTSuffix.insert(std::string(Mnemonic) + "e");
234 }
235}
236
237class ARMAsmParser : public MCTargetAsmParser {
238 const MCRegisterInfo *MRI;
239 UnwindContext UC;
240 ARMMnemonicSets MS;
241
242 ARMTargetStreamer &getTargetStreamer() {
243 assert(getParser().getStreamer().getTargetStreamer() &&
244 "do not have a target streamer");
246 return static_cast<ARMTargetStreamer &>(TS);
247 }
248
  // Map of register aliases defined via the .req directive.
250 StringMap<MCRegister> RegisterReqs;
251
252 bool NextSymbolIsThumb;
253
254 bool useImplicitITThumb() const {
255 return ImplicitItMode == ImplicitItModeTy::Always ||
256 ImplicitItMode == ImplicitItModeTy::ThumbOnly;
257 }
258
259 bool useImplicitITARM() const {
260 return ImplicitItMode == ImplicitItModeTy::Always ||
261 ImplicitItMode == ImplicitItModeTy::ARMOnly;
262 }
263
  // State of the currently-open IT (If-Then) block, if any.
  struct {
    ARMCC::CondCodes Cond; // Condition for IT block.
    unsigned Mask:4; // Condition mask for instructions.
                     // Starting at first 1 (from lsb).
                     //   '1'  condition as indicated in IT.
                     //   '0'  inverse of condition (else).
                     // Count of instructions in IT block is
                     // 4 - trailingzeroes(mask)
                     // Note that this does not have the same encoding
                     // as in the IT instruction, which also depends
                     // on the low bit of the condition code.

    unsigned CurPosition; // Current position in parsing of IT
                          // block. In range [0,4], with 0 being the IT
                          // instruction itself. Initialized according to
                          // count of instructions in block.  ~0U if no
                          // active IT block.

    bool IsExplicit; // true  - The IT instruction was present in the
                     //         input, we should not modify it.
                     // false - The IT instruction was added
                     //         implicitly, we can extend it if that
                     //         would be legal.
  } ITState;
288
289 SmallVector<MCInst, 4> PendingConditionalInsts;
290
291 void flushPendingInstructions(MCStreamer &Out) override {
292 if (!inImplicitITBlock()) {
293 assert(PendingConditionalInsts.size() == 0);
294 return;
295 }
296
297 // Emit the IT instruction
298 MCInst ITInst;
299 ITInst.setOpcode(ARM::t2IT);
300 ITInst.addOperand(MCOperand::createImm(ITState.Cond));
301 ITInst.addOperand(MCOperand::createImm(ITState.Mask));
302 Out.emitInstruction(ITInst, getSTI());
303
304 // Emit the conditional instructions
305 assert(PendingConditionalInsts.size() <= 4);
306 for (const MCInst &Inst : PendingConditionalInsts) {
307 Out.emitInstruction(Inst, getSTI());
308 }
309 PendingConditionalInsts.clear();
310
311 // Clear the IT state
312 ITState.Mask = 0;
313 ITState.CurPosition = ~0U;
314 }
315
  // ~0U in CurPosition is the sentinel for "no active IT block".
  bool inITBlock() { return ITState.CurPosition != ~0U; }
  bool inExplicitITBlock() { return inITBlock() && ITState.IsExplicit; }
  bool inImplicitITBlock() { return inITBlock() && !ITState.IsExplicit; }

  // The block has 4 - trailingzeroes(Mask) slots, so the last slot's
  // position equals that count.
  bool lastInITBlock() {
    return ITState.CurPosition == 4 - (unsigned)llvm::countr_zero(ITState.Mask);
  }
323
324 void forwardITPosition() {
325 if (!inITBlock()) return;
326 // Move to the next instruction in the IT block, if there is one. If not,
327 // mark the block as done, except for implicit IT blocks, which we leave
328 // open until we find an instruction that can't be added to it.
329 unsigned TZ = llvm::countr_zero(ITState.Mask);
330 if (++ITState.CurPosition == 5 - TZ && ITState.IsExplicit)
331 ITState.CurPosition = ~0U; // Done with the IT block after this.
332 }
333
  // Rewind the state of the current IT block, removing the last slot from it.
  void rewindImplicitITPosition() {
    assert(inImplicitITBlock());
    assert(ITState.CurPosition > 1);
    ITState.CurPosition--;
    // Rebuild the mask one slot shorter: keep the condition bits above the
    // old terminating 1 (0xC << TZ) and place a new terminating 1 one bit
    // higher (0x2 << TZ), dropping the removed slot's bit.
    unsigned TZ = llvm::countr_zero(ITState.Mask);
    unsigned NewMask = 0;
    NewMask |= ITState.Mask & (0xC << TZ);
    NewMask |= 0x2 << TZ;
    ITState.Mask = NewMask;
  }

  // Rewind the state of the current IT block, removing the last slot from it.
  // If we were at the first slot, this closes the IT block.
  void discardImplicitITBlock() {
    assert(inImplicitITBlock());
    assert(ITState.CurPosition == 1);
    ITState.CurPosition = ~0U; // No active IT block any more.
  }
353
354 // Get the condition code corresponding to the current IT block slot.
355 ARMCC::CondCodes currentITCond() {
356 unsigned MaskBit = extractITMaskBit(ITState.Mask, ITState.CurPosition);
357 return MaskBit ? ARMCC::getOppositeCondition(ITState.Cond) : ITState.Cond;
358 }
359
  // Invert the condition of the current IT block slot without changing any
  // other slots in the same block.
  void invertCurrentITCondition() {
    if (ITState.CurPosition == 1) {
      // The first slot is always Then, so flip the block's base condition
      // instead of a mask bit.
      ITState.Cond = ARMCC::getOppositeCondition(ITState.Cond);
    } else {
      // Flip this slot's bit (bit 5 - position; see extractITMaskBit).
      ITState.Mask ^= 1 << (5 - ITState.CurPosition);
    }
  }

  // Returns true if the current IT block is full (all 4 slots used).
  // When all slots are used the terminating 1 sits at bit 0 of the mask.
  bool isITBlockFull() {
    return inITBlock() && (ITState.Mask & 1);
  }
374
  // Extend the current implicit IT block to have one more slot with the given
  // condition code.
  void extendImplicitITBlock(ARMCC::CondCodes Cond) {
    assert(inImplicitITBlock());
    assert(!isITBlockFull());
    assert(Cond == ITState.Cond ||
           Cond == ARMCC::getOppositeCondition(ITState.Cond));
    // The block is not full, so the terminating 1 is above bit 0 (TZ >= 1)
    // and the (TZ - 1) shift below cannot underflow.
    unsigned TZ = llvm::countr_zero(ITState.Mask);
    unsigned NewMask = 0;
    // Keep any existing condition bits.
    NewMask |= ITState.Mask & (0xE << TZ);
    // Insert the new condition bit.
    NewMask |= (Cond != ITState.Cond) << TZ;
    // Move the trailing 1 down one bit.
    NewMask |= 1 << (TZ - 1);
    ITState.Mask = NewMask;
  }
392
393 // Create a new implicit IT block with a dummy condition code.
394 void startImplicitITBlock() {
395 assert(!inITBlock());
396 ITState.Cond = ARMCC::AL;
397 ITState.Mask = 8;
398 ITState.CurPosition = 1;
399 ITState.IsExplicit = false;
400 }
401
402 // Create a new explicit IT block with the given condition and mask.
403 // The mask should be in the format used in ARMOperand and
404 // MCOperand, with a 1 implying 'e', regardless of the low bit of
405 // the condition.
406 void startExplicitITBlock(ARMCC::CondCodes Cond, unsigned Mask) {
407 assert(!inITBlock());
408 ITState.Cond = Cond;
409 ITState.Mask = Mask;
410 ITState.CurPosition = 0;
411 ITState.IsExplicit = true;
412 }
413
  // State of the currently-open VPT block, if any; same slot/mask scheme as
  // ITState above.
  struct {
    unsigned Mask : 4;    // Slot mask; block length is 4 - trailingzeroes.
    unsigned CurPosition; // Current slot; ~0U means no active VPT block.
  } VPTState;
  bool inVPTBlock() { return VPTState.CurPosition != ~0U; }
  // Advance to the next VPT slot, closing the block once past the last one.
  void forwardVPTPosition() {
    if (!inVPTBlock()) return;
    unsigned TZ = llvm::countr_zero(VPTState.Mask);
    if (++VPTState.CurPosition == 5 - TZ)
      VPTState.CurPosition = ~0U;
  }
425
426 void Note(SMLoc L, const Twine &Msg, SMRange Range = std::nullopt) {
427 return getParser().Note(L, Msg, Range);
428 }
429
430 bool Warning(SMLoc L, const Twine &Msg, SMRange Range = std::nullopt) {
431 return getParser().Warning(L, Msg, Range);
432 }
433
434 bool Error(SMLoc L, const Twine &Msg, SMRange Range = std::nullopt) {
435 return getParser().Error(L, Msg, Range);
436 }
437
438 bool validatetLDMRegList(const MCInst &Inst, const OperandVector &Operands,
439 unsigned MnemonicOpsEndInd, unsigned ListIndex,
440 bool IsARPop = false);
441 bool validatetSTMRegList(const MCInst &Inst, const OperandVector &Operands,
442 unsigned MnemonicOpsEndInd, unsigned ListIndex);
443
444 MCRegister tryParseRegister(bool AllowOutofBoundReg = false);
445 bool tryParseRegisterWithWriteBack(OperandVector &);
446 int tryParseShiftRegister(OperandVector &);
447 std::optional<ARM_AM::ShiftOpc> tryParseShiftToken();
448 bool parseRegisterList(OperandVector &, bool EnforceOrder = true,
449 bool AllowRAAC = false, bool IsLazyLoadStore = false,
450 bool IsVSCCLRM = false);
451 bool parseMemory(OperandVector &);
452 bool parseOperand(OperandVector &, StringRef Mnemonic);
453 bool parseImmExpr(int64_t &Out);
454 bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
455 bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
456 unsigned &ShiftAmount);
457 bool parseLiteralValues(unsigned Size, SMLoc L);
458 bool parseDirectiveThumb(SMLoc L);
459 bool parseDirectiveARM(SMLoc L);
460 bool parseDirectiveThumbFunc(SMLoc L);
461 bool parseDirectiveCode(SMLoc L);
462 bool parseDirectiveSyntax(SMLoc L);
463 bool parseDirectiveReq(StringRef Name, SMLoc L);
464 bool parseDirectiveUnreq(SMLoc L);
465 bool parseDirectiveArch(SMLoc L);
466 bool parseDirectiveEabiAttr(SMLoc L);
467 bool parseDirectiveCPU(SMLoc L);
468 bool parseDirectiveFPU(SMLoc L);
469 bool parseDirectiveFnStart(SMLoc L);
470 bool parseDirectiveFnEnd(SMLoc L);
471 bool parseDirectiveCantUnwind(SMLoc L);
472 bool parseDirectivePersonality(SMLoc L);
473 bool parseDirectiveHandlerData(SMLoc L);
474 bool parseDirectiveSetFP(SMLoc L);
475 bool parseDirectivePad(SMLoc L);
476 bool parseDirectiveRegSave(SMLoc L, bool IsVector);
477 bool parseDirectiveInst(SMLoc L, char Suffix = '\0');
478 bool parseDirectiveLtorg(SMLoc L);
479 bool parseDirectiveEven(SMLoc L);
480 bool parseDirectivePersonalityIndex(SMLoc L);
481 bool parseDirectiveUnwindRaw(SMLoc L);
482 bool parseDirectiveTLSDescSeq(SMLoc L);
483 bool parseDirectiveMovSP(SMLoc L);
484 bool parseDirectiveObjectArch(SMLoc L);
485 bool parseDirectiveArchExtension(SMLoc L);
486 bool parseDirectiveAlign(SMLoc L);
487 bool parseDirectiveThumbSet(SMLoc L);
488
489 bool parseDirectiveSEHAllocStack(SMLoc L, bool Wide);
490 bool parseDirectiveSEHSaveRegs(SMLoc L, bool Wide);
491 bool parseDirectiveSEHSaveSP(SMLoc L);
492 bool parseDirectiveSEHSaveFRegs(SMLoc L);
493 bool parseDirectiveSEHSaveLR(SMLoc L);
494 bool parseDirectiveSEHPrologEnd(SMLoc L, bool Fragment);
495 bool parseDirectiveSEHNop(SMLoc L, bool Wide);
496 bool parseDirectiveSEHEpilogStart(SMLoc L, bool Condition);
497 bool parseDirectiveSEHEpilogEnd(SMLoc L);
498 bool parseDirectiveSEHCustom(SMLoc L);
499
500 std::unique_ptr<ARMOperand> defaultCondCodeOp();
501 std::unique_ptr<ARMOperand> defaultCCOutOp();
502 std::unique_ptr<ARMOperand> defaultVPTPredOp();
503
504 bool isMnemonicVPTPredicable(StringRef Mnemonic, StringRef ExtraToken);
505 StringRef splitMnemonic(StringRef Mnemonic, StringRef ExtraToken,
506 ARMCC::CondCodes &PredicationCode,
507 ARMVCC::VPTCodes &VPTPredicationCode,
508 bool &CarrySetting, unsigned &ProcessorIMod,
509 StringRef &ITMask);
510 void getMnemonicAcceptInfo(StringRef Mnemonic, StringRef ExtraToken,
511 StringRef FullInst, bool &CanAcceptCarrySet,
512 bool &CanAcceptPredicationCode,
513 bool &CanAcceptVPTPredicationCode);
514 bool enableArchExtFeature(StringRef Name, SMLoc &ExtLoc);
515
516 void tryConvertingToTwoOperandForm(StringRef Mnemonic,
517 ARMCC::CondCodes PredicationCode,
518 bool CarrySetting, OperandVector &Operands,
519 unsigned MnemonicOpsEndInd);
520
521 bool CDEConvertDualRegOperand(StringRef Mnemonic, OperandVector &Operands,
522 unsigned MnemonicOpsEndInd);
523
  // Subtarget queries. The isThumb* predicates reflect the current assembler
  // mode (ModeThumb is toggled when switching modes); the has* predicates
  // reflect target capability features.

  bool isThumb() const {
    // FIXME: Can tablegen auto-generate this?
    return getSTI().hasFeature(ARM::ModeThumb);
  }

  bool isThumbOne() const {
    return isThumb() && !getSTI().hasFeature(ARM::FeatureThumb2);
  }

  bool isThumbTwo() const {
    return isThumb() && getSTI().hasFeature(ARM::FeatureThumb2);
  }

  // True when the target supports Thumb at all (ARMv4T and later).
  bool hasThumb() const {
    return getSTI().hasFeature(ARM::HasV4TOps);
  }

  bool hasThumb2() const {
    return getSTI().hasFeature(ARM::FeatureThumb2);
  }

  bool hasV6Ops() const {
    return getSTI().hasFeature(ARM::HasV6Ops);
  }

  bool hasV6T2Ops() const {
    return getSTI().hasFeature(ARM::HasV6T2Ops);
  }

  bool hasV6MOps() const {
    return getSTI().hasFeature(ARM::HasV6MOps);
  }

  bool hasV7Ops() const {
    return getSTI().hasFeature(ARM::HasV7Ops);
  }

  bool hasV8Ops() const {
    return getSTI().hasFeature(ARM::HasV8Ops);
  }

  bool hasV8MBaseline() const {
    return getSTI().hasFeature(ARM::HasV8MBaselineOps);
  }

  bool hasV8MMainline() const {
    return getSTI().hasFeature(ARM::HasV8MMainlineOps);
  }
  bool hasV8_1MMainline() const {
    return getSTI().hasFeature(ARM::HasV8_1MMainlineOps);
  }
  bool hasMVEFloat() const {
    return getSTI().hasFeature(ARM::HasMVEFloatOps);
  }
  bool hasCDE() const {
    return getSTI().hasFeature(ARM::HasCDEOps);
  }
  bool has8MSecExt() const {
    return getSTI().hasFeature(ARM::Feature8MSecExt);
  }

  // Note the inversion: ARM support is expressed as absence of FeatureNoARM.
  bool hasARM() const {
    return !getSTI().hasFeature(ARM::FeatureNoARM);
  }

  bool hasDSP() const {
    return getSTI().hasFeature(ARM::FeatureDSP);
  }

  bool hasD32() const {
    return getSTI().hasFeature(ARM::FeatureD32);
  }

  bool hasV8_1aOps() const {
    return getSTI().hasFeature(ARM::HasV8_1aOps);
  }

  bool hasRAS() const {
    return getSTI().hasFeature(ARM::FeatureRAS);
  }
604
605 void SwitchMode() {
606 MCSubtargetInfo &STI = copySTI();
607 auto FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
609 }
610
611 void FixModeAfterArchChange(bool WasThumb, SMLoc Loc);
612
  // True when the target has the M-class (microcontroller) profile feature.
  bool isMClass() const {
    return getSTI().hasFeature(ARM::FeatureMClass);
  }
616
617 /// @name Auto-generated Match Functions
618 /// {
619
620#define GET_ASSEMBLER_HEADER
621#include "ARMGenAsmMatcher.inc"
622
623 /// }
624
625 ParseStatus parseITCondCode(OperandVector &);
626 ParseStatus parseCoprocNumOperand(OperandVector &);
627 ParseStatus parseCoprocRegOperand(OperandVector &);
628 ParseStatus parseCoprocOptionOperand(OperandVector &);
629 ParseStatus parseMemBarrierOptOperand(OperandVector &);
630 ParseStatus parseTraceSyncBarrierOptOperand(OperandVector &);
631 ParseStatus parseInstSyncBarrierOptOperand(OperandVector &);
632 ParseStatus parseProcIFlagsOperand(OperandVector &);
633 ParseStatus parseMSRMaskOperand(OperandVector &);
634 ParseStatus parseBankedRegOperand(OperandVector &);
635 ParseStatus parsePKHImm(OperandVector &O, ARM_AM::ShiftOpc, int Low,
636 int High);
  // PKH shift-amount parsers: LSL amounts are 0..31, ASR amounts 1..32.
  ParseStatus parsePKHLSLImm(OperandVector &O) {
    return parsePKHImm(O, ARM_AM::lsl, 0, 31);
  }
  ParseStatus parsePKHASRImm(OperandVector &O) {
    return parsePKHImm(O, ARM_AM::asr, 1, 32);
  }
643 ParseStatus parseSetEndImm(OperandVector &);
644 ParseStatus parseShifterImm(OperandVector &);
645 ParseStatus parseRotImm(OperandVector &);
646 ParseStatus parseModImm(OperandVector &);
647 ParseStatus parseBitfield(OperandVector &);
648 ParseStatus parsePostIdxReg(OperandVector &);
649 ParseStatus parseAM3Offset(OperandVector &);
650 ParseStatus parseFPImm(OperandVector &);
651 ParseStatus parseVectorList(OperandVector &);
652 ParseStatus parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index,
653 SMLoc &EndLoc);
654
655 // Asm Match Converter Methods
656 void cvtThumbMultiply(MCInst &Inst, const OperandVector &);
657 void cvtThumbBranches(MCInst &Inst, const OperandVector &);
658 void cvtMVEVMOVQtoDReg(MCInst &Inst, const OperandVector &);
659
660 bool validateInstruction(MCInst &Inst, const OperandVector &Ops,
661 unsigned MnemonicOpsEndInd);
662 bool processInstruction(MCInst &Inst, const OperandVector &Ops,
663 unsigned MnemonicOpsEndInd, MCStreamer &Out);
664 bool shouldOmitVectorPredicateOperand(StringRef Mnemonic,
666 unsigned MnemonicOpsEndInd);
667 bool isITBlockTerminator(MCInst &Inst) const;
668
669 void fixupGNULDRDAlias(StringRef Mnemonic, OperandVector &Operands,
670 unsigned MnemonicOpsEndInd);
671 bool validateLDRDSTRD(MCInst &Inst, const OperandVector &Operands, bool Load,
672 bool ARMMode, bool Writeback,
673 unsigned MnemonicOpsEndInd);
674
675public:
676 enum ARMMatchResultTy {
677 Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
678 Match_RequiresNotITBlock,
679 Match_RequiresV6,
680 Match_RequiresThumb2,
681 Match_RequiresV8,
682 Match_RequiresFlagSetting,
683#define GET_OPERAND_DIAGNOSTIC_TYPES
684#include "ARMGenAsmMatcher.inc"
685
686 };
687
688 ARMAsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
689 const MCInstrInfo &MII, const MCTargetOptions &Options)
690 : MCTargetAsmParser(Options, STI, MII), UC(Parser), MS(STI) {
692
693 // Cache the MCRegisterInfo.
695
696 // Initialize the set of available features.
697 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
698
699 // Add build attributes based on the selected target.
701 getTargetStreamer().emitTargetAttributes(STI);
702
703 // Not in an ITBlock to start with.
704 ITState.CurPosition = ~0U;
705
706 VPTState.CurPosition = ~0U;
707
708 NextSymbolIsThumb = false;
709 }
710
711 // Implementation of the MCTargetAsmParser interface:
712 bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc) override;
714 SMLoc &EndLoc) override;
716 SMLoc NameLoc, OperandVector &Operands) override;
717 bool ParseDirective(AsmToken DirectiveID) override;
718
720 unsigned Kind) override;
721 unsigned checkTargetMatchPredicate(MCInst &Inst) override;
722 unsigned
724 const OperandVector &Operands) override;
725
726 bool matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
729 bool MatchingInlineAsm) override;
730 unsigned MatchInstruction(OperandVector &Operands, MCInst &Inst,
732 bool MatchingInlineAsm, bool &EmitInITBlock,
733 MCStreamer &Out);
734
735 struct NearMissMessage {
736 SMLoc Loc;
737 SmallString<128> Message;
738 };
739
740 const char *getCustomOperandDiag(ARMMatchResultTy MatchError);
741
742 void FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
745 void ReportNearMisses(SmallVectorImpl<NearMissInfo> &NearMisses, SMLoc IDLoc,
747
749 getVariantKindForName(StringRef Name) const override;
750
751 void doBeforeLabelEmit(MCSymbol *Symbol, SMLoc IDLoc) override;
752
753 void onLabelParsed(MCSymbol *Symbol) override;
754
  // Look up the MCInstrDesc for an opcode via the cached MCInstrInfo.
  const MCInstrDesc &getInstrDesc(unsigned int Opcode) const {
    return MII.get(Opcode);
  }

  bool hasMVE() const { return getSTI().hasFeature(ARM::HasMVEIntegerOps); }

  // Return the low-subreg of a given Q register.
  MCRegister getDRegFromQReg(MCRegister QReg) const {
    return MRI->getSubReg(QReg, ARM::dsub_0);
  }

  const MCRegisterInfo *getMRI() const { return MRI; }
767};
768
769/// ARMOperand - Instances of this class represent a parsed ARM machine
770/// operand.
771class ARMOperand : public MCParsedAsmOperand {
772 enum KindTy {
773 k_CondCode,
774 k_VPTPred,
775 k_CCOut,
776 k_ITCondMask,
777 k_CoprocNum,
778 k_CoprocReg,
779 k_CoprocOption,
780 k_Immediate,
781 k_MemBarrierOpt,
782 k_InstSyncBarrierOpt,
783 k_TraceSyncBarrierOpt,
784 k_Memory,
785 k_PostIndexRegister,
786 k_MSRMask,
787 k_BankedReg,
788 k_ProcIFlags,
789 k_VectorIndex,
790 k_Register,
791 k_RegisterList,
792 k_RegisterListWithAPSR,
793 k_DPRRegisterList,
794 k_SPRRegisterList,
795 k_FPSRegisterListWithVPR,
796 k_FPDRegisterListWithVPR,
797 k_VectorList,
798 k_VectorListAllLanes,
799 k_VectorListIndexed,
800 k_ShiftedRegister,
801 k_ShiftedImmediate,
802 k_ShifterImmediate,
803 k_RotateImmediate,
804 k_ModifiedImmediate,
805 k_ConstantPoolImmediate,
806 k_BitfieldDescriptor,
807 k_Token,
808 } Kind;
809
810 SMLoc StartLoc, EndLoc, AlignmentLoc;
812
813 ARMAsmParser *Parser;
814
815 struct CCOp {
817 };
818
819 struct VCCOp {
821 };
822
823 struct CopOp {
824 unsigned Val;
825 };
826
827 struct CoprocOptionOp {
828 unsigned Val;
829 };
830
831 struct ITMaskOp {
832 unsigned Mask:4;
833 };
834
835 struct MBOptOp {
836 ARM_MB::MemBOpt Val;
837 };
838
839 struct ISBOptOp {
841 };
842
843 struct TSBOptOp {
845 };
846
847 struct IFlagsOp {
849 };
850
851 struct MMaskOp {
852 unsigned Val;
853 };
854
855 struct BankedRegOp {
856 unsigned Val;
857 };
858
859 struct TokOp {
860 const char *Data;
861 unsigned Length;
862 };
863
864 struct RegOp {
865 MCRegister RegNum;
866 };
867
868 // A vector register list is a sequential list of 1 to 4 registers.
869 struct VectorListOp {
870 MCRegister RegNum;
871 unsigned Count;
872 unsigned LaneIndex;
873 bool isDoubleSpaced;
874 };
875
876 struct VectorIndexOp {
877 unsigned Val;
878 };
879
880 struct ImmOp {
881 const MCExpr *Val;
882 };
883
884 /// Combined record for all forms of ARM address expressions.
885 struct MemoryOp {
886 MCRegister BaseRegNum;
887 // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
888 // was specified.
889 const MCExpr *OffsetImm; // Offset immediate value
890 MCRegister OffsetRegNum; // Offset register num, when OffsetImm == NULL
891 ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
892 unsigned ShiftImm; // shift for OffsetReg.
893 unsigned Alignment; // 0 = no alignment specified
894 // n = alignment in bytes (2, 4, 8, 16, or 32)
895 unsigned isNegative : 1; // Negated OffsetReg? (~'U' bit)
896 };
897
898 struct PostIdxRegOp {
899 MCRegister RegNum;
900 bool isAdd;
901 ARM_AM::ShiftOpc ShiftTy;
902 unsigned ShiftImm;
903 };
904
905 struct ShifterImmOp {
906 bool isASR;
907 unsigned Imm;
908 };
909
910 struct RegShiftedRegOp {
911 ARM_AM::ShiftOpc ShiftTy;
912 MCRegister SrcReg;
913 MCRegister ShiftReg;
914 unsigned ShiftImm;
915 };
916
917 struct RegShiftedImmOp {
918 ARM_AM::ShiftOpc ShiftTy;
919 MCRegister SrcReg;
920 unsigned ShiftImm;
921 };
922
923 struct RotImmOp {
924 unsigned Imm;
925 };
926
927 struct ModImmOp {
928 unsigned Bits;
929 unsigned Rot;
930 };
931
932 struct BitfieldOp {
933 unsigned LSB;
934 unsigned Width;
935 };
936
937 union {
938 struct CCOp CC;
939 struct VCCOp VCC;
940 struct CopOp Cop;
941 struct CoprocOptionOp CoprocOption;
942 struct MBOptOp MBOpt;
943 struct ISBOptOp ISBOpt;
944 struct TSBOptOp TSBOpt;
945 struct ITMaskOp ITMask;
946 struct IFlagsOp IFlags;
947 struct MMaskOp MMask;
948 struct BankedRegOp BankedReg;
949 struct TokOp Tok;
950 struct RegOp Reg;
951 struct VectorListOp VectorList;
952 struct VectorIndexOp VectorIndex;
953 struct ImmOp Imm;
954 struct MemoryOp Memory;
955 struct PostIdxRegOp PostIdxReg;
956 struct ShifterImmOp ShifterImm;
957 struct RegShiftedRegOp RegShiftedReg;
958 struct RegShiftedImmOp RegShiftedImm;
959 struct RotImmOp RotImm;
960 struct ModImmOp ModImm;
961 struct BitfieldOp Bitfield;
962 };
963
964public:
  // Construct an operand of the given kind; the caller is responsible for
  // filling in the matching union member and source locations.
  ARMOperand(KindTy K, ARMAsmParser &Parser) : Kind(K), Parser(&Parser) {}

  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }

  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  /// getLocRange - Get the range between the first and last token of this
  /// operand.
  SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }

  /// getAlignmentLoc - Get the location of the Alignment token of this operand.
  SMLoc getAlignmentLoc() const {
    assert(Kind == k_Memory && "Invalid access!");
    return AlignmentLoc;
  }
982
984 assert(Kind == k_CondCode && "Invalid access!");
985 return CC.Val;
986 }
987
  // Typed accessors for the operand union. Each asserts that this operand's
  // Kind matches before reading the corresponding union member.

  ARMVCC::VPTCodes getVPTPred() const {
    assert(isVPTPred() && "Invalid access!");
    return VCC.Val;
  }

  // Valid for both coprocessor-number and coprocessor-register operands.
  unsigned getCoproc() const {
    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
    return Cop.Val;
  }

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  MCRegister getReg() const override {
    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
    return Reg.RegNum;
  }

  const SmallVectorImpl<MCRegister> &getRegList() const {
    assert((Kind == k_RegisterList || Kind == k_RegisterListWithAPSR ||
            Kind == k_DPRRegisterList || Kind == k_SPRRegisterList ||
            Kind == k_FPSRegisterListWithVPR ||
            Kind == k_FPDRegisterListWithVPR) &&
           "Invalid access!");
    return Registers;
  }

  const MCExpr *getImm() const {
    assert(isImm() && "Invalid access!");
    return Imm.Val;
  }

  // Constant-pool immediates share the Imm union member with plain
  // immediates but are a distinct kind.
  const MCExpr *getConstantPoolImm() const {
    assert(isConstantPoolImm() && "Invalid access!");
    return Imm.Val;
  }

  unsigned getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  ARM_MB::MemBOpt getMemBarrierOpt() const {
    assert(Kind == k_MemBarrierOpt && "Invalid access!");
    return MBOpt.Val;
  }

  ARM_ISB::InstSyncBOpt getInstSyncBarrierOpt() const {
    assert(Kind == k_InstSyncBarrierOpt && "Invalid access!");
    return ISBOpt.Val;
  }

  ARM_TSB::TraceSyncBOpt getTraceSyncBarrierOpt() const {
    assert(Kind == k_TraceSyncBarrierOpt && "Invalid access!");
    return TSBOpt.Val;
  }

  ARM_PROC::IFlags getProcIFlags() const {
    assert(Kind == k_ProcIFlags && "Invalid access!");
    return IFlags.Val;
  }

  unsigned getMSRMask() const {
    assert(Kind == k_MSRMask && "Invalid access!");
    return MMask.Val;
  }

  unsigned getBankedReg() const {
    assert(Kind == k_BankedReg && "Invalid access!");
    return BankedReg.Val;
  }
1061
1062 bool isCoprocNum() const { return Kind == k_CoprocNum; }
1063 bool isCoprocReg() const { return Kind == k_CoprocReg; }
1064 bool isCoprocOption() const { return Kind == k_CoprocOption; }
1065 bool isCondCode() const { return Kind == k_CondCode; }
1066 bool isVPTPred() const { return Kind == k_VPTPred; }
1067 bool isCCOut() const { return Kind == k_CCOut; }
1068 bool isITMask() const { return Kind == k_ITCondMask; }
1069 bool isITCondCode() const { return Kind == k_CondCode; }
1070 bool isImm() const override {
1071 return Kind == k_Immediate;
1072 }
1073
1074 bool isARMBranchTarget() const {
1075 if (!isImm()) return false;
1076
1077 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
1078 return CE->getValue() % 4 == 0;
1079 return true;
1080 }
1081
1082
1083 bool isThumbBranchTarget() const {
1084 if (!isImm()) return false;
1085
1086 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
1087 return CE->getValue() % 2 == 0;
1088 return true;
1089 }
1090
  // Checks whether this operand is an unsigned offset which fits in a field
  // of specified width and scaled by a specific number of bits.
  template<unsigned width, unsigned scale>
  bool isUnsignedOffset() const {
    if (!isImm()) return false;
    // Symbolic references are resolved later via a fixup, so accept them.
    if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Align = 1LL << scale;
      int64_t Max = Align * ((1LL << width) - 1);
      return ((Val % Align) == 0) && (Val >= 0) && (Val <= Max);
    }
    return false;
  }

  // Checks whether this operand is a signed offset which fits in a field
  // of specified width and scaled by a specific number of bits.
  template<unsigned width, unsigned scale>
  bool isSignedOffset() const {
    if (!isImm()) return false;
    // Symbolic references are resolved later via a fixup, so accept them.
    if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Align = 1LL << scale;
      int64_t Max = Align * ((1LL << (width-1)) - 1);
      int64_t Min = -Align * (1LL << (width-1));
      return ((Val % Align) == 0) && (Val >= Min) && (Val <= Max);
    }
    return false;
  }

  // Checks whether this operand is an offset suitable for the LE /
  // LETP instructions in Arm v8.1M: an even constant in [-4094, -2], or
  // a symbolic expression resolved by a later fixup.
  bool isLEOffset() const {
    if (!isImm()) return false;
    if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      // Backwards-only branch: strictly negative, even, within reach.
      return Val < 0 && Val >= -4094 && (Val & 1) == 0;
    }
    return false;
  }
1133
  // Checks whether this operand is a memory operand computed as an offset
  // applied to PC. The offset may have 8 bits of magnitude and is
  // represented with two bits of shift. Textually it may be either
  // [pc, #imm], #imm or a relocatable expression...
  bool isThumbMemPC() const {
    int64_t Val = 0;
    if (isImm()) {
      // Symbolic references are resolved later via a fixup.
      if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
      if (!CE) return false;
      Val = CE->getValue();
    }
    else if (isGPRMem()) {
      // Must be a pure [pc, #imm] form: immediate offset only, PC base.
      if(!Memory.OffsetImm || Memory.OffsetRegNum) return false;
      if(Memory.BaseRegNum != ARM::PC) return false;
      if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
        Val = CE->getValue();
      else
        return false;
    }
    else return false;
    // Word-aligned offset in [0, 1020].
    return ((Val % 4) == 0) && (Val >= 0) && (Val <= 1020);
  }
1157
1158 bool isFPImm() const {
1159 if (!isImm()) return false;
1160 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1161 if (!CE || !isUInt<32>(CE->getValue()))
1162 return false;
1163 int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
1164 return Val != -1;
1165 }
1166
1167 template<int64_t N, int64_t M>
1168 bool isImmediate() const {
1169 if (!isImm()) return false;
1170 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1171 if (!CE) return false;
1172 int64_t Value = CE->getValue();
1173 return Value >= N && Value <= M;
1174 }
1175
  // Like isImmediate<N, M>, but the value must also be a multiple of 4.
  template<int64_t N, int64_t M>
  bool isImmediateS4() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ((Value & 3) == 0) && Value >= N && Value <= M;
  }
  // Like isImmediate<N, M>, but the value must also be a multiple of 2.
  template<int64_t N, int64_t M>
  bool isImmediateS2() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ((Value & 1) == 0) && Value >= N && Value <= M;
  }
  // Thin range wrappers referenced by the autogenerated matcher.
  bool isFBits16() const {
    return isImmediate<0, 17>();
  }
  bool isFBits32() const {
    return isImmediate<1, 33>();
  }
  bool isImm8s4() const {
    return isImmediateS4<-1020, 1020>();
  }
  bool isImm7s4() const {
    return isImmediateS4<-508, 508>();
  }
  bool isImm7Shift0() const {
    return isImmediate<-127, 127>();
  }
  bool isImm7Shift1() const {
    return isImmediateS2<-255, 255>();
  }
  bool isImm7Shift2() const {
    return isImmediateS4<-511, 511>();
  }
  bool isImm7() const {
    return isImmediate<-127, 127>();
  }
  bool isImm0_1020s4() const {
    return isImmediateS4<0, 1020>();
  }
  bool isImm0_508s4() const {
    return isImmediateS4<0, 508>();
  }
  // Negated multiple of 4 in [4, 508] (i.e. written value in [-508, -4]).
  bool isImm0_508s4Neg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = -CE->getValue();
    // explicitly exclude zero. we want that to use the normal 0_508 version.
    return ((Value & 3) == 0) && Value > 0 && Value <= 508;
  }

  bool isImm0_4095Neg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    // isImm0_4095Neg is used with 32-bit immediates only.
    // 32-bit immediates are zero extended to 64-bit when parsed,
    // thus simple -CE->getValue() results in a big negative number,
    // not a small positive number as intended
    if ((CE->getValue() >> 32) > 0) return false;
    uint32_t Value = -static_cast<uint32_t>(CE->getValue());
    return Value > 0 && Value < 4096;
  }

  bool isImm0_7() const {
    return isImmediate<0, 7>();
  }

  bool isImm1_16() const {
    return isImmediate<1, 16>();
  }

  bool isImm1_32() const {
    return isImmediate<1, 32>();
  }

  bool isImm8_255() const {
    return isImmediate<8, 255>();
  }

  // The *Expr variants accept non-constant expressions, which are resolved
  // later via fixups; only known constants are range-checked here.
  bool isImm0_255Expr() const {
    if (!isImm())
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // If it's not a constant expression, it'll generate a fixup and be
    // handled later.
    if (!CE)
      return true;
    int64_t Value = CE->getValue();
    return isUInt<8>(Value);
  }

  bool isImm256_65535Expr() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // If it's not a constant expression, it'll generate a fixup and be
    // handled later.
    if (!CE) return true;
    int64_t Value = CE->getValue();
    return Value >= 256 && Value < 65536;
  }

  bool isImm0_65535Expr() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // If it's not a constant expression, it'll generate a fixup and be
    // handled later.
    if (!CE) return true;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 65536;
  }

  bool isImm24bit() const {
    return isImmediate<0, 0xffffff + 1>();
  }

  bool isImmThumbSR() const {
    return isImmediate<1, 33>();
  }

  bool isPKHLSLImm() const {
    return isImmediate<0, 32>();
  }

  bool isPKHASRImm() const {
    return isImmediate<0, 33>();
  }
1307
  bool isAdrLabel() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;

    // If it is a constant, it must fit into a modified immediate encoding.
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // Either the value or its negation must be representable as an ARM
    // modified immediate.
    return (ARM_AM::getSOImmVal(Value) != -1 ||
            ARM_AM::getSOImmVal(-Value) != -1);
  }

  bool isT2SOImm() const {
    // If we have an immediate that's not a constant, treat it as an expression
    // needing a fixup.
    if (isImm() && !isa<MCConstantExpr>(getImm())) {
      // We want to avoid matching :upper16: and :lower16: as we want these
      // expressions to match in isImm0_65535Expr()
      const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(getImm());
      return (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 &&
                             ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16));
    }
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // A known constant must be a valid Thumb-2 modified immediate.
    return ARM_AM::getT2SOImmVal(Value) != -1;
  }
1339
1340 bool isT2SOImmNot() const {
1341 if (!isImm()) return false;
1342 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1343 if (!CE) return false;
1344 int64_t Value = CE->getValue();
1345 return ARM_AM::getT2SOImmVal(Value) == -1 &&
1347 }
1348
1349 bool isT2SOImmNeg() const {
1350 if (!isImm()) return false;
1351 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1352 if (!CE) return false;
1353 int64_t Value = CE->getValue();
1354 // Only use this when not representable as a plain so_imm.
1355 return ARM_AM::getT2SOImmVal(Value) == -1 &&
1357 }
1358
1359 bool isSetEndImm() const {
1360 if (!isImm()) return false;
1361 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1362 if (!CE) return false;
1363 int64_t Value = CE->getValue();
1364 return Value == 1 || Value == 0;
1365 }
1366
  bool isReg() const override { return Kind == k_Register; }
  bool isRegList() const { return Kind == k_RegisterList; }
  // A plain register list is also acceptable where APSR is permitted.
  bool isRegListWithAPSR() const {
    return Kind == k_RegisterListWithAPSR || Kind == k_RegisterList;
  }
  bool isDReg() const {
    return isReg() &&
           ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg.RegNum);
  }
  bool isQReg() const {
    return isReg() &&
           ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg.RegNum);
  }
  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
  bool isFPSRegListWithVPR() const { return Kind == k_FPSRegisterListWithVPR; }
  bool isFPDRegListWithVPR() const { return Kind == k_FPDRegisterListWithVPR; }
  bool isToken() const override { return Kind == k_Token; }
  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
  bool isInstSyncBarrierOpt() const { return Kind == k_InstSyncBarrierOpt; }
  bool isTraceSyncBarrierOpt() const { return Kind == k_TraceSyncBarrierOpt; }
  bool isMem() const override {
    return isGPRMem() || isMVEMem();
  }
  // Memory operand whose base (if any) is a GPR or MVE Q register and whose
  // offset register (if any) is an MVE Q register.
  bool isMVEMem() const {
    if (Kind != k_Memory)
      return false;
    if (Memory.BaseRegNum &&
        !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.BaseRegNum) &&
        !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Memory.BaseRegNum))
      return false;
    if (Memory.OffsetRegNum &&
        !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
            Memory.OffsetRegNum))
      return false;
    return true;
  }
  // Memory operand whose base and offset registers (when present) are GPRs.
  bool isGPRMem() const {
    if (Kind != k_Memory)
      return false;
    if (Memory.BaseRegNum &&
        !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.BaseRegNum))
      return false;
    if (Memory.OffsetRegNum &&
        !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.OffsetRegNum))
      return false;
    return true;
  }
  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
  // Register shifted by a register; both must be GPRs.
  bool isRegShiftedReg() const {
    return Kind == k_ShiftedRegister &&
           ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
               RegShiftedReg.SrcReg) &&
           ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
               RegShiftedReg.ShiftReg);
  }
  // Register shifted by an immediate; the source must be a GPR.
  bool isRegShiftedImm() const {
    return Kind == k_ShiftedImmediate &&
           ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
               RegShiftedImm.SrcReg);
  }
  bool isRotImm() const { return Kind == k_RotateImmediate; }
1429
1430 template<unsigned Min, unsigned Max>
1431 bool isPowerTwoInRange() const {
1432 if (!isImm()) return false;
1433 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1434 if (!CE) return false;
1435 int64_t Value = CE->getValue();
1436 return Value > 0 && llvm::popcount((uint64_t)Value) == 1 && Value >= Min &&
1437 Value <= Max;
1438 }
  bool isModImm() const { return Kind == k_ModifiedImmediate; }

  // True iff ~Value is encodable as an ARM modified immediate.
  bool isModImmNot() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getSOImmVal(~Value) != -1;
  }

  // True iff -Value is encodable as an ARM modified immediate while the
  // value itself is not.
  bool isModImmNeg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getSOImmVal(Value) == -1 &&
           ARM_AM::getSOImmVal(-Value) != -1;
  }

  // Negated value in [1, 7] (i.e. the written immediate is in [-7, -1]).
  bool isThumbModImmNeg1_7() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int32_t Value = -(int32_t)CE->getValue();
    return 0 < Value && Value < 8;
  }

  // Negated value in [8, 255] (i.e. the written immediate is in [-255, -8]).
  bool isThumbModImmNeg8_255() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int32_t Value = -(int32_t)CE->getValue();
    return 7 < Value && Value < 256;
  }

  bool isConstantPoolImm() const { return Kind == k_ConstantPoolImmediate; }
  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
  // Post-indexed GPR, possibly with a shift applied.
  bool isPostIdxRegShifted() const {
    return Kind == k_PostIndexRegister &&
           ARMMCRegisterClasses[ARM::GPRRegClassID].contains(PostIdxReg.RegNum);
  }
  // Post-indexed GPR with no shift applied.
  bool isPostIdxReg() const {
    return isPostIdxRegShifted() && PostIdxReg.ShiftTy == ARM_AM::no_shift;
  }
  // Memory operand with no offset of any kind. When alignOK is false the
  // operand's alignment must exactly equal Alignment.
  bool isMemNoOffset(bool alignOK = false, unsigned Alignment = 0) const {
    if (!isGPRMem())
      return false;
    // No offset of any kind.
    return !Memory.OffsetRegNum && Memory.OffsetImm == nullptr &&
           (alignOK || Memory.Alignment == Alignment);
  }
  // As isMemNoOffset, but the base register must be in GPRnopc (no PC).
  bool isMemNoOffsetT2(bool alignOK = false, unsigned Alignment = 0) const {
    if (!isGPRMem())
      return false;

    if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
            Memory.BaseRegNum))
      return false;

    // No offset of any kind.
    return !Memory.OffsetRegNum && Memory.OffsetImm == nullptr &&
           (alignOK || Memory.Alignment == Alignment);
  }
  // As isMemNoOffset, but the base register must be in rGPR.
  bool isMemNoOffsetT2NoSp(bool alignOK = false, unsigned Alignment = 0) const {
    if (!isGPRMem())
      return false;

    if (!ARMMCRegisterClasses[ARM::rGPRRegClassID].contains(
            Memory.BaseRegNum))
      return false;

    // No offset of any kind.
    return !Memory.OffsetRegNum && Memory.OffsetImm == nullptr &&
           (alignOK || Memory.Alignment == Alignment);
  }
  // As isMemNoOffset, but the base register must be in tGPR.
  bool isMemNoOffsetT(bool alignOK = false, unsigned Alignment = 0) const {
    if (!isGPRMem())
      return false;

    if (!ARMMCRegisterClasses[ARM::tGPRRegClassID].contains(
            Memory.BaseRegNum))
      return false;

    // No offset of any kind.
    return !Memory.OffsetRegNum && Memory.OffsetImm == nullptr &&
           (alignOK || Memory.Alignment == Alignment);
  }
  bool isMemPCRelImm12() const {
    if (!isGPRMem() || Memory.OffsetRegNum || Memory.Alignment != 0)
      return false;
    // Base register must be PC.
    if (Memory.BaseRegNum != ARM::PC)
      return false;
    // Immediate offset in range [-4095, 4095].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      // INT32_MIN is the special #-0 encoding.
      return (Val > -4096 && Val < 4096) ||
             (Val == std::numeric_limits<int32_t>::min());
    }
    return false;
  }

  bool isAlignedMemory() const {
    return isMemNoOffset(true);
  }

  bool isAlignedMemoryNone() const {
    return isMemNoOffset(false, 0);
  }

  bool isDupAlignedMemoryNone() const {
    return isMemNoOffset(false, 0);
  }

  bool isAlignedMemory16() const {
    if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
      return true;
    return isMemNoOffset(false, 0);
  }

  bool isDupAlignedMemory16() const {
    if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
      return true;
    return isMemNoOffset(false, 0);
  }

  bool isAlignedMemory32() const {
    if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
      return true;
    return isMemNoOffset(false, 0);
  }

  bool isDupAlignedMemory32() const {
    if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
      return true;
    return isMemNoOffset(false, 0);
  }

  bool isAlignedMemory64() const {
    if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
      return true;
    return isMemNoOffset(false, 0);
  }

  bool isDupAlignedMemory64() const {
    if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
      return true;
    return isMemNoOffset(false, 0);
  }

  bool isAlignedMemory64or128() const {
    if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
      return true;
    if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
      return true;
    return isMemNoOffset(false, 0);
  }

  bool isDupAlignedMemory64or128() const {
    if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
      return true;
    if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
      return true;
    return isMemNoOffset(false, 0);
  }

  bool isAlignedMemory64or128or256() const {
    if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
      return true;
    if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
      return true;
    if (isMemNoOffset(false, 32)) // alignment in bytes for 256-bits is 32.
      return true;
    return isMemNoOffset(false, 0);
  }
1615
  bool isAddrMode2() const {
    if (!isGPRMem() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return true;
    // Immediate offset in range [-4095, 4095].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return Val > -4096 && Val < 4096;
    }
    return false;
  }

  // Stand-alone immediate usable as an addr-mode 2 (post-indexed) offset.
  bool isAM2OffsetImm() const {
    if (!isImm()) return false;
    // Immediate offset in range [-4095, 4095].
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return (Val == std::numeric_limits<int32_t>::min()) ||
           (Val > -4096 && Val < 4096);
  }

  bool isAddrMode3() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isGPRMem() || Memory.Alignment != 0) return false;
    // No shifts are legal for AM3.
    if (Memory.ShiftType != ARM_AM::no_shift) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return true;
    // Immediate offset in range [-255, 255].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      // The #-0 offset is encoded as std::numeric_limits<int32_t>::min(), and
      // we have to check for this too.
      return (Val > -256 && Val < 256) ||
             Val == std::numeric_limits<int32_t>::min();
    }
    return false;
  }

  // Addr-mode 3 post-indexed offset: a plain register, or an immediate in
  // [-255, 255] (with #-0 encoded as INT32_MIN).
  bool isAM3Offset() const {
    if (isPostIdxReg())
      return true;
    if (!isImm())
      return false;
    // Immediate offset in range [-255, 255].
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    // Special case, #-0 is std::numeric_limits<int32_t>::min().
    return (Val > -256 && Val < 256) ||
           Val == std::numeric_limits<int32_t>::min();
  }

  bool isAddrMode5() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isGPRMem() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return false;
    // Immediate offset in range [-1020, 1020] and a multiple of 4.
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      // INT32_MIN is the special #-0 encoding.
      return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
             Val == std::numeric_limits<int32_t>::min();
    }
    return false;
  }

  bool isAddrMode5FP16() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isGPRMem() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return false;
    // Immediate offset in range [-510, 510] and a multiple of 2.
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      // INT32_MIN is the special #-0 encoding.
      return (Val >= -510 && Val <= 510 && ((Val & 1) == 0)) ||
             Val == std::numeric_limits<int32_t>::min();
    }
    return false;
  }
1713
  // TBB form: [Rn, Rm] with no shift, negation or alignment.
  bool isMemTBB() const {
    if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
      return false;
    return true;
  }

  // TBH form: [Rn, Rm, lsl #1].
  bool isMemTBH() const {
    if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
        Memory.Alignment != 0 )
      return false;
    return true;
  }

  // Any register-offset memory operand with no alignment specifier.
  bool isMemRegOffset() const {
    if (!isGPRMem() || !Memory.OffsetRegNum || Memory.Alignment != 0)
      return false;
    return true;
  }

  bool isT2MemRegOffset() const {
    if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.Alignment != 0 || Memory.BaseRegNum == ARM::PC)
      return false;
    // Only lsl #{0, 1, 2, 3} allowed.
    if (Memory.ShiftType == ARM_AM::no_shift)
      return true;
    if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
      return false;
    return true;
  }

  bool isMemThumbRR() const {
    // Thumb reg+reg addressing is simple. Just two registers, a base and
    // an offset. No shifts, negations or any other complicating factors.
    if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
      return false;
    // Both registers must be low (Thumb-accessible) registers.
    return isARMLowRegister(Memory.BaseRegNum) &&
           (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
  }

  bool isMemThumbRIs4() const {
    if (!isGPRMem() || Memory.OffsetRegNum ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 124].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return Val >= 0 && Val <= 124 && (Val % 4) == 0;
    }
    return false;
  }

  bool isMemThumbRIs2() const {
    if (!isGPRMem() || Memory.OffsetRegNum ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 2 in range [0, 62].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return Val >= 0 && Val <= 62 && (Val % 2) == 0;
    }
    return false;
  }

  bool isMemThumbRIs1() const {
    if (!isGPRMem() || Memory.OffsetRegNum ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 31].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return Val >= 0 && Val <= 31;
    }
    return false;
  }

  // SP-relative Thumb load/store: [sp, #imm].
  bool isMemThumbSPI() const {
    if (!isGPRMem() || Memory.OffsetRegNum || Memory.BaseRegNum != ARM::SP ||
        Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
    }
    return false;
  }
1808
  bool isMemImm8s4Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isGPRMem() || Memory.OffsetRegNum || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [-1020, 1020].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      // Special case, #-0 is std::numeric_limits<int32_t>::min().
      return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) ||
             Val == std::numeric_limits<int32_t>::min();
    }
    return false;
  }

  bool isMemImm7s4Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    // Base must be a GPR other than PC.
    if (!isGPRMem() || Memory.OffsetRegNum || Memory.Alignment != 0 ||
        !ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
            Memory.BaseRegNum))
      return false;
    // Immediate offset a multiple of 4 in range [-508, 508].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      // Special case, #-0 is INT32_MIN.
      return (Val >= -508 && Val <= 508 && (Val & 3) == 0) || Val == INT32_MIN;
    }
    return false;
  }

  bool isMemImm0_1020s4Offset() const {
    if (!isGPRMem() || Memory.OffsetRegNum || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
    }
    return false;
  }

  bool isMemImm8Offset() const {
    if (!isGPRMem() || Memory.OffsetRegNum || Memory.Alignment != 0)
      return false;
    // Base reg of PC isn't allowed for these encodings.
    if (Memory.BaseRegNum == ARM::PC) return false;
    // Immediate offset in range [-255, 255].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      // INT32_MIN is the special #-0 encoding.
      return (Val == std::numeric_limits<int32_t>::min()) ||
             (Val > -256 && Val < 256);
    }
    return false;
  }

  // Immediate-offset memory operand whose offset, once divided by
  // (1 << Bits), lies in [-127, 127]; the base must be in RegClassID.
  template<unsigned Bits, unsigned RegClassID>
  bool isMemImm7ShiftedOffset() const {
    if (!isGPRMem() || Memory.OffsetRegNum || Memory.Alignment != 0 ||
        !ARMMCRegisterClasses[RegClassID].contains(Memory.BaseRegNum))
      return false;

    // Expect an immediate offset equal to an element of the range
    // [-127, 127], shifted left by Bits.

    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();

      // INT32_MIN is a special-case value (indicating the encoding with
      // zero offset and the subtract bit set)
      if (Val == INT32_MIN)
        return true;

      unsigned Divisor = 1U << Bits;

      // Check that the low bits are zero
      if (Val % Divisor != 0)
        return false;

      // Check that the remaining offset is within range.
      Val /= Divisor;
      return (Val >= -127 && Val <= 127);
    }
    return false;
  }
1905
  // MVE form [Rn, Qm] (or [Rn, Qm, uxtw #shift] when shift > 0): GPR base
  // (not PC), MVE Q-register offset, no immediate offset.
  template <int shift> bool isMemRegRQOffset() const {
    if (!isMVEMem() || Memory.OffsetImm != nullptr || Memory.Alignment != 0)
      return false;

    if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
            Memory.BaseRegNum))
      return false;
    if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
            Memory.OffsetRegNum))
      return false;

    if (shift == 0 && Memory.ShiftType != ARM_AM::no_shift)
      return false;

    if (shift > 0 &&
        (Memory.ShiftType != ARM_AM::uxtw || Memory.ShiftImm != shift))
      return false;

    return true;
  }

  // MVE form [Qn, #imm]: Q-register base with an immediate offset that is a
  // multiple of (1 << shift) inside the scaled 7-bit range.
  template <int shift> bool isMemRegQOffset() const {
    if (!isMVEMem() || Memory.OffsetRegNum || Memory.Alignment != 0)
      return false;

    if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
            Memory.BaseRegNum))
      return false;

    if (!Memory.OffsetImm)
      return true;
    static_assert(shift < 56,
                  "Such that we dont shift by a value higher than 62");
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();

      // The value must be a multiple of (1 << shift)
      if ((Val & ((1U << shift) - 1)) != 0)
        return false;

      // And be in the right range, depending on the amount that it is shifted
      // by. Shift 0, is equal to 7 unsigned bits, the sign bit is set
      // separately.
      int64_t Range = (1U << (7 + shift)) - 1;
      // INT32_MIN is the special #-0 encoding.
      return (Val == INT32_MIN) || (Val > -Range && Val < Range);
    }
    return false;
  }
1954
  bool isMemPosImm8Offset() const {
    if (!isGPRMem() || Memory.OffsetRegNum || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 255].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return Val >= 0 && Val < 256;
    }
    return false;
  }

  bool isMemNegImm8Offset() const {
    if (!isGPRMem() || Memory.OffsetRegNum || Memory.Alignment != 0)
      return false;
    // Base reg of PC isn't allowed for these encodings.
    if (Memory.BaseRegNum == ARM::PC) return false;
    // Immediate offset in range [-255, -1].
    if (!Memory.OffsetImm) return false;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      // INT32_MIN is the special #-0 encoding.
      return (Val == std::numeric_limits<int32_t>::min()) ||
             (Val > -256 && Val < 0);
    }
    return false;
  }

  bool isMemUImm12Offset() const {
    if (!isGPRMem() || Memory.OffsetRegNum || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 4095].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return (Val >= 0 && Val < 4096);
    }
    return false;
  }

  bool isMemImm12Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.

    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;

    if (!isGPRMem() || Memory.OffsetRegNum || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [-4095, 4095].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return (Val > -4096 && Val < 4096) ||
             (Val == std::numeric_limits<int32_t>::min());
    }
    // If we have an immediate that's not a constant, treat it as a
    // symbolic expression needing a fixup.
    return true;
  }

  bool isConstPoolAsmImm() const {
    // Delay processing of Constant Pool Immediate, this will turn into
    // a constant. Match no other operand
    return (isConstantPoolImm());
  }
2021
2022 bool isPostIdxImm8() const {
2023 if (!isImm()) return false;
2024 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2025 if (!CE) return false;
2026 int64_t Val = CE->getValue();
2027 return (Val > -256 && Val < 256) ||
2028 (Val == std::numeric_limits<int32_t>::min());
2029 }
2030
2031 bool isPostIdxImm8s4() const {
2032 if (!isImm()) return false;
2033 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2034 if (!CE) return false;
2035 int64_t Val = CE->getValue();
2036 return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
2037 (Val == std::numeric_limits<int32_t>::min());
2038 }
2039
  // Simple kind predicates for special-register and flags operands.
  bool isMSRMask() const { return Kind == k_MSRMask; }
  bool isBankedReg() const { return Kind == k_BankedReg; }
  bool isProcIFlags() const { return Kind == k_ProcIFlags; }

  // NEON operands.

  // True for any of the three vector-list operand kinds.
  bool isAnyVectorList() const {
    return Kind == k_VectorList || Kind == k_VectorListAllLanes ||
           Kind == k_VectorListIndexed;
  }

  bool isVectorList() const { return Kind == k_VectorList; }

  // Plain vector lists, distinguished by whether consecutive list entries
  // are one or two registers apart.
  bool isSingleSpacedVectorList() const {
    return Kind == k_VectorList && !VectorList.isDoubleSpaced;
  }

  bool isDoubleSpacedVectorList() const {
    return Kind == k_VectorList && VectorList.isDoubleSpaced;
  }
2059
2060 bool isVecListOneD() const {
2061 // We convert a single D reg to a list containing a D reg
2062 if (isDReg() && !Parser->hasMVE())
2063 return true;
2064 if (!isSingleSpacedVectorList()) return false;
2065 return VectorList.Count == 1;
2066 }
2067
  // A single-spaced list of two MVE Q registers.
  bool isVecListTwoMQ() const {
    return isSingleSpacedVectorList() && VectorList.Count == 2 &&
           ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
               VectorList.RegNum);
  }

  // A pair of consecutive D registers (Dn, Dn+1).
  bool isVecListDPair() const {
    // We convert a single Q reg to a list with the two corresponding D
    // registers
    if (isQReg() && !Parser->hasMVE())
      return true;
    if (!isSingleSpacedVectorList()) return false;
    return (ARMMCRegisterClasses[ARM::DPairRegClassID]
              .contains(VectorList.RegNum));
  }

  // Single-spaced lists of three / four D registers.
  bool isVecListThreeD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 3;
  }

  bool isVecListFourD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 4;
  }

  // A pair of D registers spaced two apart (Dn, Dn+2).
  bool isVecListDPairSpaced() const {
    if (Kind != k_VectorList) return false;
    if (isSingleSpacedVectorList()) return false;
    return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
              .contains(VectorList.RegNum));
  }

  // Double-spaced lists of three / four registers.
  bool isVecListThreeQ() const {
    if (!isDoubleSpacedVectorList()) return false;
    return VectorList.Count == 3;
  }

  bool isVecListFourQ() const {
    if (!isDoubleSpacedVectorList()) return false;
    return VectorList.Count == 4;
  }

  // A single-spaced list of four MVE Q registers.
  bool isVecListFourMQ() const {
    return isSingleSpacedVectorList() && VectorList.Count == 4 &&
           ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
               VectorList.RegNum);
  }
2116
  // "All lanes" vector lists (the Dn[] syntax), again split by spacing.
  bool isSingleSpacedVectorAllLanes() const {
    return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
  }

  bool isDoubleSpacedVectorAllLanes() const {
    return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
  }

  bool isVecListOneDAllLanes() const {
    if (!isSingleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 1;
  }

  // Note: classified by register class rather than count.
  bool isVecListDPairAllLanes() const {
    if (!isSingleSpacedVectorAllLanes()) return false;
    return (ARMMCRegisterClasses[ARM::DPairRegClassID]
              .contains(VectorList.RegNum));
  }

  bool isVecListDPairSpacedAllLanes() const {
    if (!isDoubleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 2;
  }

  bool isVecListThreeDAllLanes() const {
    if (!isSingleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 3;
  }

  bool isVecListThreeQAllLanes() const {
    if (!isDoubleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 3;
  }

  bool isVecListFourDAllLanes() const {
    if (!isSingleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 4;
  }

  bool isVecListFourQAllLanes() const {
    if (!isDoubleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 4;
  }
2160
  // Lane-indexed vector lists (the Dn[x] syntax). The maximum lane index
  // in each predicate below follows from the element width: 7 for bytes,
  // 3 for half-words, 1 for words within a 64-bit register.
  bool isSingleSpacedVectorIndexed() const {
    return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
  }

  bool isDoubleSpacedVectorIndexed() const {
    return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
  }

  bool isVecListOneDByteIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
  }

  bool isVecListOneDHWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
  }

  bool isVecListOneDWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
  }

  bool isVecListTwoDByteIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
  }

  bool isVecListTwoDHWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
  }

  bool isVecListTwoQWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
  }

  bool isVecListTwoQHWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
  }

  bool isVecListTwoDWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
  }

  bool isVecListThreeDByteIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
  }

  bool isVecListThreeDHWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
  }

  bool isVecListThreeQWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
  }

  bool isVecListThreeQHWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
  }

  bool isVecListThreeDWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
  }

  bool isVecListFourDByteIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
  }

  bool isVecListFourDHWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
  }

  bool isVecListFourQWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
  }

  bool isVecListFourQHWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
  }

  bool isVecListFourDWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
  }
2258
  bool isVectorIndex() const { return Kind == k_VectorIndex; }

  // True if this is a vector lane index strictly below NumLanes.
  template <unsigned NumLanes>
  bool isVectorIndexInRange() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < NumLanes;
  }

  // Lane-count limits for 8/16/32/64-bit elements.
  bool isVectorIndex8() const { return isVectorIndexInRange<8>(); }
  bool isVectorIndex16() const { return isVectorIndexInRange<4>(); }
  bool isVectorIndex32() const { return isVectorIndexInRange<2>(); }
  bool isVectorIndex64() const { return isVectorIndexInRange<1>(); }
2271
2272 template<int PermittedValue, int OtherPermittedValue>
2273 bool isMVEPairVectorIndex() const {
2274 if (Kind != k_VectorIndex) return false;
2275 return VectorIndex.Val == PermittedValue ||
2276 VectorIndex.Val == OtherPermittedValue;
2277 }
2278
2279 bool isNEONi8splat() const {
2280 if (!isImm()) return false;
2281 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2282 // Must be a constant.
2283 if (!CE) return false;
2284 int64_t Value = CE->getValue();
2285 // i8 value splatted across 8 bytes. The immediate is just the 8 byte
2286 // value.
2287 return Value >= 0 && Value < 256;
2288 }
2289
2290 bool isNEONi16splat() const {
2291 if (isNEONByteReplicate(2))
2292 return false; // Leave that for bytes replication and forbid by default.
2293 if (!isImm())
2294 return false;
2295 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2296 // Must be a constant.
2297 if (!CE) return false;
2298 unsigned Value = CE->getValue();
2300 }
2301
2302 bool isNEONi16splatNot() const {
2303 if (!isImm())
2304 return false;
2305 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2306 // Must be a constant.
2307 if (!CE) return false;
2308 unsigned Value = CE->getValue();
2309 return ARM_AM::isNEONi16splat(~Value & 0xffff);
2310 }
2311
2312 bool isNEONi32splat() const {
2313 if (isNEONByteReplicate(4))
2314 return false; // Leave that for bytes replication and forbid by default.
2315 if (!isImm())
2316 return false;
2317 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2318 // Must be a constant.
2319 if (!CE) return false;
2320 unsigned Value = CE->getValue();
2322 }
2323
2324 bool isNEONi32splatNot() const {
2325 if (!isImm())
2326 return false;
2327 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2328 // Must be a constant.
2329 if (!CE) return false;
2330 unsigned Value = CE->getValue();
2332 }
2333
2334 static bool isValidNEONi32vmovImm(int64_t Value) {
2335 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
2336 // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
2337 return ((Value & 0xffffffffffffff00) == 0) ||
2338 ((Value & 0xffffffffffff00ff) == 0) ||
2339 ((Value & 0xffffffffff00ffff) == 0) ||
2340 ((Value & 0xffffffff00ffffff) == 0) ||
2341 ((Value & 0xffffffffffff00ff) == 0xff) ||
2342 ((Value & 0xffffffffff00ffff) == 0xffff);
2343 }
2344
  // True if the immediate is NumElems copies of one Width-bit element
  // (optionally of its complement). Zero is rejected; a 16-bit element must
  // fit entirely in one of its two bytes; a 32-bit element must itself be a
  // valid i32 VMOV immediate.
  bool isNEONReplicate(unsigned Width, unsigned NumElems, bool Inv) const {
    assert((Width == 8 || Width == 16 || Width == 32) &&
           "Invalid element width");
    assert(NumElems * Width <= 64 && "Invalid result width");

    if (!isImm())
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE)
      return false;
    int64_t Value = CE->getValue();
    if (!Value)
      return false; // Don't bother with zero.
    if (Inv)
      Value = ~Value;

    uint64_t Mask = (1ull << Width) - 1;
    uint64_t Elem = Value & Mask;
    if (Width == 16 && (Elem & 0x00ff) != 0 && (Elem & 0xff00) != 0)
      return false;
    if (Width == 32 && !isValidNEONi32vmovImm(Elem))
      return false;

    // Every remaining element must repeat the first one exactly.
    for (unsigned i = 1; i < NumElems; ++i) {
      Value >>= Width;
      if ((Value & Mask) != Elem)
        return false;
    }
    return true;
  }

  bool isNEONByteReplicate(unsigned NumBytes) const {
    return isNEONReplicate(8, NumBytes, false);
  }

  // Sanity-check the template arguments of the two replicate predicates
  // below.
  static void checkNeonReplicateArgs(unsigned FromW, unsigned ToW) {
    assert((FromW == 8 || FromW == 16 || FromW == 32) &&
           "Invalid source width");
    assert((ToW == 16 || ToW == 32 || ToW == 64) &&
           "Invalid destination width");
    assert(FromW < ToW && "ToW is not less than FromW");
  }

  // A ToW-bit immediate built by replicating a FromW-bit value. 64-bit
  // byte-mask splats are excluded; they match isNEONi64splat instead.
  template<unsigned FromW, unsigned ToW>
  bool isNEONmovReplicate() const {
    checkNeonReplicateArgs(FromW, ToW);
    if (ToW == 64 && isNEONi64splat())
      return false;
    return isNEONReplicate(FromW, ToW / FromW, false);
  }

  // As isNEONmovReplicate, but on the bitwise complement (VMVN forms).
  template<unsigned FromW, unsigned ToW>
  bool isNEONinvReplicate() const {
    checkNeonReplicateArgs(FromW, ToW);
    return isNEONReplicate(FromW, ToW / FromW, true);
  }
2402
2403 bool isNEONi32vmov() const {
2404 if (isNEONByteReplicate(4))
2405 return false; // Let it to be classified as byte-replicate case.
2406 if (!isImm())
2407 return false;
2408 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2409 // Must be a constant.
2410 if (!CE)
2411 return false;
2412 return isValidNEONi32vmovImm(CE->getValue());
2413 }
2414
2415 bool isNEONi32vmovNeg() const {
2416 if (!isImm()) return false;
2417 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2418 // Must be a constant.
2419 if (!CE) return false;
2420 return isValidNEONi32vmovImm(~CE->getValue());
2421 }
2422
2423 bool isNEONi64splat() const {
2424 if (!isImm()) return false;
2425 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2426 // Must be a constant.
2427 if (!CE) return false;
2428 uint64_t Value = CE->getValue();
2429 // i64 value with each byte being either 0 or 0xff.
2430 for (unsigned i = 0; i < 8; ++i, Value >>= 8)
2431 if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
2432 return true;
2433 }
2434
2435 template<int64_t Angle, int64_t Remainder>
2436 bool isComplexRotation() const {
2437 if (!isImm()) return false;
2438
2439 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2440 if (!CE) return false;
2441 uint64_t Value = CE->getValue();
2442
2443 return (Value % Angle == Remainder && Value <= 270);
2444 }
2445
2446 bool isMVELongShift() const {
2447 if (!isImm()) return false;
2448 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2449 // Must be a constant.
2450 if (!CE) return false;
2451 uint64_t Value = CE->getValue();
2452 return Value >= 1 && Value <= 32;
2453 }
2454
2455 bool isMveSaturateOp() const {
2456 if (!isImm()) return false;
2457 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2458 if (!CE) return false;
2459 uint64_t Value = CE->getValue();
2460 return Value == 48 || Value == 64;
2461 }
2462
2463 bool isITCondCodeNoAL() const {
2464 if (!isITCondCode()) return false;
2466 return CC != ARMCC::AL;
2467 }
2468
2469 bool isITCondCodeRestrictedI() const {
2470 if (!isITCondCode())
2471 return false;
2473 return CC == ARMCC::EQ || CC == ARMCC::NE;
2474 }
2475
2476 bool isITCondCodeRestrictedS() const {
2477 if (!isITCondCode())
2478 return false;
2480 return CC == ARMCC::LT || CC == ARMCC::GT || CC == ARMCC::LE ||
2481 CC == ARMCC::GE;
2482 }
2483
2484 bool isITCondCodeRestrictedU() const {
2485 if (!isITCondCode())
2486 return false;
2488 return CC == ARMCC::HS || CC == ARMCC::HI;
2489 }
2490
2491 bool isITCondCodeRestrictedFP() const {
2492 if (!isITCondCode())
2493 return false;
2495 return CC == ARMCC::EQ || CC == ARMCC::NE || CC == ARMCC::LT ||
2496 CC == ARMCC::GT || CC == ARMCC::LE || CC == ARMCC::GE;
2497 }
2498
2499 void setVecListDPair(unsigned int DPair) {
2500 Kind = k_VectorList;
2501 VectorList.RegNum = DPair;
2502 VectorList.Count = 2;
2503 VectorList.isDoubleSpaced = false;
2504 }
2505
2506 void setVecListOneD(unsigned int DReg) {
2507 Kind = k_VectorList;
2508 VectorList.RegNum = DReg;
2509 VectorList.Count = 1;
2510 VectorList.isDoubleSpaced = false;
2511 }
2512
2513 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
2514 // Add as immediates when possible. Null MCExpr = 0.
2515 if (!Expr)
2517 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
2518 Inst.addOperand(MCOperand::createImm(CE->getValue()));
2519 else
2521 }
2522
  // Branch targets go through addExpr so constants fold to immediates and
  // symbols stay as expressions for fixups.
  void addARMBranchTargetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addThumbBranchTargetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }
2532
2533 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
2534 assert(N == 2 && "Invalid number of operands!");
2535 Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
2536 unsigned RegNum = getCondCode() == ARMCC::AL ? ARM::NoRegister : ARM::CPSR;
2537 Inst.addOperand(MCOperand::createReg(RegNum));
2538 }
2539
2540 void addVPTPredNOperands(MCInst &Inst, unsigned N) const {
2541 assert(N == 3 && "Invalid number of operands!");
2542 Inst.addOperand(MCOperand::createImm(unsigned(getVPTPred())));
2543 unsigned RegNum = getVPTPred() == ARMVCC::None ? ARM::NoRegister : ARM::P0;
2544 Inst.addOperand(MCOperand::createReg(RegNum));
2546 }
2547
  // vpred_r operands: the three vpred_n operands plus the inactive-lanes
  // register, which must be tied to one of the instruction's outputs.
  void addVPTPredROperands(MCInst &Inst, unsigned N) const {
    assert(N == 4 && "Invalid number of operands!");
    addVPTPredNOperands(Inst, N-1);
    MCRegister RegNum;
    if (getVPTPred() == ARMVCC::None) {
      // Unpredicated: no inactive-lanes source register.
      RegNum = ARM::NoRegister;
    } else {
      // Find the output operand this one is tied to and reuse its register
      // as the inactive-lanes value.
      unsigned NextOpIndex = Inst.getNumOperands();
      auto &MCID = Parser->getInstrDesc(Inst.getOpcode());
      int TiedOp = MCID.getOperandConstraint(NextOpIndex, MCOI::TIED_TO);
      assert(TiedOp >= 0 &&
             "Inactive register in vpred_r is not tied to an output!");
      RegNum = Inst.getOperand(TiedOp).getReg();
    }
    Inst.addOperand(MCOperand::createReg(RegNum));
  }
2564
  // Coprocessor number/register/option operands are all stored as raw
  // immediates.
  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getCoproc()));
  }

  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getCoproc()));
  }

  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(CoprocOption.Val));
  }

  // IT-block mask and condition code, emitted as raw immediates.
  void addITMaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(ITMask.Mask));
  }

  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
  }
2589
2590 void addITCondCodeInvOperands(MCInst &Inst, unsigned N) const {
2591 assert(N == 1 && "Invalid number of operands!");
2593 }
2594
2595 void addCCOutOperands(MCInst &Inst, unsigned N) const {
2596 assert(N == 1 && "Invalid number of operands!");
2598 }
2599
2600 void addRegOperands(MCInst &Inst, unsigned N) const {
2601 assert(N == 1 && "Invalid number of operands!");
2603 }
2604
2605 void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
2606 assert(N == 3 && "Invalid number of operands!");
2607 assert(isRegShiftedReg() &&
2608 "addRegShiftedRegOperands() on non-RegShiftedReg!");
2609 Inst.addOperand(MCOperand::createReg(RegShiftedReg.SrcReg));
2610 Inst.addOperand(MCOperand::createReg(RegShiftedReg.ShiftReg));
2612 ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
2613 }
2614
2615 void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
2616 assert(N == 2 && "Invalid number of operands!");
2617 assert(isRegShiftedImm() &&
2618 "addRegShiftedImmOperands() on non-RegShiftedImm!");
2619 Inst.addOperand(MCOperand::createReg(RegShiftedImm.SrcReg));
2620 // Shift of #32 is encoded as 0 where permitted
2621 unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm);
2623 ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, Imm)));
2624 }
2625
2626 void addShifterImmOperands(MCInst &Inst, unsigned N) const {
2627 assert(N == 1 && "Invalid number of operands!");
2628 Inst.addOperand(MCOperand::createImm((ShifterImm.isASR << 5) |
2629 ShifterImm.Imm));
2630 }
2631
2632 void addRegListOperands(MCInst &Inst, unsigned N) const {
2633 assert(N == 1 && "Invalid number of operands!");
2634 const SmallVectorImpl<MCRegister> &RegList = getRegList();
2635 for (MCRegister Reg : RegList)
2637 }
2638
2639 void addRegListWithAPSROperands(MCInst &Inst, unsigned N) const {
2640 assert(N == 1 && "Invalid number of operands!");
2641 const SmallVectorImpl<MCRegister> &RegList = getRegList();
2642 for (MCRegister Reg : RegList)
2644 }
2645
  // The typed register-list adders all defer to the generic list emission.
  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addFPSRegListWithVPROperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addFPDRegListWithVPROperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }
2661
2662 void addRotImmOperands(MCInst &Inst, unsigned N) const {
2663 assert(N == 1 && "Invalid number of operands!");
2664 // Encoded as val>>3. The printer handles display as 8, 16, 24.
2665 Inst.addOperand(MCOperand::createImm(RotImm.Imm >> 3));
2666 }
2667
2668 void addModImmOperands(MCInst &Inst, unsigned N) const {
2669 assert(N == 1 && "Invalid number of operands!");
2670
2671 // Support for fixups (MCFixup)
2672 if (isImm())
2673 return addImmOperands(Inst, N);
2674
2675 Inst.addOperand(MCOperand::createImm(ModImm.Bits | (ModImm.Rot << 7)));
2676 }
2677
2678 void addModImmNotOperands(MCInst &Inst, unsigned N) const {
2679 assert(N == 1 && "Invalid number of operands!");
2680 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2681 uint32_t Enc = ARM_AM::getSOImmVal(~CE->getValue());
2683 }
2684
2685 void addModImmNegOperands(MCInst &Inst, unsigned N) const {
2686 assert(N == 1 && "Invalid number of operands!");
2687 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2688 uint32_t Enc = ARM_AM::getSOImmVal(-CE->getValue());
2690 }
2691
2692 void addThumbModImmNeg8_255Operands(MCInst &Inst, unsigned N) const {
2693 assert(N == 1 && "Invalid number of operands!");
2694 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2695 uint32_t Val = -CE->getValue();
2697 }
2698
2699 void addThumbModImmNeg1_7Operands(MCInst &Inst, unsigned N) const {
2700 assert(N == 1 && "Invalid number of operands!");
2701 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2702 uint32_t Val = -CE->getValue();
2704 }
2705
  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Munge the lsb/width into a bitfield mask.
    unsigned lsb = Bitfield.LSB;
    unsigned width = Bitfield.Width;
    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
                      (32 - (lsb + width)));
    Inst.addOperand(MCOperand::createImm(Mask));
  }

  // Generic immediate: constants fold to immediates, symbols stay as
  // expressions (see addExpr).
  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  // Fixed-point fraction bits are encoded as (element size - #fbits).
  void addFBits16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(16 - CE->getValue()));
  }

  void addFBits32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(32 - CE->getValue()));
  }
2733
2734 void addFPImmOperands(MCInst &Inst, unsigned N) const {
2735 assert(N == 1 && "Invalid number of operands!");
2736 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2737 int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
2739 }
2740
  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // FIXME: We really want to scale the value here, but the LDRD/STRD
    // instruction don't encode operands that way yet.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue()));
  }

  void addImm7s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // FIXME: We really want to scale the value here, but the VSTR/VLDR_VSYSR
    // instruction don't encode operands that way yet.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue()));
  }

  // The imm7 variants are stored unscaled; the shift in the name refers to
  // the validation/encoding granularity, not to a transform applied here.
  void addImm7Shift0Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue()));
  }

  void addImm7Shift1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue()));
  }

  void addImm7Shift2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue()));
  }

  void addImm7Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue()));
  }
2780
  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
  }

  // As above, but negated (for SUB-style aliases).
  void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(-(CE->getValue() / 4)));
  }

  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
  }

  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate-1, and we store in the instruction
    // the bits as encoded, so subtract off one here.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
  }

  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate-1, and we store in the instruction
    // the bits as encoded, so subtract off one here.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
  }
2820
2821 void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
2822 assert(N == 1 && "Invalid number of operands!");
2823 // The constant encodes as the immediate, except for 32, which encodes as
2824 // zero.
2825 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2826 unsigned Imm = CE->getValue();
2827 Inst.addOperand(MCOperand::createImm((Imm == 32 ? 0 : Imm)));
2828 }
2829
2830 void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
2831 assert(N == 1 && "Invalid number of operands!");
2832 // An ASR value of 32 encodes as 0, so that's how we want to add it to
2833 // the instruction as well.
2834 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2835 int Val = CE->getValue();
2836 Inst.addOperand(MCOperand::createImm(Val == 32 ? 0 : Val));
2837 }
2838
  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a t2_so_imm, but we have its bitwise
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(~(uint32_t)CE->getValue()));
  }

  void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a t2_so_imm, but we have its
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(-(uint32_t)CE->getValue()));
  }

  void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually an imm0_4095, but we have its
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(-(uint32_t)CE->getValue()));
  }
2862
2863 void addUnsignedOffset_b8s2Operands(MCInst &Inst, unsigned N) const {
2864 if(const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
2865 Inst.addOperand(MCOperand::createImm(CE->getValue() >> 2));
2866 return;
2867 }
2868 const MCSymbolRefExpr *SR = cast<MCSymbolRefExpr>(Imm.Val);
2870 }
2871
2872 void addThumbMemPCOperands(MCInst &Inst, unsigned N) const {
2873 assert(N == 1 && "Invalid number of operands!");
2874 if (isImm()) {
2875 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2876 if (CE) {
2877 Inst.addOperand(MCOperand::createImm(CE->getValue()));
2878 return;
2879 }
2880 const MCSymbolRefExpr *SR = cast<MCSymbolRefExpr>(Imm.Val);
2882 return;
2883 }
2884
2885 assert(isGPRMem() && "Unknown value type!");
2886 assert(isa<MCConstantExpr>(Memory.OffsetImm) && "Unknown value type!");
2887 if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
2888 Inst.addOperand(MCOperand::createImm(CE->getValue()));
2889 else
2890 Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
2891 }
2892
2893 void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
2894 assert(N == 1 && "Invalid number of operands!");
2895 Inst.addOperand(MCOperand::createImm(unsigned(getMemBarrierOpt())));
2896 }
2897
2898 void addInstSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
2899 assert(N == 1 && "Invalid number of operands!");
2900 Inst.addOperand(MCOperand::createImm(unsigned(getInstSyncBarrierOpt())));
2901 }
2902
2903 void addTraceSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
2904 assert(N == 1 && "Invalid number of operands!");
2905 Inst.addOperand(MCOperand::createImm(unsigned(getTraceSyncBarrierOpt())));
2906 }
2907
2908 void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
2909 assert(N == 1 && "Invalid number of operands!");
2910 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2911 }
2912
2913 void addMemNoOffsetT2Operands(MCInst &Inst, unsigned N) const {
2914 assert(N == 1 && "Invalid number of operands!");
2915 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2916 }
2917
2918 void addMemNoOffsetT2NoSpOperands(MCInst &Inst, unsigned N) const {
2919 assert(N == 1 && "Invalid number of operands!");
2920 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2921 }
2922
2923 void addMemNoOffsetTOperands(MCInst &Inst, unsigned N) const {
2924 assert(N == 1 && "Invalid number of operands!");
2925 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2926 }
2927
2928 void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
2929 assert(N == 1 && "Invalid number of operands!");
2930 if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
2931 Inst.addOperand(MCOperand::createImm(CE->getValue()));
2932 else
2933 Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
2934 }
2935
2936 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
2937 assert(N == 1 && "Invalid number of operands!");
2938 assert(isImm() && "Not an immediate!");
2939
2940 // If we have an immediate that's not a constant, treat it as a label
2941 // reference needing a fixup.
2942 if (!isa<MCConstantExpr>(getImm())) {
2943 Inst.addOperand(MCOperand::createExpr(getImm()));
2944 return;
2945 }
2946
2947 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2948 int Val = CE->getValue();
2950 }
2951
2952 void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
2953 assert(N == 2 && "Invalid number of operands!");
2954 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2955 Inst.addOperand(MCOperand::createImm(Memory.Alignment));
2956 }
2957
2958 void addDupAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
2959 addAlignedMemoryOperands(Inst, N);
2960 }
2961
2962 void addAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
2963 addAlignedMemoryOperands(Inst, N);
2964 }
2965
2966 void addAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
2967 addAlignedMemoryOperands(Inst, N);
2968 }
2969
2970 void addDupAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
2971 addAlignedMemoryOperands(Inst, N);
2972 }
2973
2974 void addAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
2975 addAlignedMemoryOperands(Inst, N);
2976 }
2977
2978 void addDupAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
2979 addAlignedMemoryOperands(Inst, N);
2980 }
2981
2982 void addAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
2983 addAlignedMemoryOperands(Inst, N);
2984 }
2985
2986 void addDupAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
2987 addAlignedMemoryOperands(Inst, N);
2988 }
2989
2990 void addAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
2991 addAlignedMemoryOperands(Inst, N);
2992 }
2993
2994 void addDupAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
2995 addAlignedMemoryOperands(Inst, N);
2996 }
2997
2998 void addAlignedMemory64or128or256Operands(MCInst &Inst, unsigned N) const {
2999 addAlignedMemoryOperands(Inst, N);
3000 }
3001
3002 void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
3003 assert(N == 3 && "Invalid number of operands!");
3004 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3005 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3006 if (!Memory.OffsetRegNum) {
3007 if (!Memory.OffsetImm)
3009 else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
3010 int32_t Val = CE->getValue();
3012 // Special case for #-0
3013 if (Val == std::numeric_limits<int32_t>::min())
3014 Val = 0;
3015 if (Val < 0)
3016 Val = -Val;
3017 Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
3019 } else
3020 Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3021 } else {
3022 // For register offset, we encode the shift type and negation flag
3023 // here.
3024 int32_t Val =
3026 Memory.ShiftImm, Memory.ShiftType);
3028 }
3029 }
3030
3031 void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
3032 assert(N == 2 && "Invalid number of operands!");
3033 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
3034 assert(CE && "non-constant AM2OffsetImm operand!");
3035 int32_t Val = CE->getValue();
3037 // Special case for #-0
3038 if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
3039 if (Val < 0) Val = -Val;
3040 Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
3043 }
3044
3045 void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
3046 assert(N == 3 && "Invalid number of operands!");
3047 // If we have an immediate that's not a constant, treat it as a label
3048 // reference needing a fixup. If it is a constant, it's something else
3049 // and we reject it.
3050 if (isImm()) {
3051 Inst.addOperand(MCOperand::createExpr(getImm()));
3054 return;
3055 }
3056
3057 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3058 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3059 if (!Memory.OffsetRegNum) {
3060 if (!Memory.OffsetImm)
3062 else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
3063 int32_t Val = CE->getValue();
3065 // Special case for #-0
3066 if (Val == std::numeric_limits<int32_t>::min())
3067 Val = 0;
3068 if (Val < 0)
3069 Val = -Val;
3070 Val = ARM_AM::getAM3Opc(AddSub, Val);
3072 } else
3073 Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3074 } else {
3075 // For register offset, we encode the shift type and negation flag
3076 // here.
3077 int32_t Val =
3080 }
3081 }
3082
3083 void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
3084 assert(N == 2 && "Invalid number of operands!");
3085 if (Kind == k_PostIndexRegister) {
3086 int32_t Val =
3087 ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
3088 Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
3090 return;
3091 }
3092
3093 // Constant offset.
3094 const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
3095 int32_t Val = CE->getValue();
3097 // Special case for #-0
3098 if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
3099 if (Val < 0) Val = -Val;
3100 Val = ARM_AM::getAM3Opc(AddSub, Val);
3103 }
3104
3105 void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
3106 assert(N == 2 && "Invalid number of operands!");
3107 // If we have an immediate that's not a constant, treat it as a label
3108 // reference needing a fixup. If it is a constant, it's something else
3109 // and we reject it.
3110 if (isImm()) {
3111 Inst.addOperand(MCOperand::createExpr(getImm()));
3113 return;
3114 }
3115
3116 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3117 if (!Memory.OffsetImm)
3119 else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
3120 // The lower two bits are always zero and as such are not encoded.
3121 int32_t Val = CE->getValue() / 4;
3123 // Special case for #-0
3124 if (Val == std::numeric_limits<int32_t>::min())
3125 Val = 0;
3126 if (Val < 0)
3127 Val = -Val;
3128 Val = ARM_AM::getAM5Opc(AddSub, Val);
3130 } else
3131 Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3132 }
3133
3134 void addAddrMode5FP16Operands(MCInst &Inst, unsigned N) const {
3135 assert(N == 2 && "Invalid number of operands!");
3136 // If we have an immediate that's not a constant, treat it as a label
3137 // reference needing a fixup. If it is a constant, it's something else
3138 // and we reject it.
3139 if (isImm()) {
3140 Inst.addOperand(MCOperand::createExpr(getImm()));
3142 return;
3143 }
3144
3145 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3146 // The lower bit is always zero and as such is not encoded.
3147 if (!Memory.OffsetImm)
3149 else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
3150 int32_t Val = CE->getValue() / 2;
3152 // Special case for #-0
3153 if (Val == std::numeric_limits<int32_t>::min())
3154 Val = 0;
3155 if (Val < 0)
3156 Val = -Val;
3157 Val = ARM_AM::getAM5FP16Opc(AddSub, Val);
3159 } else
3160 Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3161 }
3162
3163 void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
3164 assert(N == 2 && "Invalid number of operands!");
3165 // If we have an immediate that's not a constant, treat it as a label
3166 // reference needing a fixup. If it is a constant, it's something else
3167 // and we reject it.
3168 if (isImm()) {
3169 Inst.addOperand(MCOperand::createExpr(getImm()));
3171 return;
3172 }
3173
3174 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3175 addExpr(Inst, Memory.OffsetImm);
3176 }
3177
3178 void addMemImm7s4OffsetOperands(MCInst &Inst, unsigned N) const {
3179 assert(N == 2 && "Invalid number of operands!");
3180 // If we have an immediate that's not a constant, treat it as a label
3181 // reference needing a fixup. If it is a constant, it's something else
3182 // and we reject it.
3183 if (isImm()) {
3184 Inst.addOperand(MCOperand::createExpr(getImm()));
3186 return;
3187 }
3188
3189 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3190 addExpr(Inst, Memory.OffsetImm);
3191 }
3192
3193 void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
3194 assert(N == 2 && "Invalid number of operands!");
3195 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3196 if (!Memory.OffsetImm)
3198 else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
3199 // The lower two bits are always zero and as such are not encoded.
3200 Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
3201 else
3202 Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3203 }
3204
3205 void addMemImmOffsetOperands(MCInst &Inst, unsigned N) const {
3206 assert(N == 2 && "Invalid number of operands!");
3207 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3208 addExpr(Inst, Memory.OffsetImm);
3209 }
3210
3211 void addMemRegRQOffsetOperands(MCInst &Inst, unsigned N) const {
3212 assert(N == 2 && "Invalid number of operands!");
3213 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3214 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3215 }
3216
3217 void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
3218 assert(N == 2 && "Invalid number of operands!");
3219 // If this is an immediate, it's a label reference.
3220 if (isImm()) {
3221 addExpr(Inst, getImm());
3223 return;
3224 }
3225
3226 // Otherwise, it's a normal memory reg+offset.
3227 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3228 addExpr(Inst, Memory.OffsetImm);
3229 }
3230
3231 void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
3232 assert(N == 2 && "Invalid number of operands!");
3233 // If this is an immediate, it's a label reference.
3234 if (isImm()) {
3235 addExpr(Inst, getImm());
3237 return;
3238 }
3239
3240 // Otherwise, it's a normal memory reg+offset.
3241 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3242 addExpr(Inst, Memory.OffsetImm);
3243 }
3244
3245 void addConstPoolAsmImmOperands(MCInst &Inst, unsigned N) const {
3246 assert(N == 1 && "Invalid number of operands!");
3247 // This is container for the immediate that we will create the constant
3248 // pool from
3249 addExpr(Inst, getConstantPoolImm());
3250 }
3251
3252 void addMemTBBOperands(MCInst &Inst, unsigned N) const {
3253 assert(N == 2 && "Invalid number of operands!");
3254 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3255 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3256 }
3257
3258 void addMemTBHOperands(MCInst &Inst, unsigned N) const {
3259 assert(N == 2 && "Invalid number of operands!");
3260 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3261 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3262 }
3263
3264 void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
3265 assert(N == 3 && "Invalid number of operands!");
3266 unsigned Val =
3268 Memory.ShiftImm, Memory.ShiftType);
3269 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3270 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3272 }
3273
3274 void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
3275 assert(N == 3 && "Invalid number of operands!");
3276 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3277 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3278 Inst.addOperand(MCOperand::createImm(Memory.ShiftImm));
3279 }
3280
3281 void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
3282 assert(N == 2 && "Invalid number of operands!");
3283 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3284 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3285 }
3286
3287 void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
3288 assert(N == 2 && "Invalid number of operands!");
3289 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3290 if (!Memory.OffsetImm)
3292 else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
3293 // The lower two bits are always zero and as such are not encoded.
3294 Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
3295 else
3296 Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3297 }
3298
3299 void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
3300 assert(N == 2 && "Invalid number of operands!");
3301 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3302 if (!Memory.OffsetImm)
3304 else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
3305 Inst.addOperand(MCOperand::createImm(CE->getValue() / 2));
3306 else
3307 Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3308 }
3309
3310 void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
3311 assert(N == 2 && "Invalid number of operands!");
3312 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3313 addExpr(Inst, Memory.OffsetImm);
3314 }
3315
3316 void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
3317 assert(N == 2 && "Invalid number of operands!");
3318 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3319 if (!Memory.OffsetImm)
3321 else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
3322 // The lower two bits are always zero and as such are not encoded.
3323 Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
3324 else
3325 Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3326 }
3327
3328 void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
3329 assert(N == 1 && "Invalid number of operands!");
3330 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
3331 assert(CE && "non-constant post-idx-imm8 operand!");
3332 int Imm = CE->getValue();
3333 bool isAdd = Imm >= 0;
3334 if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0;
3335 Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
3337 }
3338
3339 void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
3340 assert(N == 1 && "Invalid number of operands!");
3341 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
3342 assert(CE && "non-constant post-idx-imm8s4 operand!");
3343 int Imm = CE->getValue();
3344 bool isAdd = Imm >= 0;
3345 if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0;
3346 // Immediate is scaled by 4.
3347 Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
3349 }
3350
3351 void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
3352 assert(N == 2 && "Invalid number of operands!");
3353 Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
3354 Inst.addOperand(MCOperand::createImm(PostIdxReg.isAdd));
3355 }
3356
3357 void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
3358 assert(N == 2 && "Invalid number of operands!");
3359 Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
3360 // The sign, shift type, and shift amount are encoded in a single operand
3361 // using the AM2 encoding helpers.
3362 ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
3363 unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
3364 PostIdxReg.ShiftTy);
3366 }
3367
3368 void addPowerTwoOperands(MCInst &Inst, unsigned N) const {
3369 assert(N == 1 && "Invalid number of operands!");
3370 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3371 Inst.addOperand(MCOperand::createImm(CE->getValue()));
3372 }
3373
3374 void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
3375 assert(N == 1 && "Invalid number of operands!");
3376 Inst.addOperand(MCOperand::createImm(unsigned(getMSRMask())));
3377 }
3378
3379 void addBankedRegOperands(MCInst &Inst, unsigned N) const {
3380 assert(N == 1 && "Invalid number of operands!");
3381 Inst.addOperand(MCOperand::createImm(unsigned(getBankedReg())));
3382 }
3383
3384 void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
3385 assert(N == 1 && "Invalid number of operands!");
3386 Inst.addOperand(MCOperand::createImm(unsigned(getProcIFlags())));
3387 }
3388
3389 void addVecListOperands(MCInst &Inst, unsigned N) const {
3390 assert(N == 1 && "Invalid number of operands!");
3391
3392 if (isAnyVectorList())
3393 Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
3394 else if (isDReg() && !Parser->hasMVE()) {
3395 Inst.addOperand(MCOperand::createReg(Reg.RegNum));
3396 } else if (isQReg() && !Parser->hasMVE()) {
3397 MCRegister DPair = Parser->getDRegFromQReg(Reg.RegNum);
3398 DPair = Parser->getMRI()->getMatchingSuperReg(
3399 DPair, ARM::dsub_0, &ARMMCRegisterClasses[ARM::DPairRegClassID]);
3400 Inst.addOperand(MCOperand::createReg(DPair));
3401 } else {
3402 LLVM_DEBUG(dbgs() << "TYPE: " << Kind << "\n");
3404 "attempted to add a vector list register with wrong type!");
3405 }
3406 }
3407
3408 void addMVEVecListOperands(MCInst &Inst, unsigned N) const {
3409 assert(N == 1 && "Invalid number of operands!");
3410
3411 // When we come here, the VectorList field will identify a range
3412 // of q-registers by its base register and length, and it will
3413 // have already been error-checked to be the expected length of
3414 // range and contain only q-regs in the range q0-q7. So we can
3415 // count on the base register being in the range q0-q6 (for 2
3416 // regs) or q0-q4 (for 4)
3417 //
3418 // The MVE instructions taking a register range of this kind will
3419 // need an operand in the MQQPR or MQQQQPR class, representing the
3420 // entire range as a unit. So we must translate into that class,
3421 // by finding the index of the base register in the MQPR reg
3422 // class, and returning the super-register at the corresponding
3423 // index in the target class.
3424
3425 const MCRegisterClass *RC_in = &ARMMCRegisterClasses[ARM::MQPRRegClassID];
3426 const MCRegisterClass *RC_out =
3427 (VectorList.Count == 2) ? &ARMMCRegisterClasses[ARM::MQQPRRegClassID]
3428 : &ARMMCRegisterClasses[ARM::MQQQQPRRegClassID];
3429
3430 unsigned I, E = RC_out->getNumRegs();
3431 for (I = 0; I < E; I++)
3432 if (RC_in->getRegister(I) == VectorList.RegNum)
3433 break;
3434 assert(I < E && "Invalid vector list start register!");
3435
3437 }
3438
3439 void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
3440 assert(N == 2 && "Invalid number of operands!");
3441 Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
3442 Inst.addOperand(MCOperand::createImm(VectorList.LaneIndex));
3443 }
3444
3445 void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
3446 assert(N == 1 && "Invalid number of operands!");
3447 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
3448 }
3449
3450 void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
3451 assert(N == 1 && "Invalid number of operands!");
3452 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
3453 }
3454
3455 void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
3456 assert(N == 1 && "Invalid number of operands!");
3457 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
3458 }
3459
3460 void addVectorIndex64Operands(MCInst &Inst, unsigned N) const {
3461 assert(N == 1 && "Invalid number of operands!");
3462 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
3463 }
3464
3465 void addMVEVectorIndexOperands(MCInst &Inst, unsigned N) const {
3466 assert(N == 1 && "Invalid number of operands!");
3467 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
3468 }
3469
3470 void addMVEPairVectorIndexOperands(MCInst &Inst, unsigned N) const {
3471 assert(N == 1 && "Invalid number of operands!");
3472 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
3473 }
3474
3475 void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
3476 assert(N == 1 && "Invalid number of operands!");
3477 // The immediate encodes the type of constant as well as the value.
3478 // Mask in that this is an i8 splat.
3479 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3480 Inst.addOperand(MCOperand::createImm(CE->getValue() | 0xe00));
3481 }
3482
3483 void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
3484 assert(N == 1 && "Invalid number of operands!");
3485 // The immediate encodes the type of constant as well as the value.
3486 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3487 unsigned Value = CE->getValue();
3490 }
3491
3492 void addNEONi16splatNotOperands(MCInst &Inst, unsigned N) const {
3493 assert(N == 1 && "Invalid number of operands!");
3494 // The immediate encodes the type of constant as well as the value.
3495 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3496 unsigned Value = CE->getValue();
3499 }
3500
3501 void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
3502 assert(N == 1 && "Invalid number of operands!");
3503 // The immediate encodes the type of constant as well as the value.
3504 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3505 unsigned Value = CE->getValue();
3508 }
3509
3510 void addNEONi32splatNotOperands(MCInst &Inst, unsigned N) const {
3511 assert(N == 1 && "Invalid number of operands!");
3512 // The immediate encodes the type of constant as well as the value.
3513 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3514 unsigned Value = CE->getValue();
3517 }
3518
3519 void addNEONi8ReplicateOperands(MCInst &Inst, bool Inv) const {
3520 // The immediate encodes the type of constant as well as the value.
3521 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3522 assert((Inst.getOpcode() == ARM::VMOVv8i8 ||
3523 Inst.getOpcode() == ARM::VMOVv16i8) &&
3524 "All instructions that wants to replicate non-zero byte "
3525 "always must be replaced with VMOVv8i8 or VMOVv16i8.");
3526 unsigned Value = CE->getValue();
3527 if (Inv)
3528 Value = ~Value;
3529 unsigned B = Value & 0xff;
3530 B |= 0xe00; // cmode = 0b1110
3532 }
3533
3534 void addNEONinvi8ReplicateOperands(MCInst &Inst, unsigned N) const {
3535 assert(N == 1 && "Invalid number of operands!");
3536 addNEONi8ReplicateOperands(Inst, true);
3537 }
3538
3539 static unsigned encodeNeonVMOVImmediate(unsigned Value) {
3540 if (Value >= 256 && Value <= 0xffff)
3541 Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
3542 else if (Value > 0xffff && Value <= 0xffffff)
3543 Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
3544 else if (Value > 0xffffff)
3545 Value = (Value >> 24) | 0x600;
3546 return Value;
3547 }
3548
3549 void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
3550 assert(N == 1 && "Invalid number of operands!");
3551 // The immediate encodes the type of constant as well as the value.
3552 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3553 unsigned Value = encodeNeonVMOVImmediate(CE->getValue());
3555 }
3556
3557 void addNEONvmovi8ReplicateOperands(MCInst &Inst, unsigned N) const {
3558 assert(N == 1 && "Invalid number of operands!");
3559 addNEONi8ReplicateOperands(Inst, false);
3560 }
3561
3562 void addNEONvmovi16ReplicateOperands(MCInst &Inst, unsigned N) const {
3563 assert(N == 1 && "Invalid number of operands!");
3564 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3565 assert((Inst.getOpcode() == ARM::VMOVv4i16 ||
3566 Inst.getOpcode() == ARM::VMOVv8i16 ||
3567 Inst.getOpcode() == ARM::VMVNv4i16 ||
3568 Inst.getOpcode() == ARM::VMVNv8i16) &&
3569 "All instructions that want to replicate non-zero half-word "
3570 "always must be replaced with V{MOV,MVN}v{4,8}i16.");
3571 uint64_t Value = CE->getValue();
3572 unsigned Elem = Value & 0xffff;
3573 if (Elem >= 256)
3574 Elem = (Elem >> 8) | 0x200;
3575 Inst.addOperand(MCOperand::createImm(Elem));
3576 }
3577
3578 void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
3579 assert(N == 1 && "Invalid number of operands!");
3580 // The immediate encodes the type of constant as well as the value.
3581 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3582 unsigned Value = encodeNeonVMOVImmediate(~CE->getValue());
3584 }
3585
3586 void addNEONvmovi32ReplicateOperands(MCInst &Inst, unsigned N) const {
3587 assert(N == 1 && "Invalid number of operands!");
3588 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3589 assert((Inst.getOpcode() == ARM::VMOVv2i32 ||
3590 Inst.getOpcode() == ARM::VMOVv4i32 ||
3591 Inst.getOpcode() == ARM::VMVNv2i32 ||
3592 Inst.getOpcode() == ARM::VMVNv4i32) &&
3593 "All instructions that want to replicate non-zero word "
3594 "always must be replaced with V{MOV,MVN}v{2,4}i32.");
3595 uint64_t Value = CE->getValue();
3596 unsigned Elem = encodeNeonVMOVImmediate(Value & 0xffffffff);
3597 Inst.addOperand(MCOperand::createImm(Elem));
3598 }
3599
3600 void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
3601 assert(N == 1 && "Invalid number of operands!");
3602 // The immediate encodes the type of constant as well as the value.
3603 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3604 uint64_t Value = CE->getValue();
3605 unsigned Imm = 0;
3606 for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
3607 Imm |= (Value & 1) << i;
3608 }
3609 Inst.addOperand(MCOperand::createImm(Imm | 0x1e00));
3610 }
3611
3612 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
3613 assert(N == 1 && "Invalid number of operands!");
3614 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3615 Inst.addOperand(MCOperand::createImm(CE->getValue() / 90));
3616 }
3617
3618 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
3619 assert(N == 1 && "Invalid number of operands!");
3620 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3621 Inst.addOperand(MCOperand::createImm((CE->getValue() - 90) / 180));
3622 }
3623
3624 void addMveSaturateOperands(MCInst &Inst, unsigned N) const {
3625 assert(N == 1 && "Invalid number of operands!");
3626 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3627 unsigned Imm = CE->getValue();
3628 assert((Imm == 48 || Imm == 64) && "Invalid saturate operand");
3629 Inst.addOperand(MCOperand::createImm(Imm == 48 ? 1 : 0));
3630 }
3631
3632 void print(raw_ostream &OS) const override;
3633
3634 static std::unique_ptr<ARMOperand> CreateITMask(unsigned Mask, SMLoc S,
3635 ARMAsmParser &Parser) {
3636 auto Op = std::make_unique<ARMOperand>(k_ITCondMask, Parser);
3637 Op->ITMask.Mask = Mask;
3638 Op->StartLoc = S;
3639 Op->EndLoc = S;
3640 return Op;
3641 }
3642
3643 static std::unique_ptr<ARMOperand>
3644 CreateCondCode(ARMCC::CondCodes CC, SMLoc S, ARMAsmParser &Parser) {
3645 auto Op = std::make_unique<ARMOperand>(k_CondCode, Parser);
3646 Op->CC.Val = CC;
3647 Op->StartLoc = S;
3648 Op->EndLoc = S;
3649 return Op;
3650 }
3651
3652 static std::unique_ptr<ARMOperand> CreateVPTPred(ARMVCC::VPTCodes CC, SMLoc S,
3653 ARMAsmParser &Parser) {
3654 auto Op = std::make_unique<ARMOperand>(k_VPTPred, Parser);
3655 Op->VCC.Val = CC;
3656 Op->StartLoc = S;
3657 Op->EndLoc = S;
3658 return Op;
3659 }
3660
3661 static std::unique_ptr<ARMOperand> CreateCoprocNum(unsigned CopVal, SMLoc S,
3662 ARMAsmParser &Parser) {
3663 auto Op = std::make_unique<ARMOperand>(k_CoprocNum, Parser);
3664 Op->Cop.Val = CopVal;
3665 Op->StartLoc = S;
3666 Op->EndLoc = S;
3667 return Op;
3668 }
3669
3670 static std::unique_ptr<ARMOperand> CreateCoprocReg(unsigned CopVal, SMLoc S,
3671 ARMAsmParser &Parser) {
3672 auto Op = std::make_unique<ARMOperand>(k_CoprocReg, Parser);
3673 Op->Cop.Val = CopVal;
3674 Op->StartLoc = S;
3675 Op->EndLoc = S;
3676 return Op;
3677 }
3678
3679 static std::unique_ptr<ARMOperand>
3680 CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E, ARMAsmParser &Parser) {
3681 auto Op = std::make_unique<ARMOperand>(k_CoprocOption, Parser);
3682 Op->Cop.Val = Val;
3683 Op->StartLoc = S;
3684 Op->EndLoc = E;
3685 return Op;
3686 }
3687
3688 static std::unique_ptr<ARMOperand> CreateCCOut(MCRegister Reg, SMLoc S,
3689 ARMAsmParser &Parser) {
3690 auto Op = std::make_unique<ARMOperand>(k_CCOut, Parser);
3691 Op->Reg.RegNum = Reg;
3692 Op->StartLoc = S;
3693 Op->EndLoc = S;
3694 return Op;
3695 }
3696
3697 static std::unique_ptr<ARMOperand> CreateToken(StringRef Str, SMLoc S,
3698 ARMAsmParser &Parser) {
3699 auto Op = std::make_unique<ARMOperand>(k_Token, Parser);
3700 Op->Tok.Data = Str.data();
3701 Op->Tok.Length = Str.size();
3702 Op->StartLoc = S;
3703 Op->EndLoc = S;
3704 return Op;
3705 }
3706
3707 static std::unique_ptr<ARMOperand> CreateReg(MCRegister Reg, SMLoc S, SMLoc E,
3708 ARMAsmParser &Parser) {
3709 auto Op = std::make_unique<ARMOperand>(k_Register, Parser);
3710 Op->Reg.RegNum = Reg;
3711 Op->StartLoc = S;
3712 Op->EndLoc = E;
3713 return Op;
3714 }
3715
3716 static std::unique_ptr<ARMOperand>
3717 CreateShiftedRegister(ARM_AM::ShiftOpc ShTy, MCRegister SrcReg,
3718 MCRegister ShiftReg, unsigned ShiftImm, SMLoc S,
3719 SMLoc E, ARMAsmParser &Parser) {
3720 auto Op = std::make_unique<ARMOperand>(k_ShiftedRegister, Parser);
3721 Op->RegShiftedReg.ShiftTy = ShTy;
3722 Op->RegShiftedReg.SrcReg = SrcReg;
3723 Op->RegShiftedReg.ShiftReg = ShiftReg;
3724 Op->RegShiftedReg.ShiftImm = ShiftImm;
3725 Op->StartLoc = S;
3726 Op->EndLoc = E;
3727 return Op;
3728 }
3729
3730 static std::unique_ptr<ARMOperand>
3731 CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy, MCRegister SrcReg,
3732 unsigned ShiftImm, SMLoc S, SMLoc E,
3733 ARMAsmParser &Parser) {
3734 auto Op = std::make_unique<ARMOperand>(k_ShiftedImmediate, Parser);
3735 Op->RegShiftedImm.ShiftTy = ShTy;
3736 Op->RegShiftedImm.SrcReg = SrcReg;
3737 Op->RegShiftedImm.ShiftImm = ShiftImm;
3738 Op->StartLoc = S;
3739 Op->EndLoc = E;
3740 return Op;
3741 }
3742
3743 static std::unique_ptr<ARMOperand> CreateShifterImm(bool isASR, unsigned Imm,
3744 SMLoc S, SMLoc E,
3745 ARMAsmParser &Parser) {
3746 auto Op = std::make_unique<ARMOperand>(k_ShifterImmediate, Parser);
3747 Op->ShifterImm.isASR = isASR;
3748 Op->ShifterImm.Imm = Imm;
3749 Op->StartLoc = S;
3750 Op->EndLoc = E;
3751 return Op;
3752 }
3753
3754 static std::unique_ptr<ARMOperand>
3755 CreateRotImm(unsigned Imm, SMLoc S, SMLoc E, ARMAsmParser &Parser) {
3756 auto Op = std::make_unique<ARMOperand>(k_RotateImmediate, Parser);
3757 Op->RotImm.Imm = Imm;
3758 Op->StartLoc = S;
3759 Op->EndLoc = E;
3760 return Op;
3761 }
3762
3763 static std::unique_ptr<ARMOperand> CreateModImm(unsigned Bits, unsigned Rot,
3764 SMLoc S, SMLoc E,
3765 ARMAsmParser &Parser) {
3766 auto Op = std::make_unique<ARMOperand>(k_ModifiedImmediate, Parser);
3767 Op->ModImm.Bits = Bits;
3768 Op->ModImm.Rot = Rot;
3769 Op->StartLoc = S;
3770 Op->EndLoc = E;
3771 return Op;
3772 }
3773
3774 static std::unique_ptr<ARMOperand>
3775 CreateConstantPoolImm(const MCExpr *Val, SMLoc S, SMLoc E,
3776 ARMAsmParser &Parser) {
3777 auto Op = std::make_unique<ARMOperand>(k_ConstantPoolImmediate, Parser);
3778 Op->Imm.Val = Val;
3779 Op->StartLoc = S;
3780 Op->EndLoc = E;
3781 return Op;
3782 }
3783
3784 static std::unique_ptr<ARMOperand> CreateBitfield(unsigned LSB,
3785 unsigned Width, SMLoc S,
3786 SMLoc E,
3787 ARMAsmParser &Parser) {
3788 auto Op = std::make_unique<ARMOperand>(k_BitfieldDescriptor, Parser);
3789 Op->Bitfield.LSB = LSB;
3790 Op->Bitfield.Width = Width;
3791 Op->StartLoc = S;
3792 Op->EndLoc = E;
3793 return Op;
3794 }
3795
3796 static std::unique_ptr<ARMOperand>
3797 CreateRegList(SmallVectorImpl<std::pair<unsigned, MCRegister>> &Regs,
3798 SMLoc StartLoc, SMLoc EndLoc, ARMAsmParser &Parser) {
3799 assert(Regs.size() > 0 && "RegList contains no registers?");
3800 KindTy Kind = k_RegisterList;
3801
3802 if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
3803 Regs.front().second)) {
3804 if (Regs.back().second == ARM::VPR)
3805 Kind = k_FPDRegisterListWithVPR;
3806 else
3807 Kind = k_DPRRegisterList;
3808 } else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(
3809 Regs.front().second)) {
3810 if (Regs.back().second == ARM::VPR)
3811 Kind = k_FPSRegisterListWithVPR;
3812 else
3813 Kind = k_SPRRegisterList;
3814 } else if (Regs.front().second == ARM::VPR) {
3815 assert(Regs.size() == 1 &&
3816 "Register list starting with VPR expected to only contain VPR");
3817 Kind = k_FPSRegisterListWithVPR;
3818 }
3819
3820 if (Kind == k_RegisterList && Regs.back().second == ARM::APSR)
3821 Kind = k_RegisterListWithAPSR;
3822
3823 assert(llvm::is_sorted(Regs) && "Register list must be sorted by encoding");
3824
3825 auto Op = std::make_unique<ARMOperand>(Kind, Parser);
3826 for (const auto &P : Regs)
3827 Op->Registers.push_back(P.second);
3828
3829 Op->StartLoc = StartLoc;
3830 Op->EndLoc = EndLoc;
3831 return Op;
3832 }
3833
3834 static std::unique_ptr<ARMOperand>
3835 CreateVectorList(MCRegister Reg, unsigned Count, bool isDoubleSpaced, SMLoc S,
3836 SMLoc E, ARMAsmParser &Parser) {
3837 auto Op = std::make_unique<ARMOperand>(k_VectorList, Parser);
3838 Op->VectorList.RegNum = Reg;
3839 Op->VectorList.Count = Count;
3840 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3841 Op->StartLoc = S;
3842 Op->EndLoc = E;
3843 return Op;
3844 }
3845
3846 static std::unique_ptr<ARMOperand>
3847 CreateVectorListAllLanes(MCRegister Reg, unsigned Count, bool isDoubleSpaced,
3848 SMLoc S, SMLoc E, ARMAsmParser &Parser) {
3849 auto Op = std::make_unique<ARMOperand>(k_VectorListAllLanes, Parser);
3850 Op->VectorList.RegNum = Reg;
3851 Op->VectorList.Count = Count;
3852 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3853 Op->StartLoc = S;
3854 Op->EndLoc = E;
3855 return Op;
3856 }
3857
3858 static std::unique_ptr<ARMOperand>
3859 CreateVectorListIndexed(MCRegister Reg, unsigned Count, unsigned Index,
3860 bool isDoubleSpaced, SMLoc S, SMLoc E,
3861 ARMAsmParser &Parser) {
3862 auto Op = std::make_unique<ARMOperand>(k_VectorListIndexed, Parser);
3863 Op->VectorList.RegNum = Reg;
3864 Op->VectorList.Count = Count;
3865 Op->VectorList.LaneIndex = Index;
3866 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3867 Op->StartLoc = S;
3868 Op->EndLoc = E;
3869 return Op;
3870 }
3871
3872 static std::unique_ptr<ARMOperand> CreateVectorIndex(unsigned Idx, SMLoc S,
3873 SMLoc E, MCContext &Ctx,
3874 ARMAsmParser &Parser) {
3875 auto Op = std::make_unique<ARMOperand>(k_VectorIndex, Parser);
3876 Op->VectorIndex.Val = Idx;
3877 Op->StartLoc = S;
3878 Op->EndLoc = E;
3879 return Op;
3880 }
3881
3882 static std::unique_ptr<ARMOperand> CreateImm(const MCExpr *Val, SMLoc S,
3883 SMLoc E, ARMAsmParser &Parser) {
3884 auto Op = std::make_unique<ARMOperand>(k_Immediate, Parser);
3885 Op->Imm.Val = Val;
3886 Op->StartLoc = S;
3887 Op->EndLoc = E;
3888 return Op;
3889 }
3890
3891 static std::unique_ptr<ARMOperand>
3892 CreateMem(MCRegister BaseReg, const MCExpr *OffsetImm, MCRegister OffsetReg,
3893 ARM_AM::ShiftOpc ShiftType, unsigned ShiftImm, unsigned Alignment,
3894 bool isNegative, SMLoc S, SMLoc E, ARMAsmParser &Parser,
3895 SMLoc AlignmentLoc = SMLoc()) {
3896 auto Op = std::make_unique<ARMOperand>(k_Memory, Parser);
3897 Op->Memory.BaseRegNum = BaseReg;
3898 Op->Memory.OffsetImm = OffsetImm;
3899 Op->Memory.OffsetRegNum = OffsetReg;
3900 Op->Memory.ShiftType = ShiftType;
3901 Op->Memory.ShiftImm = ShiftImm;
3902 Op->Memory.Alignment = Alignment;
3903 Op->Memory.isNegative = isNegative;
3904 Op->StartLoc = S;
3905 Op->EndLoc = E;
3906 Op->AlignmentLoc = AlignmentLoc;
3907 return Op;
3908 }
3909
3910 static std::unique_ptr<ARMOperand>
3911 CreatePostIdxReg(MCRegister Reg, bool isAdd, ARM_AM::ShiftOpc ShiftTy,
3912 unsigned ShiftImm, SMLoc S, SMLoc E, ARMAsmParser &Parser) {
3913 auto Op = std::make_unique<ARMOperand>(k_PostIndexRegister, Parser);
3914 Op->PostIdxReg.RegNum = Reg;
3915 Op->PostIdxReg.isAdd = isAdd;
3916 Op->PostIdxReg.ShiftTy = ShiftTy;
3917 Op->PostIdxReg.ShiftImm = ShiftImm;
3918 Op->StartLoc = S;
3919 Op->EndLoc = E;
3920 return Op;
3921 }
3922
3923 static std::unique_ptr<ARMOperand>
3924 CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S, ARMAsmParser &Parser) {
3925 auto Op = std::make_unique<ARMOperand>(k_MemBarrierOpt, Parser);
3926 Op->MBOpt.Val = Opt;
3927 Op->StartLoc = S;
3928 Op->EndLoc = S;
3929 return Op;
3930 }
3931
3932 static std::unique_ptr<ARMOperand>
3933 CreateInstSyncBarrierOpt(ARM_ISB::InstSyncBOpt Opt, SMLoc S,
3934 ARMAsmParser &Parser) {
3935 auto Op = std::make_unique<ARMOperand>(k_InstSyncBarrierOpt, Parser);
3936 Op->ISBOpt.Val = Opt;
3937 Op->StartLoc = S;
3938 Op->EndLoc = S;
3939 return Op;
3940 }
3941
3942 static std::unique_ptr<ARMOperand>
3943 CreateTraceSyncBarrierOpt(ARM_TSB::TraceSyncBOpt Opt, SMLoc S,
3944 ARMAsmParser &Parser) {
3945 auto Op = std::make_unique<ARMOperand>(k_TraceSyncBarrierOpt, Parser);
3946 Op->TSBOpt.Val = Opt;
3947 Op->StartLoc = S;
3948 Op->EndLoc = S;
3949 return Op;
3950 }
3951
3952 static std::unique_ptr<ARMOperand>
3953 CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S, ARMAsmParser &Parser) {
3954 auto Op = std::make_unique<ARMOperand>(k_ProcIFlags, Parser);
3955 Op->IFlags.Val = IFlags;
3956 Op->StartLoc = S;
3957 Op->EndLoc = S;
3958 return Op;
3959 }
3960
3961 static std::unique_ptr<ARMOperand> CreateMSRMask(unsigned MMask, SMLoc S,
3962 ARMAsmParser &Parser) {
3963 auto Op = std::make_unique<ARMOperand>(k_MSRMask, Parser);
3964 Op->MMask.Val = MMask;
3965 Op->StartLoc = S;
3966 Op->EndLoc = S;
3967 return Op;
3968 }
3969
3970 static std::unique_ptr<ARMOperand> CreateBankedReg(unsigned Reg, SMLoc S,
3971 ARMAsmParser &Parser) {
3972 auto Op = std::make_unique<ARMOperand>(k_BankedReg, Parser);
3973 Op->BankedReg.Val = Reg;
3974 Op->StartLoc = S;
3975 Op->EndLoc = S;
3976 return Op;
3977 }
3978};
3979
3980} // end anonymous namespace.
3981
3982void ARMOperand::print(raw_ostream &OS) const {
3983 auto RegName = [](MCRegister Reg) {
3984 if (Reg)
3986 else
3987 return "noreg";
3988 };
3989
3990 switch (Kind) {
3991 case k_CondCode:
3992 OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
3993 break;
3994 case k_VPTPred:
3995 OS << "<ARMVCC::" << ARMVPTPredToString(getVPTPred()) << ">";
3996 break;
3997 case k_CCOut:
3998 OS << "<ccout " << RegName(getReg()) << ">";
3999 break;
4000 case k_ITCondMask: {
4001 static const char *const MaskStr[] = {
4002 "(invalid)", "(tttt)", "(ttt)", "(ttte)",
4003 "(tt)", "(ttet)", "(tte)", "(ttee)",
4004 "(t)", "(tett)", "(tet)", "(tete)",
4005 "(te)", "(teet)", "(tee)", "(teee)",
4006 };
4007 assert((ITMask.Mask & 0xf) == ITMask.Mask);
4008 OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
4009 break;
4010 }
4011 case k_CoprocNum:
4012 OS << "<coprocessor number: " << getCoproc() << ">";
4013 break;
4014 case k_CoprocReg:
4015 OS << "<coprocessor register: " << getCoproc() << ">";
4016 break;
4017 case k_CoprocOption:
4018 OS << "<coprocessor option: " << CoprocOption.Val << ">";
4019 break;
4020 case k_MSRMask:
4021 OS << "<mask: " << getMSRMask() << ">";
4022 break;
4023 case k_BankedReg:
4024 OS << "<banked reg: " << getBankedReg() << ">";
4025 break;
4026 case k_Immediate:
4027 OS << *getImm();
4028 break;
4029 case k_MemBarrierOpt:
4030 OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt(), false) << ">";
4031 break;
4032 case k_InstSyncBarrierOpt:
4033 OS << "<ARM_ISB::" << InstSyncBOptToString(getInstSyncBarrierOpt()) << ">";
4034 break;
4035 case k_TraceSyncBarrierOpt:
4036 OS << "<ARM_TSB::" << TraceSyncBOptToString(getTraceSyncBarrierOpt()) << ">";
4037 break;
4038 case k_Memory:
4039 OS << "<memory";
4040 if (Memory.BaseRegNum)
4041 OS << " base:" << RegName(Memory.BaseRegNum);
4042 if (Memory.OffsetImm)
4043 OS << " offset-imm:" << *Memory.OffsetImm;
4044 if (Memory.OffsetRegNum)
4045 OS << " offset-reg:" << (Memory.isNegative ? "-" : "")
4046 << RegName(Memory.OffsetRegNum);
4047 if (Memory.ShiftType != ARM_AM::no_shift) {
4048 OS << " shift-type:" << ARM_AM::getShiftOpcStr(Memory.ShiftType);
4049 OS << " shift-imm:" << Memory.ShiftImm;
4050 }
4051 if (Memory.Alignment)
4052 OS << " alignment:" << Memory.Alignment;
4053 OS << ">";
4054 break;
4055 case k_PostIndexRegister:
4056 OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
4057 << RegName(PostIdxReg.RegNum);
4058 if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
4059 OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
4060 << PostIdxReg.ShiftImm;
4061 OS << ">";
4062 break;
4063 case k_ProcIFlags: {
4064 OS << "<ARM_PROC::";
4065 unsigned IFlags = getProcIFlags();
4066 for (int i=2; i >= 0; --i)
4067 if (IFlags & (1 << i))
4068 OS << ARM_PROC::IFlagsToString(1 << i);
4069 OS << ">";
4070 break;
4071 }
4072 case k_Register:
4073 OS << "<register " << RegName(getReg()) << ">";
4074 break;
4075 case k_ShifterImmediate:
4076 OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
4077 << " #" << ShifterImm.Imm << ">";
4078 break;
4079 case k_ShiftedRegister:
4080 OS << "<so_reg_reg " << RegName(RegShiftedReg.SrcReg) << " "
4081 << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy) << " "
4082 << RegName(RegShiftedReg.ShiftReg) << ">";
4083 break;
4084 case k_ShiftedImmediate:
4085 OS << "<so_reg_imm " << RegName(RegShiftedImm.SrcReg) << " "
4086 << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy) << " #"
4087 << RegShiftedImm.ShiftImm << ">";
4088 break;
4089 case k_RotateImmediate:
4090 OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
4091 break;
4092 case k_ModifiedImmediate:
4093 OS << "<mod_imm #" << ModImm.Bits << ", #"
4094 << ModImm.Rot << ")>";
4095 break;
4096 case k_ConstantPoolImmediate:
4097 OS << "<constant_pool_imm #" << *getConstantPoolImm();
4098 break;
4099 case k_BitfieldDescriptor:
4100 OS << "<bitfield " << "lsb: " << Bitfield.LSB
4101 << ", width: " << Bitfield.Width << ">";
4102 break;
4103 case k_RegisterList:
4104 case k_RegisterListWithAPSR:
4105 case k_DPRRegisterList:
4106 case k_SPRRegisterList:
4107 case k_FPSRegisterListWithVPR:
4108 case k_FPDRegisterListWithVPR: {
4109 OS << "<register_list ";
4110
4111 const SmallVectorImpl<MCRegister> &RegList = getRegList();
4112 for (auto I = RegList.begin(), E = RegList.end(); I != E;) {
4113 OS << RegName(*I);
4114 if (++I < E) OS << ", ";
4115 }
4116
4117 OS << ">";
4118 break;
4119 }
4120 case k_VectorList:
4121 OS << "<vector_list " << VectorList.Count << " * "
4122 << RegName(VectorList.RegNum) << ">";
4123 break;
4124 case k_VectorListAllLanes:
4125 OS << "<vector_list(all lanes) " << VectorList.Count << " * "
4126 << RegName(VectorList.RegNum) << ">";
4127 break;
4128 case k_VectorListIndexed:
4129 OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
4130 << VectorList.Count << " * " << RegName(VectorList.RegNum) << ">";
4131 break;
4132 case k_Token:
4133 OS << "'" << getToken() << "'";
4134 break;
4135 case k_VectorIndex:
4136 OS << "<vectorindex " << getVectorIndex() << ">";
4137 break;
4138 }
4139}
4140
4141/// @name Auto-generated Match Functions
4142/// {
4143
4145
4146/// }
4147
4148static bool isDataTypeToken(StringRef Tok) {
4149 static const DenseSet<StringRef> DataTypes{
4150 ".8", ".16", ".32", ".64", ".i8", ".i16", ".i32", ".i64",
4151 ".u8", ".u16", ".u32", ".u64", ".s8", ".s16", ".s32", ".s64",
4152 ".p8", ".p16", ".f32", ".f64", ".f", ".d"};
4153 return DataTypes.contains(Tok);
4154}
4155
4157 unsigned MnemonicOpsEndInd = 1;
4158 // Special case for CPS which has a Mnemonic side token for possibly storing
4159 // ie/id variant
4160 if (Operands[0]->isToken() &&
4161 static_cast<ARMOperand &>(*Operands[0]).getToken() == "cps") {
4162 if (Operands.size() > 1 && Operands[1]->isImm() &&
4163 static_cast<ARMOperand &>(*Operands[1]).getImm()->getKind() ==
4165 (dyn_cast<MCConstantExpr>(
4166 static_cast<ARMOperand &>(*Operands[1]).getImm())
4167 ->getValue() == ARM_PROC::IE ||
4168 dyn_cast<MCConstantExpr>(
4169 static_cast<ARMOperand &>(*Operands[1]).getImm())
4170 ->getValue() == ARM_PROC::ID))
4171 ++MnemonicOpsEndInd;
4172 }
4173
4174 // In some circumstances the condition code moves to the right
4175 bool RHSCondCode = false;
4176 while (MnemonicOpsEndInd < Operands.size()) {
4177 auto Op = static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd]);
4178 // Special case for it instructions which have a condition code on the RHS
4179 if (Op.isITMask()) {
4180 RHSCondCode = true;
4181 MnemonicOpsEndInd++;
4182 } else if (Op.isToken() &&
4183 (
4184 // There are several special cases not covered by
4185 // isDataTypeToken
4186 Op.getToken() == ".w" || Op.getToken() == ".bf16" ||
4187 Op.getToken() == ".p64" || Op.getToken() == ".f16" ||
4188 isDataTypeToken(Op.getToken()))) {
4189 // In the mnemonic operators the cond code must always precede the data
4190 // type. So we can now safely assume any subsequent cond code is on the
4191 // RHS. As is the case for VCMP and VPT.
4192 RHSCondCode = true;
4193 MnemonicOpsEndInd++;
4194 }
4195 // Skip all mnemonic operator types
4196 else if (Op.isCCOut() || (Op.isCondCode() && !RHSCondCode) ||
4197 Op.isVPTPred() || (Op.isToken() && Op.getToken() == ".w"))
4198 MnemonicOpsEndInd++;
4199 else
4200 break;
4201 }
4202 return MnemonicOpsEndInd;
4203}
4204
4205bool ARMAsmParser::parseRegister(MCRegister &Reg, SMLoc &StartLoc,
4206 SMLoc &EndLoc) {
4207 const AsmToken &Tok = getParser().getTok();
4208 StartLoc = Tok.getLoc();
4209 EndLoc = Tok.getEndLoc();
4210 Reg = tryParseRegister();
4211
4212 return !Reg;
4213}
4214
4215ParseStatus ARMAsmParser::tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
4216 SMLoc &EndLoc) {
4217 if (parseRegister(Reg, StartLoc, EndLoc))
4218 return ParseStatus::NoMatch;
4219 return ParseStatus::Success;
4220}
4221
4222/// Try to parse a register name. The token must be an Identifier when called,
4223/// and if it is a register name the token is eaten and the register is
4224/// returned. Otherwise return an invalid MCRegister.
4225MCRegister ARMAsmParser::tryParseRegister(bool AllowOutOfBoundReg) {
4226 MCAsmParser &Parser = getParser();
4227 const AsmToken &Tok = Parser.getTok();
4228 if (Tok.isNot(AsmToken::Identifier))
4229 return MCRegister();
4230
4231 std::string lowerCase = Tok.getString().lower();
4232 MCRegister Reg = MatchRegisterName(lowerCase);
4233 if (!Reg) {
4234 Reg = StringSwitch<MCRegister>(lowerCase)
4235 .Case("r13", ARM::SP)
4236 .Case("r14", ARM::LR)
4237 .Case("r15", ARM::PC)
4238 .Case("ip", ARM::R12)
4239 // Additional register name aliases for 'gas' compatibility.
4240 .Case("a1", ARM::R0)
4241 .Case("a2", ARM::R1)
4242 .Case("a3", ARM::R2)
4243 .Case("a4", ARM::R3)
4244 .Case("v1", ARM::R4)
4245 .Case("v2", ARM::R5)
4246 .Case("v3", ARM::R6)
4247 .Case("v4", ARM::R7)
4248 .Case("v5", ARM::R8)
4249 .Case("v6", ARM::R9)
4250 .Case("v7", ARM::R10)
4251 .Case("v8", ARM::R11)
4252 .Case("sb", ARM::R9)
4253 .Case("sl", ARM::R10)
4254 .Case("fp", ARM::R11)
4255 .Default(MCRegister());
4256 }
4257 if (!Reg) {
4258 // Check for aliases registered via .req. Canonicalize to lower case.
4259 // That's more consistent since register names are case insensitive, and
4260 // it's how the original entry was passed in from MC/MCParser/AsmParser.
4261 auto Entry = RegisterReqs.find(lowerCase);
4262 // If no match, return failure.
4263 if (Entry == RegisterReqs.end())
4264 return MCRegister();
4265 Parser.Lex(); // Eat identifier token.
4266 return Entry->getValue();
4267 }
4268
4269 // Some FPUs only have 16 D registers, so D16-D31 are invalid
4270 if (!AllowOutOfBoundReg && !hasD32() && Reg >= ARM::D16 && Reg <= ARM::D31)
4271 return MCRegister();
4272
4273 Parser.Lex(); // Eat identifier token.
4274
4275 return Reg;
4276}
4277
4278std::optional<ARM_AM::ShiftOpc> ARMAsmParser::tryParseShiftToken() {
4279 MCAsmParser &Parser = getParser();
4280 const AsmToken &Tok = Parser.getTok();
4281 if (Tok.isNot(AsmToken::Identifier))
4282 return std::nullopt;
4283
4284 std::string lowerCase = Tok.getString().lower();
4286 .Case("asl", ARM_AM::lsl)
4287 .Case("lsl", ARM_AM::lsl)
4288 .Case("lsr", ARM_AM::lsr)
4289 .Case("asr", ARM_AM::asr)
4290 .Case("ror", ARM_AM::ror)
4291 .Case("rrx", ARM_AM::rrx)
4292 .Default(std::nullopt);
4293}
4294
4295// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
4296// If a recoverable error occurs, return 1. If an irrecoverable error
4297// occurs, return -1. An irrecoverable error is one where tokens have been
4298// consumed in the process of trying to parse the shifter (i.e., when it is
4299// indeed a shifter operand, but malformed).
4300int ARMAsmParser::tryParseShiftRegister(OperandVector &Operands) {
4301 MCAsmParser &Parser = getParser();
4302 SMLoc S = Parser.getTok().getLoc();
4303
4304 auto ShiftTyOpt = tryParseShiftToken();
4305 if (ShiftTyOpt == std::nullopt)
4306 return 1;
4307 auto ShiftTy = ShiftTyOpt.value();
4308
4309 Parser.Lex(); // Eat the operator.
4310
4311 // The source register for the shift has already been added to the
4312 // operand list, so we need to pop it off and combine it into the shifted
4313 // register operand instead.
4314 std::unique_ptr<ARMOperand> PrevOp(
4315 (ARMOperand *)Operands.pop_back_val().release());
4316 if (!PrevOp->isReg())
4317 return Error(PrevOp->getStartLoc(), "shift must be of a register");
4318 MCRegister SrcReg = PrevOp->getReg();
4319
4320 SMLoc EndLoc;
4321 int64_t Imm = 0;
4322 MCRegister ShiftReg;
4323 if (ShiftTy == ARM_AM::rrx) {
4324 // RRX Doesn't have an explicit shift amount. The encoder expects
4325 // the shift register to be the same as the source register. Seems odd,
4326 // but OK.
4327 ShiftReg = SrcReg;
4328 } else {
4329 // Figure out if this is shifted by a constant or a register (for non-RRX).
4330 if (Parser.getTok().is(AsmToken::Hash) ||
4331 Parser.getTok().is(AsmToken::Dollar)) {
4332 Parser.Lex(); // Eat hash.
4333 SMLoc ImmLoc = Parser.getTok().getLoc();
4334 const MCExpr *ShiftExpr = nullptr;
4335 if (getParser().parseExpression(ShiftExpr, EndLoc)) {
4336 Error(ImmLoc, "invalid immediate shift value");
4337 return -1;
4338 }
4339 // The expression must be evaluatable as an immediate.
4340 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
4341 if (!CE) {
4342 Error(ImmLoc, "invalid immediate shift value");
4343 return -1;
4344 }
4345 // Range check the immediate.
4346 // lsl, ror: 0 <= imm <= 31
4347 // lsr, asr: 0 <= imm <= 32
4348 Imm = CE->getValue();
4349 if (Imm < 0 ||
4350 ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
4351 ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
4352 Error(ImmLoc, "immediate shift value out of range");
4353 return -1;
4354 }
4355 // shift by zero is a nop. Always send it through as lsl.
4356 // ('as' compatibility)
4357 if (Imm == 0)
4358 ShiftTy = ARM_AM::lsl;
4359 } else if (Parser.getTok().is(AsmToken::Identifier)) {
4360 SMLoc L = Parser.getTok().getLoc();
4361 EndLoc = Parser.getTok().getEndLoc();
4362 ShiftReg = tryParseRegister();
4363 if (!ShiftReg) {
4364 Error(L, "expected immediate or register in shift operand");
4365 return -1;
4366 }
4367 } else {
4368 Error(Parser.getTok().getLoc(),
4369 "expected immediate or register in shift operand");
4370 return -1;
4371 }
4372 }
4373
4374 if (ShiftReg && ShiftTy != ARM_AM::rrx)
4375 Operands.push_back(ARMOperand::CreateShiftedRegister(
4376 ShiftTy, SrcReg, ShiftReg, Imm, S, EndLoc, *this));
4377 else
4378 Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
4379 S, EndLoc, *this));
4380
4381 return 0;
4382}
4383
4384/// Try to parse a register name. The token must be an Identifier when called.
4385/// If it's a register, an AsmOperand is created. Another AsmOperand is created
4386/// if there is a "writeback". 'true' if it's not a register.
4387///
4388/// TODO this is likely to change to allow different register types and or to
4389/// parse for a specific register type.
4390bool ARMAsmParser::tryParseRegisterWithWriteBack(OperandVector &Operands) {
4391 MCAsmParser &Parser = getParser();
4392 SMLoc RegStartLoc = Parser.getTok().getLoc();
4393 SMLoc RegEndLoc = Parser.getTok().getEndLoc();
4394 MCRegister Reg = tryParseRegister();
4395 if (!Reg)
4396 return true;
4397
4398 Operands.push_back(ARMOperand::CreateReg(Reg, RegStartLoc, RegEndLoc, *this));
4399
4400 const AsmToken &ExclaimTok = Parser.getTok();
4401 if (ExclaimTok.is(AsmToken::Exclaim)) {
4402 Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
4403 ExclaimTok.getLoc(), *this));
4404 Parser.Lex(); // Eat exclaim token
4405 return false;
4406 }
4407
4408 // Also check for an index operand. This is only legal for vector registers,
4409 // but that'll get caught OK in operand matching, so we don't need to
4410 // explicitly filter everything else out here.
4411 if (Parser.getTok().is(AsmToken::LBrac)) {
4412 SMLoc SIdx = Parser.getTok().getLoc();
4413 Parser.Lex(); // Eat left bracket token.
4414
4415 const MCExpr *ImmVal;
4416 if (getParser().parseExpression(ImmVal))
4417 return true;
4418 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4419 if (!MCE)
4420 return TokError("immediate value expected for vector index");
4421
4422 if (Parser.getTok().isNot(AsmToken::RBrac))
4423 return Error(Parser.getTok().getLoc(), "']' expected");
4424
4425 SMLoc E = Parser.getTok().getEndLoc();
4426 Parser.Lex(); // Eat right bracket token.
4427
4428 Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(), SIdx, E,
4429 getContext(), *this));
4430 }
4431
4432 return false;
4433}
4434
4435/// MatchCoprocessorOperandName - Try to parse an coprocessor related
4436/// instruction with a symbolic operand name.
4437/// We accept "crN" syntax for GAS compatibility.
4438/// <operand-name> ::= <prefix><number>
4439/// If CoprocOp is 'c', then:
4440/// <prefix> ::= c | cr
4441/// If CoprocOp is 'p', then :
4442/// <prefix> ::= p
4443/// <number> ::= integer in range [0, 15]
4444static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
4445 // Use the same layout as the tablegen'erated register name matcher. Ugly,
4446 // but efficient.
4447 if (Name.size() < 2 || Name[0] != CoprocOp)
4448 return -1;
4449 Name = (Name[1] == 'r') ? Name.drop_front(2) : Name.drop_front();
4450
4451 switch (Name.size()) {
4452 default: return -1;
4453 case 1:
4454 switch (Name[0]) {
4455 default: return -1;
4456 case '0': return 0;
4457 case '1': return 1;
4458 case '2': return 2;
4459 case '3': return 3;
4460 case '4': return 4;
4461 case '5': return 5;
4462 case '6': return 6;
4463 case '7': return 7;
4464 case '8': return 8;
4465 case '9': return 9;
4466 }
4467 case 2:
4468 if (Name[0] != '1')
4469 return -1;
4470 switch (Name[1]) {
4471 default: return -1;
4472 // CP10 and CP11 are VFP/NEON and so vector instructions should be used.
4473 // However, old cores (v5/v6) did use them in that way.
4474 case '0': return 10;
4475 case '1': return 11;
4476 case '2': return 12;
4477 case '3': return 13;
4478 case '4': return 14;
4479 case '5': return 15;
4480 }
4481 }
4482}
4483
4484/// parseITCondCode - Try to parse a condition code for an IT instruction.
4485ParseStatus ARMAsmParser::parseITCondCode(OperandVector &Operands) {
4486 MCAsmParser &Parser = getParser();
4487 SMLoc S = Parser.getTok().getLoc();
4488 const AsmToken &Tok = Parser.getTok();
4489 if (!Tok.is(AsmToken::Identifier))
4490 return ParseStatus::NoMatch;
4491 unsigned CC = ARMCondCodeFromString(Tok.getString());
4492 if (CC == ~0U)
4493 return ParseStatus::NoMatch;
4494 Parser.Lex(); // Eat the token.
4495
4496 Operands.push_back(
4497 ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S, *this));
4498
4499 return ParseStatus::Success;
4500}
4501
4502/// parseCoprocNumOperand - Try to parse an coprocessor number operand. The
4503/// token must be an Identifier when called, and if it is a coprocessor
4504/// number, the token is eaten and the operand is added to the operand list.
4505ParseStatus ARMAsmParser::parseCoprocNumOperand(OperandVector &Operands) {
4506 MCAsmParser &Parser = getParser();
4507 SMLoc S = Parser.getTok().getLoc();
4508 const AsmToken &Tok = Parser.getTok();
4509 if (Tok.isNot(AsmToken::Identifier))
4510 return ParseStatus::NoMatch;
4511
4512 int Num = MatchCoprocessorOperandName(Tok.getString().lower(), 'p');
4513 if (Num == -1)
4514 return ParseStatus::NoMatch;
4515 if (!isValidCoprocessorNumber(Num, getSTI().getFeatureBits()))
4516 return ParseStatus::NoMatch;
4517
4518 Parser.Lex(); // Eat identifier token.
4519 Operands.push_back(ARMOperand::CreateCoprocNum(Num, S, *this));
4520 return ParseStatus::Success;
4521}
4522
4523/// parseCoprocRegOperand - Try to parse an coprocessor register operand. The
4524/// token must be an Identifier when called, and if it is a coprocessor
4525/// number, the token is eaten and the operand is added to the operand list.
4526ParseStatus ARMAsmParser::parseCoprocRegOperand(OperandVector &Operands) {
4527 MCAsmParser &Parser = getParser();
4528 SMLoc S = Parser.getTok().getLoc();
4529 const AsmToken &Tok = Parser.getTok();
4530 if (Tok.isNot(AsmToken::Identifier))
4531 return ParseStatus::NoMatch;
4532
4533 int Reg = MatchCoprocessorOperandName(Tok.getString().lower(), 'c');
4534 if (Reg == -1)
4535 return ParseStatus::NoMatch;
4536
4537 Parser.Lex(); // Eat identifier token.
4538 Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S, *this));
4539 return ParseStatus::Success;
4540}
4541
4542/// parseCoprocOptionOperand - Try to parse an coprocessor option operand.
4543/// coproc_option : '{' imm0_255 '}'
4544ParseStatus ARMAsmParser::parseCoprocOptionOperand(OperandVector &Operands) {
4545 MCAsmParser &Parser = getParser();
4546 SMLoc S = Parser.getTok().getLoc();
4547
4548 // If this isn't a '{', this isn't a coprocessor immediate operand.
4549 if (Parser.getTok().isNot(AsmToken::LCurly))
4550 return ParseStatus::NoMatch;
4551 Parser.Lex(); // Eat the '{'
4552
4553 const MCExpr *Expr;
4554 SMLoc Loc = Parser.getTok().getLoc();
4555 if (getParser().parseExpression(Expr))
4556 return Error(Loc, "illegal expression");
4557 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4558 if (!CE || CE->getValue() < 0 || CE->getValue() > 255)
4559 return Error(Loc,
4560 "coprocessor option must be an immediate in range [0, 255]");
4561 int Val = CE->getValue();
4562
4563 // Check for and consume the closing '}'
4564 if (Parser.getTok().isNot(AsmToken::RCurly))
4565 return ParseStatus::Failure;
4566 SMLoc E = Parser.getTok().getEndLoc();
4567 Parser.Lex(); // Eat the '}'
4568
4569 Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E, *this));
4570 return ParseStatus::Success;
4571}
4572
4573// For register list parsing, we need to map from raw GPR register numbering
4574// to the enumeration values. The enumeration values aren't sorted by
4575// register number due to our using "sp", "lr" and "pc" as canonical names.
4577 // If this is a GPR, we need to do it manually, otherwise we can rely
4578 // on the sort ordering of the enumeration since the other reg-classes
4579 // are sane.
4580 if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
4581 return Reg + 1;
4582 switch (Reg.id()) {
4583 default: llvm_unreachable("Invalid GPR number!");
4584 case ARM::R0: return ARM::R1; case ARM::R1: return ARM::R2;
4585 case ARM::R2: return ARM::R3; case ARM::R3: return ARM::R4;
4586 case ARM::R4: return ARM::R5; case ARM::R5: return ARM::R6;
4587 case ARM::R6: return ARM::R7; case ARM::R7: return ARM::R8;
4588 case ARM::R8: return ARM::R9; case ARM::R9: return ARM::R10;
4589 case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
4590 case ARM::R12: return ARM::SP; case ARM::SP: return ARM::LR;
4591 case ARM::LR: return ARM::PC; case ARM::PC: return ARM::R0;
4592 }
4593}
4594
4595// Insert an <Encoding, Register> pair in an ordered vector. Return true on
4596// success, or false, if duplicate encoding found.
4597static bool
4598insertNoDuplicates(SmallVectorImpl<std::pair<unsigned, MCRegister>> &Regs,
4599 unsigned Enc, MCRegister Reg) {
4600 Regs.emplace_back(Enc, Reg);
4601 for (auto I = Regs.rbegin(), J = I + 1, E = Regs.rend(); J != E; ++I, ++J) {
4602 if (J->first == Enc) {
4603 Regs.erase(J.base());
4604 return false;
4605 }
4606 if (J->first < Enc)
4607 break;
4608 std::swap(*I, *J);
4609 }
4610 return true;
4611}
4612
4613/// Parse a register list.
4614bool ARMAsmParser::parseRegisterList(OperandVector &Operands, bool EnforceOrder,
4615 bool AllowRAAC, bool IsLazyLoadStore,
4616 bool IsVSCCLRM) {
4617 MCAsmParser &Parser = getParser();
4618 if (Parser.getTok().isNot(AsmToken::LCurly))
4619 return TokError("Token is not a Left Curly Brace");
4620 SMLoc S = Parser.getTok().getLoc();
4621 Parser.Lex(); // Eat '{' token.
4622 SMLoc RegLoc = Parser.getTok().getLoc();
4623
4624 // Check the first register in the list to see what register class
4625 // this is a list of.
4626 bool AllowOutOfBoundReg = IsLazyLoadStore || IsVSCCLRM;
4627 MCRegister Reg = tryParseRegister(AllowOutOfBoundReg);
4628 if (!Reg)
4629 return Error(RegLoc, "register expected");
4630 if (!AllowRAAC && Reg == ARM::RA_AUTH_CODE)
4631 return Error(RegLoc, "pseudo-register not allowed");
4632 // The reglist instructions have at most 32 registers, so reserve
4633 // space for that many.
4634 int EReg = 0;
4636
4637 // Single-precision VSCCLRM can have double-precision registers in the
4638 // register list. When VSCCLRMAdjustEncoding is true then we've switched from
4639 // single-precision to double-precision and we pretend that these registers
4640 // are encoded as S32 onwards, which we can do by adding 16 to the encoding
4641 // value.
4642 bool VSCCLRMAdjustEncoding = false;
4643
4644 // Allow Q regs and just interpret them as the two D sub-registers.
4645 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4646 Reg = getDRegFromQReg(Reg);
4647 EReg = MRI->getEncodingValue(Reg);
4648 Registers.emplace_back(EReg, Reg);
4649 Reg = Reg + 1;
4650 }
4651 const MCRegisterClass *RC;
4652 if (Reg == ARM::RA_AUTH_CODE ||
4653 ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
4654 RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
4655 else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
4656 RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
4657 else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
4658 RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
4659 else if (ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg))
4660 RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
4661 else if (Reg == ARM::VPR)
4662 RC = &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID];
4663 else
4664 return Error(RegLoc, "invalid register in register list");
4665
4666 // Store the register.
4667 EReg = MRI->getEncodingValue(Reg);
4668 Registers.emplace_back(EReg, Reg);
4669
4670 // This starts immediately after the first register token in the list,
4671 // so we can see either a comma or a minus (range separator) as a legal
4672 // next token.
4673 while (Parser.getTok().is(AsmToken::Comma) ||
4674 Parser.getTok().is(AsmToken::Minus)) {
4675 if (Parser.getTok().is(AsmToken::Minus)) {
4676 if (Reg == ARM::RA_AUTH_CODE)
4677 return Error(RegLoc, "pseudo-register not allowed");
4678 Parser.Lex(); // Eat the minus.
4679 SMLoc AfterMinusLoc = Parser.getTok().getLoc();
4680 MCRegister EndReg = tryParseRegister(AllowOutOfBoundReg);
4681 if (!EndReg)
4682 return Error(AfterMinusLoc, "register expected");
4683 if (EndReg == ARM::RA_AUTH_CODE)
4684 return Error(AfterMinusLoc, "pseudo-register not allowed");
4685 // Allow Q regs and just interpret them as the two D sub-registers.
4686 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
4687 EndReg = getDRegFromQReg(EndReg) + 1;
4688 // If the register is the same as the start reg, there's nothing
4689 // more to do.
4690 if (Reg == EndReg)
4691 continue;
4692 // The register must be in the same register class as the first.
4693 if (!RC->contains(Reg))
4694 return Error(AfterMinusLoc, "invalid register in register list");
4695 // Ranges must go from low to high.
4696 if (MRI->getEncodingValue(Reg) > MRI->getEncodingValue(EndReg))
4697 return Error(AfterMinusLoc, "bad range in register list");
4698
4699 // Add all the registers in the range to the register list.
4700 while (Reg != EndReg) {
4702 EReg = MRI->getEncodingValue(Reg);
4703 if (VSCCLRMAdjustEncoding)
4704 EReg += 16;
4705 if (!insertNoDuplicates(Registers, EReg, Reg)) {
4706 Warning(AfterMinusLoc, StringRef("duplicated register (") +
4708 ") in register list");
4709 }
4710 }
4711 continue;
4712 }
4713 Parser.Lex(); // Eat the comma.
4714 RegLoc = Parser.getTok().getLoc();
4715 MCRegister OldReg = Reg;
4716 int EOldReg = EReg;
4717 const AsmToken RegTok = Parser.getTok();
4718 Reg = tryParseRegister(AllowOutOfBoundReg);
4719 if (!Reg)
4720 return Error(RegLoc, "register expected");
4721 if (!AllowRAAC && Reg == ARM::RA_AUTH_CODE)
4722 return Error(RegLoc, "pseudo-register not allowed");
4723 // Allow Q regs and just interpret them as the two D sub-registers.
4724 bool isQReg = false;
4725 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(