#define DEBUG_TYPE "asm-parser"

enum class ImplicitItModeTy { Always, Never, ARMOnly, ThumbOnly };

static cl::opt<ImplicitItModeTy> ImplicitItMode(
    "arm-implicit-it", cl::init(ImplicitItModeTy::ARMOnly),
    cl::desc("Allow conditional instructions outside of an IT block"),
    cl::values(clEnumValN(ImplicitItModeTy::Always, "always",
                          "Accept in both ISAs, emit implicit ITs in Thumb"),
               clEnumValN(ImplicitItModeTy::Never, "never",
                          "Warn in ARM, reject in Thumb"),
               clEnumValN(ImplicitItModeTy::ARMOnly, "arm",
                          "Accept in ARM, reject in Thumb"),
               clEnumValN(ImplicitItModeTy::ThumbOnly, "thumb",
                          "Warn in ARM, emit implicit ITs in Thumb")));

static cl::opt<bool> AddBuildAttributes("arm-add-build-attributes",
enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };

static inline unsigned extractITMaskBit(unsigned Mask, unsigned Position) {
  return (Mask >> (5 - Position) & 1);
}
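// Example (hypothetical values): with Mask = 0b01010, a call
// extractITMaskBit(Mask, /*Position=*/2) shifts right by 5 - 2 = 3 and masks
// with 1, returning 1; Position = 4 shifts by 1 and also returns 1.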
class UnwindContext {

  Locs PersonalityLocs;
  Locs PersonalityIndexLocs;
  Locs HandlerDataLocs;

  bool hasFnStart() const { return !FnStartLocs.empty(); }
  bool cantUnwind() const { return !CantUnwindLocs.empty(); }
  bool hasHandlerData() const { return !HandlerDataLocs.empty(); }

  bool hasPersonality() const {
    return !(PersonalityLocs.empty() && PersonalityIndexLocs.empty());
  }

  void recordFnStart(SMLoc L) { FnStartLocs.push_back(L); }
  void recordCantUnwind(SMLoc L) { CantUnwindLocs.push_back(L); }
  void recordPersonality(SMLoc L) { PersonalityLocs.push_back(L); }
  void recordHandlerData(SMLoc L) { HandlerDataLocs.push_back(L); }
  void recordPersonalityIndex(SMLoc L) { PersonalityIndexLocs.push_back(L); }

  void saveFPReg(int Reg) { FPReg = Reg; }
  int getFPReg() const { return FPReg; }

  void emitFnStartLocNotes() const {
    for (const SMLoc &Loc : FnStartLocs)
      Parser.Note(Loc, ".fnstart was specified here");
  }

  void emitCantUnwindLocNotes() const {
    for (const SMLoc &Loc : CantUnwindLocs)
      Parser.Note(Loc, ".cantunwind was specified here");
  }

  void emitHandlerDataLocNotes() const {
    for (const SMLoc &Loc : HandlerDataLocs)
      Parser.Note(Loc, ".handlerdata was specified here");
  }

  void emitPersonalityLocNotes() const {
    for (Locs::const_iterator PI = PersonalityLocs.begin(),
                              PE = PersonalityLocs.end(),
                              PII = PersonalityIndexLocs.begin(),
                              PIE = PersonalityIndexLocs.end();
         PI != PE || PII != PIE;) {
      if (PI != PE && (PII == PIE || PI->getPointer() < PII->getPointer()))
        Parser.Note(*PI++, ".personality was specified here");
      else if (PII != PIE && (PI == PE || PII->getPointer() < PI->getPointer()))
        Parser.Note(*PII++, ".personalityindex was specified here");

                         "at the same location");
    }
  }

    FnStartLocs = Locs();
    CantUnwindLocs = Locs();
    PersonalityLocs = Locs();
    HandlerDataLocs = Locs();
    PersonalityIndexLocs = Locs();
class ARMMnemonicSets {

    return CDE.count(Mnemonic);

  bool isVPTPredicableCDEInstr(StringRef Mnemonic) {
    return CDEWithVPTSuffix.count(Mnemonic);
  }

  bool isITPredicableCDEInstr(StringRef Mnemonic) {

  bool isCDEDualRegInstr(StringRef Mnemonic) {
    return Mnemonic == "cx1d" || Mnemonic == "cx1da" ||
           Mnemonic == "cx2d" || Mnemonic == "cx2da" ||
           Mnemonic == "cx3d" || Mnemonic == "cx3da";
  }

  for (StringRef Mnemonic : {"cx1", "cx1a", "cx1d", "cx1da",
                             "cx2", "cx2a", "cx2d", "cx2da",
                             "cx3", "cx3a", "cx3d", "cx3da"})

       {"vcx1", "vcx1a", "vcx2", "vcx2a", "vcx3", "vcx3a"}) {
    CDEWithVPTSuffix.insert(Mnemonic);
    CDEWithVPTSuffix.insert(std::string(Mnemonic) + "t");
    CDEWithVPTSuffix.insert(std::string(Mnemonic) + "e");
    assert(getParser().getStreamer().getTargetStreamer() &&
           "do not have a target streamer");

  bool NextSymbolIsThumb;

  bool useImplicitITThumb() const {
    return ImplicitItMode == ImplicitItModeTy::Always ||
           ImplicitItMode == ImplicitItModeTy::ThumbOnly;
  }

  bool useImplicitITARM() const {
    return ImplicitItMode == ImplicitItModeTy::Always ||
           ImplicitItMode == ImplicitItModeTy::ARMOnly;
  }
    unsigned CurPosition;

  void flushPendingInstructions(MCStreamer &Out) override {
    if (!inImplicitITBlock()) {
      assert(PendingConditionalInsts.size() == 0);

    assert(PendingConditionalInsts.size() <= 4);
    for (const MCInst &Inst : PendingConditionalInsts) {

    PendingConditionalInsts.clear();

    ITState.CurPosition = ~0U;
  }

  bool inITBlock() { return ITState.CurPosition != ~0U; }
  bool inExplicitITBlock() { return inITBlock() && ITState.IsExplicit; }
  bool inImplicitITBlock() { return inITBlock() && !ITState.IsExplicit; }

  bool lastInITBlock() {

  void forwardITPosition() {
    if (!inITBlock())
      return;

    if (++ITState.CurPosition == 5 - TZ && ITState.IsExplicit)
      ITState.CurPosition = ~0U;
  }

  void rewindImplicitITPosition() {
    assert(inImplicitITBlock());
    assert(ITState.CurPosition > 1);
    ITState.CurPosition--;

    unsigned NewMask = 0;
    NewMask |= ITState.Mask & (0xC << TZ);
    NewMask |= 0x2 << TZ;
    ITState.Mask = NewMask;
  }

  void discardImplicitITBlock() {
    assert(inImplicitITBlock());
    assert(ITState.CurPosition == 1);
    ITState.CurPosition = ~0U;
  }
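  // Position bookkeeping used throughout this file: CurPosition == ~0U means
  // "not inside an IT block"; an explicit IT instruction starts its block at
  // position 0 (the IT itself), while an implicitly generated block starts at
  // position 1 (its first conditional instruction). rewindImplicitITPosition()
  // above steps the counter back one slot and rebuilds the mask to match.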
  unsigned getDRegFromQReg(unsigned QReg) const {
    return MRI->getSubReg(QReg, ARM::dsub_0);
  }

    unsigned MaskBit = extractITMaskBit(ITState.Mask, ITState.CurPosition);

  void invertCurrentITCondition() {
    if (ITState.CurPosition == 1) {

      ITState.Mask ^= 1 << (5 - ITState.CurPosition);

  bool isITBlockFull() {
    return inITBlock() && (ITState.Mask & 1);
  }

    assert(inImplicitITBlock());

    unsigned NewMask = 0;

    NewMask |= ITState.Mask & (0xE << TZ);

    NewMask |= (Cond != ITState.Cond) << TZ;

    NewMask |= 1 << (TZ - 1);
    ITState.Mask = NewMask;

  void startImplicitITBlock() {

    ITState.CurPosition = 1;
    ITState.IsExplicit = false;
  }

    ITState.CurPosition = 0;
    ITState.IsExplicit = true;
    unsigned CurPosition;

  bool inVPTBlock() { return VPTState.CurPosition != ~0U; }

  void forwardVPTPosition() {
    if (!inVPTBlock())
      return;

    if (++VPTState.CurPosition == 5 - TZ)
      VPTState.CurPosition = ~0U;
  }

    return getParser().Note(L, Msg, Range);

    return getParser().Warning(L, Msg, Range);

    return getParser().Error(L, Msg, Range);
                       unsigned ListNo, bool IsARPop = false);

  int tryParseRegister();

  bool parseRegisterList(OperandVector &, bool EnforceOrder = true,
                         bool AllowRAAC = false);

                       unsigned &ShiftAmount);
  bool parseLiteralValues(unsigned Size, SMLoc L);
  bool parseDirectiveThumb(SMLoc L);
  bool parseDirectiveARM(SMLoc L);
  bool parseDirectiveThumbFunc(SMLoc L);
  bool parseDirectiveCode(SMLoc L);
  bool parseDirectiveSyntax(SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);
  bool parseDirectiveArch(SMLoc L);
  bool parseDirectiveEabiAttr(SMLoc L);
  bool parseDirectiveCPU(SMLoc L);
  bool parseDirectiveFPU(SMLoc L);
  bool parseDirectiveFnStart(SMLoc L);
  bool parseDirectiveFnEnd(SMLoc L);
  bool parseDirectiveCantUnwind(SMLoc L);
  bool parseDirectivePersonality(SMLoc L);
  bool parseDirectiveHandlerData(SMLoc L);
  bool parseDirectiveSetFP(SMLoc L);
  bool parseDirectivePad(SMLoc L);
  bool parseDirectiveRegSave(SMLoc L, bool IsVector);
  bool parseDirectiveInst(SMLoc L, char Suffix = '\0');
  bool parseDirectiveLtorg(SMLoc L);
  bool parseDirectiveEven(SMLoc L);
  bool parseDirectivePersonalityIndex(SMLoc L);
  bool parseDirectiveUnwindRaw(SMLoc L);
  bool parseDirectiveTLSDescSeq(SMLoc L);
  bool parseDirectiveMovSP(SMLoc L);
  bool parseDirectiveObjectArch(SMLoc L);
  bool parseDirectiveArchExtension(SMLoc L);
  bool parseDirectiveAlign(SMLoc L);
  bool parseDirectiveThumbSet(SMLoc L);

                      unsigned &PredicationCode,
                      unsigned &VPTPredicationCode, bool &CarrySetting,
                      unsigned &ProcessorIMod, StringRef &ITMask);

                             StringRef FullInst, bool &CanAcceptCarrySet,
                             bool &CanAcceptPredicationCode,
                             bool &CanAcceptVPTPredicationCode);
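  // The parseDirective* declarations above map one-to-one onto the
  // ARM-specific assembler directives handled by this parser: .thumb, .arm,
  // .thumb_func, .code, .syntax, .unreq, .arch, .eabi_attribute, .cpu, .fpu,
  // .fnstart, .fnend, .cantunwind, .personality, .handlerdata, .setfp, .pad,
  // .save/.vsave, .inst, .ltorg/.pool, .even, .personalityindex, .unwind_raw,
  // .tlsdescseq, .movsp, .object_arch, .arch_extension, .align and .thumb_set.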
  void tryConvertingToTwoOperandForm(StringRef Mnemonic, bool CarrySetting,

    return getSTI().getFeatureBits()[ARM::ModeThumb];

  bool isThumbOne() const {
    return isThumb() && !getSTI().getFeatureBits()[ARM::FeatureThumb2];
  }

  bool isThumbTwo() const {
    return isThumb() && getSTI().getFeatureBits()[ARM::FeatureThumb2];
  }

  bool hasThumb() const {
    return getSTI().getFeatureBits()[ARM::HasV4TOps];
  }

  bool hasThumb2() const {
    return getSTI().getFeatureBits()[ARM::FeatureThumb2];
  }

  bool hasV6Ops() const {
    return getSTI().getFeatureBits()[ARM::HasV6Ops];
  }

  bool hasV6T2Ops() const {
    return getSTI().getFeatureBits()[ARM::HasV6T2Ops];
  }

  bool hasV6MOps() const {
    return getSTI().getFeatureBits()[ARM::HasV6MOps];
  }

  bool hasV7Ops() const {
    return getSTI().getFeatureBits()[ARM::HasV7Ops];
  }

  bool hasV8Ops() const {
    return getSTI().getFeatureBits()[ARM::HasV8Ops];
  }

  bool hasV8MBaseline() const {
    return getSTI().getFeatureBits()[ARM::HasV8MBaselineOps];
  }

  bool hasV8MMainline() const {
    return getSTI().getFeatureBits()[ARM::HasV8MMainlineOps];
  }

  bool hasV8_1MMainline() const {
    return getSTI().getFeatureBits()[ARM::HasV8_1MMainlineOps];
  }

  bool hasMVE() const {
    return getSTI().getFeatureBits()[ARM::HasMVEIntegerOps];
  }

  bool hasMVEFloat() const {
    return getSTI().getFeatureBits()[ARM::HasMVEFloatOps];
  }

  bool hasCDE() const {
    return getSTI().getFeatureBits()[ARM::HasCDEOps];
  }

  bool has8MSecExt() const {
    return getSTI().getFeatureBits()[ARM::Feature8MSecExt];
  }

  bool hasARM() const {
    return !getSTI().getFeatureBits()[ARM::FeatureNoARM];
  }

  bool hasDSP() const {
    return getSTI().getFeatureBits()[ARM::FeatureDSP];
  }

  bool hasD32() const {
    return getSTI().getFeatureBits()[ARM::FeatureD32];
  }

  bool hasV8_1aOps() const {
    return getSTI().getFeatureBits()[ARM::HasV8_1aOps];
  }

  bool hasRAS() const {
    return getSTI().getFeatureBits()[ARM::FeatureRAS];
  }

    auto FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
    setAvailableFeatures(FB);

  void FixModeAfterArchChange(bool WasThumb, SMLoc Loc);

  bool isMClass() const {
    return getSTI().getFeatureBits()[ARM::FeatureMClass];
  }
#define GET_ASSEMBLER_HEADER
#include "ARMGenAsmMatcher.inc"

    return parsePKHImm(O, "lsl", 0, 31);

    return parsePKHImm(O, "asr", 1, 32);

  bool isITBlockTerminator(MCInst &Inst) const;

                          bool Load, bool ARMMode, bool Writeback);

  enum ARMMatchResultTy {
    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
    Match_RequiresNotITBlock,
    Match_RequiresThumb2,
    Match_RequiresFlagSetting,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "ARMGenAsmMatcher.inc"

    MRI = getContext().getRegisterInfo();

    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));

    if (AddBuildAttributes)
      getTargetStreamer().emitTargetAttributes(STI);

    ITState.CurPosition = ~0U;

    VPTState.CurPosition = ~0U;

    NextSymbolIsThumb = false;
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;

                               SMLoc &EndLoc) override;

  bool ParseDirective(AsmToken DirectiveID) override;

                                      unsigned Kind) override;
  unsigned checkTargetMatchPredicate(MCInst &Inst) override;

  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,

                               bool MatchingInlineAsm) override;

                            bool MatchingInlineAsm, bool &EmitInITBlock,

  struct NearMissMessage {

  const char *getCustomOperandDiag(ARMMatchResultTy MatchError);
    k_InstSyncBarrierOpt,
    k_TraceSyncBarrierOpt,

    k_RegisterListWithAPSR,

    k_FPSRegisterListWithVPR,
    k_FPDRegisterListWithVPR,

    k_VectorListAllLanes,

    k_ConstantPoolImmediate,
    k_BitfieldDescriptor,

  SMLoc StartLoc, EndLoc, AlignmentLoc;

  struct CoprocOptionOp {

  struct VectorListOp {

  struct VectorIndexOp {

    unsigned OffsetRegNum;

    unsigned isNegative : 1;

  struct PostIdxRegOp {

  struct ShifterImmOp {

  struct RegShiftedRegOp {

  struct RegShiftedImmOp {

    struct CoprocOptionOp CoprocOption;
    struct MBOptOp MBOpt;
    struct ISBOptOp ISBOpt;
    struct TSBOptOp TSBOpt;
    struct ITMaskOp ITMask;

    struct MMaskOp MMask;
    struct BankedRegOp BankedReg;

    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;

    struct PostIdxRegOp PostIdxReg;
    struct ShifterImmOp ShifterImm;
    struct RegShiftedRegOp RegShiftedReg;
    struct RegShiftedImmOp RegShiftedImm;
    struct RotImmOp RotImm;
    struct ModImmOp ModImm;

  ARMOperand(KindTy K) : Kind(K) {}
  SMLoc getStartLoc() const override { return StartLoc; }

  SMLoc getEndLoc() const override { return EndLoc; }

  SMLoc getAlignmentLoc() const {
    assert(Kind == k_Memory && "Invalid access!");

    assert(Kind == k_CondCode && "Invalid access!");

    assert(isVPTPred() && "Invalid access!");

  unsigned getCoproc() const {
    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");

  unsigned getReg() const override {
    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");

    assert((Kind == k_RegisterList || Kind == k_RegisterListWithAPSR ||
            Kind == k_DPRRegisterList || Kind == k_SPRRegisterList ||
            Kind == k_FPSRegisterListWithVPR ||
            Kind == k_FPDRegisterListWithVPR) &&

  const MCExpr *getImm() const {

  const MCExpr *getConstantPoolImm() const {
    assert(isConstantPoolImm() && "Invalid access!");

  unsigned getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

    assert(Kind == k_MemBarrierOpt && "Invalid access!");

    assert(Kind == k_InstSyncBarrierOpt && "Invalid access!");

    assert(Kind == k_TraceSyncBarrierOpt && "Invalid access!");

    assert(Kind == k_ProcIFlags && "Invalid access!");

  unsigned getMSRMask() const {
    assert(Kind == k_MSRMask && "Invalid access!");

  unsigned getBankedReg() const {
    assert(Kind == k_BankedReg && "Invalid access!");
    return BankedReg.Val;
  }

  bool isCoprocNum() const { return Kind == k_CoprocNum; }
  bool isCoprocReg() const { return Kind == k_CoprocReg; }
  bool isCoprocOption() const { return Kind == k_CoprocOption; }
  bool isCondCode() const { return Kind == k_CondCode; }
  bool isVPTPred() const { return Kind == k_VPTPred; }
  bool isCCOut() const { return Kind == k_CCOut; }
  bool isITMask() const { return Kind == k_ITCondMask; }
  bool isITCondCode() const { return Kind == k_CondCode; }
  bool isImm() const override {
    return Kind == k_Immediate;
  }
  bool isARMBranchTarget() const {
    if (!isImm()) return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
      return CE->getValue() % 4 == 0;

  bool isThumbBranchTarget() const {
    if (!isImm()) return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
      return CE->getValue() % 2 == 0;

  template<unsigned width, unsigned scale>
  bool isUnsignedOffset() const {
    if (!isImm()) return false;
    if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();

      int64_t Max = Align * ((1LL << width) - 1);
      return ((Val % Align) == 0) && (Val >= 0) && (Val <= Max);

  template<unsigned width, unsigned scale>
  bool isSignedOffset() const {
    if (!isImm()) return false;
    if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();

      int64_t Max = Align * ((1LL << (width-1)) - 1);
      int64_t Min = -Align * (1LL << (width-1));
      return ((Val % Align) == 0) && (Val >= Min) && (Val <= Max);
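      // The definition of Align is elided in this excerpt; from the
      // arithmetic above it is assumed to be the alignment implied by the
      // "scale" parameter (1LL << scale). Under that assumption,
      // isUnsignedOffset<8, 2>() accepts word-aligned offsets in [0, 1020]:
      // Align = 4 and Max = 4 * ((1 << 8) - 1) = 1020.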
  bool isLEOffset() const {
    if (!isImm()) return false;
    if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      return Val < 0 && Val >= -4094 && (Val & 1) == 0;

  bool isThumbMemPC() const {

    if (isa<MCSymbolRefExpr>(Imm.Val)) return true;

      if (!CE) return false;
      Val = CE->getValue();

    else if (isGPRMem()) {
      if (!Memory.OffsetImm || Memory.OffsetRegNum) return false;
      if (Memory.BaseRegNum != ARM::PC) return false;
      if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
        Val = CE->getValue();

    return ((Val % 4) == 0) && (Val >= 0) && (Val <= 1020);
  }

  bool isFPImm() const {
    if (!isImm()) return false;

    if (!CE) return false;
  template<int64_t N, int64_t M>
  bool isImmediate() const {
    if (!isImm()) return false;

    if (!CE) return false;
    int64_t Value = CE->getValue();

  template<int64_t N, int64_t M>
  bool isImmediateS4() const {
    if (!isImm()) return false;

    if (!CE) return false;
    int64_t Value = CE->getValue();

  template<int64_t N, int64_t M>
  bool isImmediateS2() const {
    if (!isImm()) return false;

    if (!CE) return false;
    int64_t Value = CE->getValue();
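  // The comparison lines of the three templates above are elided in this
  // excerpt: isImmediate<N, M> checks that a constant immediate lies in the
  // N..M range implied by its callers below, while the S4/S2 variants also
  // require the value to be a multiple of 4 or 2 (for example, isImm8s4()
  // uses isImmediateS4<-1020, 1020>(), matching word-multiple offsets up to
  // +/-1020).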
  bool isFBits16() const {
    return isImmediate<0, 17>();
  }

  bool isFBits32() const {
    return isImmediate<1, 33>();
  }

  bool isImm8s4() const {
    return isImmediateS4<-1020, 1020>();
  }

  bool isImm7s4() const {
    return isImmediateS4<-508, 508>();
  }

  bool isImm7Shift0() const {
    return isImmediate<-127, 127>();
  }

  bool isImm7Shift1() const {
    return isImmediateS2<-255, 255>();
  }

  bool isImm7Shift2() const {
    return isImmediateS4<-511, 511>();
  }

  bool isImm7() const {
    return isImmediate<-127, 127>();
  }

  bool isImm0_1020s4() const {
    return isImmediateS4<0, 1020>();
  }

  bool isImm0_508s4() const {
    return isImmediateS4<0, 508>();
  }
1180 bool isImm0_508s4Neg()
const {
1181 if (!
isImm())
return false;
1183 if (!CE)
return false;
1184 int64_t
Value = -
CE->getValue();
1189 bool isImm0_4095Neg()
const {
1190 if (!
isImm())
return false;
1192 if (!CE)
return false;
1197 if ((
CE->getValue() >> 32) > 0)
return false;
1202 bool isImm0_7()
const {
1203 return isImmediate<0, 7>();
1206 bool isImm1_16()
const {
1207 return isImmediate<1, 16>();
1210 bool isImm1_32()
const {
1211 return isImmediate<1, 32>();
1214 bool isImm8_255()
const {
1215 return isImmediate<8, 255>();
1218 bool isImm256_65535Expr()
const {
1219 if (!
isImm())
return false;
1223 if (!CE)
return true;
1224 int64_t
Value =
CE->getValue();
1228 bool isImm0_65535Expr()
const {
1229 if (!
isImm())
return false;
1233 if (!CE)
return true;
1234 int64_t
Value =
CE->getValue();
1238 bool isImm24bit()
const {
1239 return isImmediate<0, 0xffffff + 1>();
1242 bool isImmThumbSR()
const {
1243 return isImmediate<1, 33>();
1255 bool isExpImm()
const {
1256 if (!
isImm())
return false;
1258 if (!CE)
return false;
1260 return isExpImmValue<shift>(
CE->getValue());
1263 template<
int shift,
int size>
1264 bool isInvertedExpImm()
const {
1265 if (!
isImm())
return false;
1267 if (!CE)
return false;
1271 return isExpImmValue<shift>(InvertedValue);
1274 bool isPKHLSLImm()
const {
1275 return isImmediate<0, 32>();
1278 bool isPKHASRImm()
const {
1279 return isImmediate<0, 33>();
1282 bool isAdrLabel()
const {
1285 if (
isImm() && !isa<MCConstantExpr>(getImm()))
1289 if (!
isImm())
return false;
1291 if (!CE)
return false;
1292 int64_t
Value =
CE->getValue();
1297 bool isT2SOImm()
const {
1300 if (
isImm() && !isa<MCConstantExpr>(getImm())) {
1303 const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(getImm());
1307 if (!
isImm())
return false;
1309 if (!CE)
return false;
1310 int64_t
Value =
CE->getValue();
1314 bool isT2SOImmNot()
const {
1315 if (!
isImm())
return false;
1317 if (!CE)
return false;
1318 int64_t
Value =
CE->getValue();
1323 bool isT2SOImmNeg()
const {
1324 if (!
isImm())
return false;
1326 if (!CE)
return false;
1327 int64_t
Value =
CE->getValue();
1333 bool isSetEndImm()
const {
1334 if (!
isImm())
return false;
1336 if (!CE)
return false;
1337 int64_t
Value =
CE->getValue();
1341 bool isReg()
const override {
return Kind == k_Register; }
1342 bool isRegList()
const {
return Kind == k_RegisterList; }
1343 bool isRegListWithAPSR()
const {
1344 return Kind == k_RegisterListWithAPSR ||
Kind == k_RegisterList;
1346 bool isDPRRegList()
const {
return Kind == k_DPRRegisterList; }
1347 bool isSPRRegList()
const {
return Kind == k_SPRRegisterList; }
1348 bool isFPSRegListWithVPR()
const {
return Kind == k_FPSRegisterListWithVPR; }
1349 bool isFPDRegListWithVPR()
const {
return Kind == k_FPDRegisterListWithVPR; }
1350 bool isToken()
const override {
return Kind == k_Token; }
1351 bool isMemBarrierOpt()
const {
return Kind == k_MemBarrierOpt; }
1352 bool isInstSyncBarrierOpt()
const {
return Kind == k_InstSyncBarrierOpt; }
1353 bool isTraceSyncBarrierOpt()
const {
return Kind == k_TraceSyncBarrierOpt; }
1354 bool isMem()
const override {
1355 return isGPRMem() || isMVEMem();
1357 bool isMVEMem()
const {
1358 if (
Kind != k_Memory)
1361 !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
Memory.BaseRegNum) &&
1362 !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
Memory.BaseRegNum))
1364 if (
Memory.OffsetRegNum &&
1365 !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
1370 bool isGPRMem()
const {
1371 if (
Kind != k_Memory)
1374 !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
Memory.BaseRegNum))
1376 if (
Memory.OffsetRegNum &&
1377 !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
Memory.OffsetRegNum))
1381 bool isShifterImm()
const {
return Kind == k_ShifterImmediate; }
1382 bool isRegShiftedReg()
const {
1383 return Kind == k_ShiftedRegister &&
1384 ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
1385 RegShiftedReg.SrcReg) &&
1386 ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
1387 RegShiftedReg.ShiftReg);
1389 bool isRegShiftedImm()
const {
1390 return Kind == k_ShiftedImmediate &&
1391 ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
1392 RegShiftedImm.SrcReg);
1394 bool isRotImm()
const {
return Kind == k_RotateImmediate; }
1396 template<
unsigned Min,
unsigned Max>
1397 bool isPowerTwoInRange()
const {
1398 if (!
isImm())
return false;
1400 if (!CE)
return false;
1401 int64_t
Value =
CE->getValue();
1405 bool isModImm()
const {
return Kind == k_ModifiedImmediate; }
1407 bool isModImmNot()
const {
1408 if (!
isImm())
return false;
1410 if (!CE)
return false;
1411 int64_t
Value =
CE->getValue();
1415 bool isModImmNeg()
const {
1416 if (!
isImm())
return false;
1418 if (!CE)
return false;
1419 int64_t
Value =
CE->getValue();
1424 bool isThumbModImmNeg1_7()
const {
1425 if (!
isImm())
return false;
1427 if (!CE)
return false;
1428 int32_t
Value = -(int32_t)
CE->getValue();
1432 bool isThumbModImmNeg8_255()
const {
1433 if (!
isImm())
return false;
1435 if (!CE)
return false;
1436 int32_t
Value = -(int32_t)
CE->getValue();
1440 bool isConstantPoolImm()
const {
return Kind == k_ConstantPoolImmediate; }
1441 bool isBitfield()
const {
return Kind == k_BitfieldDescriptor; }
1442 bool isPostIdxRegShifted()
const {
1443 return Kind == k_PostIndexRegister &&
1444 ARMMCRegisterClasses[ARM::GPRRegClassID].contains(PostIdxReg.RegNum);
1446 bool isPostIdxReg()
const {
1449 bool isMemNoOffset(
bool alignOK =
false,
unsigned Alignment = 0)
const {
1453 return Memory.OffsetRegNum == 0 &&
Memory.OffsetImm ==
nullptr &&
1456 bool isMemNoOffsetT2(
bool alignOK =
false,
unsigned Alignment = 0)
const {
1460 if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].
contains(
1465 return Memory.OffsetRegNum == 0 &&
Memory.OffsetImm ==
nullptr &&
1468 bool isMemNoOffsetT2NoSp(
bool alignOK =
false,
unsigned Alignment = 0)
const {
1472 if (!ARMMCRegisterClasses[ARM::rGPRRegClassID].
contains(
1477 return Memory.OffsetRegNum == 0 &&
Memory.OffsetImm ==
nullptr &&
1480 bool isMemNoOffsetT(
bool alignOK =
false,
unsigned Alignment = 0)
const {
1484 if (!ARMMCRegisterClasses[ARM::tGPRRegClassID].
contains(
1489 return Memory.OffsetRegNum == 0 &&
Memory.OffsetImm ==
nullptr &&
1492 bool isMemPCRelImm12()
const {
1493 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
Memory.Alignment != 0)
1496 if (
Memory.BaseRegNum != ARM::PC)
1499 if (!
Memory.OffsetImm)
return true;
1500 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1501 int64_t Val =
CE->getValue();
1502 return (Val > -4096 && Val < 4096) ||
1508 bool isAlignedMemory()
const {
1509 return isMemNoOffset(
true);
1512 bool isAlignedMemoryNone()
const {
1513 return isMemNoOffset(
false, 0);
1516 bool isDupAlignedMemoryNone()
const {
1517 return isMemNoOffset(
false, 0);
1520 bool isAlignedMemory16()
const {
1521 if (isMemNoOffset(
false, 2))
1523 return isMemNoOffset(
false, 0);
1526 bool isDupAlignedMemory16()
const {
1527 if (isMemNoOffset(
false, 2))
1529 return isMemNoOffset(
false, 0);
1532 bool isAlignedMemory32()
const {
1533 if (isMemNoOffset(
false, 4))
1535 return isMemNoOffset(
false, 0);
1538 bool isDupAlignedMemory32()
const {
1539 if (isMemNoOffset(
false, 4))
1541 return isMemNoOffset(
false, 0);
1544 bool isAlignedMemory64()
const {
1545 if (isMemNoOffset(
false, 8))
1547 return isMemNoOffset(
false, 0);
1550 bool isDupAlignedMemory64()
const {
1551 if (isMemNoOffset(
false, 8))
1553 return isMemNoOffset(
false, 0);
1556 bool isAlignedMemory64or128()
const {
1557 if (isMemNoOffset(
false, 8))
1559 if (isMemNoOffset(
false, 16))
1561 return isMemNoOffset(
false, 0);
1564 bool isDupAlignedMemory64or128()
const {
1565 if (isMemNoOffset(
false, 8))
1567 if (isMemNoOffset(
false, 16))
1569 return isMemNoOffset(
false, 0);
1572 bool isAlignedMemory64or128or256()
const {
1573 if (isMemNoOffset(
false, 8))
1575 if (isMemNoOffset(
false, 16))
1577 if (isMemNoOffset(
false, 32))
1579 return isMemNoOffset(
false, 0);
1582 bool isAddrMode2()
const {
1583 if (!isGPRMem() ||
Memory.Alignment != 0)
return false;
1585 if (
Memory.OffsetRegNum)
return true;
1587 if (!
Memory.OffsetImm)
return true;
1588 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1589 int64_t Val =
CE->getValue();
1590 return Val > -4096 && Val < 4096;
1595 bool isAM2OffsetImm()
const {
1596 if (!
isImm())
return false;
1599 if (!CE)
return false;
1600 int64_t Val =
CE->getValue();
1602 (Val > -4096 && Val < 4096);
1605 bool isAddrMode3()
const {
1609 if (
isImm() && !isa<MCConstantExpr>(getImm()))
1611 if (!isGPRMem() ||
Memory.Alignment != 0)
return false;
1615 if (
Memory.OffsetRegNum)
return true;
1617 if (!
Memory.OffsetImm)
return true;
1618 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1619 int64_t Val =
CE->getValue();
1622 return (Val > -256 && Val < 256) ||
1628 bool isAM3Offset()
const {
1635 if (!CE)
return false;
1636 int64_t Val =
CE->getValue();
1638 return (Val > -256 && Val < 256) ||
1642 bool isAddrMode5()
const {
1646 if (
isImm() && !isa<MCConstantExpr>(getImm()))
1648 if (!isGPRMem() ||
Memory.Alignment != 0)
return false;
1650 if (
Memory.OffsetRegNum)
return false;
1652 if (!
Memory.OffsetImm)
return true;
1653 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1654 int64_t Val =
CE->getValue();
1655 return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
1661 bool isAddrMode5FP16()
const {
1665 if (
isImm() && !isa<MCConstantExpr>(getImm()))
1667 if (!isGPRMem() ||
Memory.Alignment != 0)
return false;
1669 if (
Memory.OffsetRegNum)
return false;
1671 if (!
Memory.OffsetImm)
return true;
1672 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1673 int64_t Val =
CE->getValue();
1674 return (Val >= -510 && Val <= 510 && ((Val & 1) == 0)) ||
1680 bool isMemTBB()
const {
1681 if (!isGPRMem() || !
Memory.OffsetRegNum ||
Memory.isNegative ||
1687 bool isMemTBH()
const {
1688 if (!isGPRMem() || !
Memory.OffsetRegNum ||
Memory.isNegative ||
1695 bool isMemRegOffset()
const {
1696 if (!isGPRMem() || !
Memory.OffsetRegNum ||
Memory.Alignment != 0)
1701 bool isT2MemRegOffset()
const {
1702 if (!isGPRMem() || !
Memory.OffsetRegNum ||
Memory.isNegative ||
1713 bool isMemThumbRR()
const {
1716 if (!isGPRMem() || !
Memory.OffsetRegNum ||
Memory.isNegative ||
1723 bool isMemThumbRIs4()
const {
1724 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
1728 if (!
Memory.OffsetImm)
return true;
1729 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1730 int64_t Val =
CE->getValue();
1731 return Val >= 0 && Val <= 124 && (Val % 4) == 0;
1736 bool isMemThumbRIs2()
const {
1737 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
1741 if (!
Memory.OffsetImm)
return true;
1742 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1743 int64_t Val =
CE->getValue();
1744 return Val >= 0 && Val <= 62 && (Val % 2) == 0;
1749 bool isMemThumbRIs1()
const {
1750 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
1754 if (!
Memory.OffsetImm)
return true;
1755 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1756 int64_t Val =
CE->getValue();
1757 return Val >= 0 && Val <= 31;
1762 bool isMemThumbSPI()
const {
1763 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
1767 if (!
Memory.OffsetImm)
return true;
1768 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1769 int64_t Val =
CE->getValue();
1770 return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
1775 bool isMemImm8s4Offset()
const {
1779 if (
isImm() && !isa<MCConstantExpr>(getImm()))
1781 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
Memory.Alignment != 0)
1784 if (!
Memory.OffsetImm)
return true;
1785 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1786 int64_t Val =
CE->getValue();
1788 return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) ||
1794 bool isMemImm7s4Offset()
const {
1798 if (
isImm() && !isa<MCConstantExpr>(getImm()))
1800 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
Memory.Alignment != 0 ||
1801 !ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
1805 if (!
Memory.OffsetImm)
return true;
1806 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1807 int64_t Val =
CE->getValue();
1809 return (Val >= -508 && Val <= 508 && (Val & 3) == 0) || Val == INT32_MIN;
1814 bool isMemImm0_1020s4Offset()
const {
1815 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
Memory.Alignment != 0)
1818 if (!
Memory.OffsetImm)
return true;
1819 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1820 int64_t Val =
CE->getValue();
1821 return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
1826 bool isMemImm8Offset()
const {
1827 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
Memory.Alignment != 0)
1830 if (
Memory.BaseRegNum == ARM::PC)
return false;
1832 if (!
Memory.OffsetImm)
return true;
1833 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1834 int64_t Val =
CE->getValue();
1836 (Val > -256 && Val < 256);
1841 template<
unsigned Bits,
unsigned RegClassID>
1842 bool isMemImm7ShiftedOffset()
const {
1843 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
Memory.Alignment != 0 ||
1844 !ARMMCRegisterClasses[RegClassID].contains(
Memory.BaseRegNum))
1850 if (!
Memory.OffsetImm)
return true;
1851 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1852 int64_t Val =
CE->getValue();
1856 if (Val == INT32_MIN)
1859 unsigned Divisor = 1U <<
Bits;
1862 if (Val % Divisor != 0)
1867 return (Val >= -127 && Val <= 127);
1872 template <
int shift>
bool isMemRegRQOffset()
const {
1873 if (!isMVEMem() ||
Memory.OffsetImm !=
nullptr ||
Memory.Alignment != 0)
1876 if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].
contains(
1879 if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].
contains(
  template <int shift> bool isMemRegQOffset() const {
    if (!isMVEMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)

    if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(

    static_assert(shift < 56,
                  "Such that we don't shift by a value higher than 62");
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();

      if ((Val & ((1U << shift) - 1)) != 0)

      int64_t Range = (1U << (7 + shift)) - 1;
      return (Val == INT32_MIN) || (Val > -Range && Val < Range);
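      // Example using only the checks visible above: for shift = 2 the offset
      // must have its low two bits clear and, since
      // Range = (1 << (7 + 2)) - 1 = 511, must satisfy -511 < Val < 511
      // (INT32_MIN is also accepted), i.e. word-aligned offsets from -508 to
      // 508.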
1921 bool isMemPosImm8Offset()
const {
1922 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
Memory.Alignment != 0)
1925 if (!
Memory.OffsetImm)
return true;
1926 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1927 int64_t Val =
CE->getValue();
1928 return Val >= 0 && Val < 256;
1933 bool isMemNegImm8Offset()
const {
1934 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
Memory.Alignment != 0)
1937 if (
Memory.BaseRegNum == ARM::PC)
return false;
1939 if (!
Memory.OffsetImm)
return false;
1940 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1941 int64_t Val =
CE->getValue();
1943 (Val > -256 && Val < 0);
1948 bool isMemUImm12Offset()
const {
1949 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
Memory.Alignment != 0)
1952 if (!
Memory.OffsetImm)
return true;
1953 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1954 int64_t Val =
CE->getValue();
1955 return (Val >= 0 && Val < 4096);
1960 bool isMemImm12Offset()
const {
1965 if (
isImm() && !isa<MCConstantExpr>(getImm()))
1968 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
Memory.Alignment != 0)
1971 if (!
Memory.OffsetImm)
return true;
1972 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1973 int64_t Val =
CE->getValue();
1974 return (Val > -4096 && Val < 4096) ||
1982 bool isConstPoolAsmImm()
const {
1985 return (isConstantPoolImm());
1988 bool isPostIdxImm8()
const {
1989 if (!
isImm())
return false;
1991 if (!CE)
return false;
1992 int64_t Val =
CE->getValue();
1993 return (Val > -256 && Val < 256) ||
1997 bool isPostIdxImm8s4()
const {
1998 if (!
isImm())
return false;
2000 if (!CE)
return false;
2001 int64_t Val =
CE->getValue();
2002 return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
2006 bool isMSRMask()
const {
return Kind == k_MSRMask; }
2007 bool isBankedReg()
const {
return Kind == k_BankedReg; }
2008 bool isProcIFlags()
const {
return Kind == k_ProcIFlags; }
2011 bool isSingleSpacedVectorList()
const {
2012 return Kind == k_VectorList && !VectorList.isDoubleSpaced;
2015 bool isDoubleSpacedVectorList()
const {
2016 return Kind == k_VectorList && VectorList.isDoubleSpaced;
2019 bool isVecListOneD()
const {
2020 if (!isSingleSpacedVectorList())
return false;
2021 return VectorList.Count == 1;
2024 bool isVecListTwoMQ()
const {
2025 return isSingleSpacedVectorList() && VectorList.Count == 2 &&
2026 ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
2030 bool isVecListDPair()
const {
2031 if (!isSingleSpacedVectorList())
return false;
2032 return (ARMMCRegisterClasses[ARM::DPairRegClassID]
2036 bool isVecListThreeD()
const {
2037 if (!isSingleSpacedVectorList())
return false;
2038 return VectorList.Count == 3;
2041 bool isVecListFourD()
const {
2042 if (!isSingleSpacedVectorList())
return false;
2043 return VectorList.Count == 4;
2046 bool isVecListDPairSpaced()
const {
2047 if (
Kind != k_VectorList)
return false;
2048 if (isSingleSpacedVectorList())
return false;
2049 return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
2053 bool isVecListThreeQ()
const {
2054 if (!isDoubleSpacedVectorList())
return false;
2055 return VectorList.Count == 3;
2058 bool isVecListFourQ()
const {
2059 if (!isDoubleSpacedVectorList())
return false;
2060 return VectorList.Count == 4;
2063 bool isVecListFourMQ()
const {
2064 return isSingleSpacedVectorList() && VectorList.Count == 4 &&
2065 ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
2069 bool isSingleSpacedVectorAllLanes()
const {
2070 return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
2073 bool isDoubleSpacedVectorAllLanes()
const {
2074 return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
2077 bool isVecListOneDAllLanes()
const {
2078 if (!isSingleSpacedVectorAllLanes())
return false;
2079 return VectorList.Count == 1;
2082 bool isVecListDPairAllLanes()
const {
2083 if (!isSingleSpacedVectorAllLanes())
return false;
2084 return (ARMMCRegisterClasses[ARM::DPairRegClassID]
2088 bool isVecListDPairSpacedAllLanes()
const {
2089 if (!isDoubleSpacedVectorAllLanes())
return false;
2090 return VectorList.Count == 2;
2093 bool isVecListThreeDAllLanes()
const {
2094 if (!isSingleSpacedVectorAllLanes())
return false;
2095 return VectorList.Count == 3;
2098 bool isVecListThreeQAllLanes()
const {
2099 if (!isDoubleSpacedVectorAllLanes())
return false;
2100 return VectorList.Count == 3;
2103 bool isVecListFourDAllLanes()
const {
2104 if (!isSingleSpacedVectorAllLanes())
return false;
2105 return VectorList.Count == 4;
2108 bool isVecListFourQAllLanes()
const {
2109 if (!isDoubleSpacedVectorAllLanes())
return false;
2110 return VectorList.Count == 4;
2113 bool isSingleSpacedVectorIndexed()
const {
2114 return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
2117 bool isDoubleSpacedVectorIndexed()
const {
2118 return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
2121 bool isVecListOneDByteIndexed()
const {
2122 if (!isSingleSpacedVectorIndexed())
return false;
2123 return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
2126 bool isVecListOneDHWordIndexed()
const {
2127 if (!isSingleSpacedVectorIndexed())
return false;
2128 return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
2131 bool isVecListOneDWordIndexed()
const {
2132 if (!isSingleSpacedVectorIndexed())
return false;
2133 return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
2136 bool isVecListTwoDByteIndexed()
const {
2137 if (!isSingleSpacedVectorIndexed())
return false;
2138 return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
2141 bool isVecListTwoDHWordIndexed()
const {
2142 if (!isSingleSpacedVectorIndexed())
return false;
2143 return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
2146 bool isVecListTwoQWordIndexed()
const {
2147 if (!isDoubleSpacedVectorIndexed())
return false;
2148 return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
2151 bool isVecListTwoQHWordIndexed()
const {
2152 if (!isDoubleSpacedVectorIndexed())
return false;
2153 return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
2156 bool isVecListTwoDWordIndexed()
const {
2157 if (!isSingleSpacedVectorIndexed())
return false;
2158 return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
2161 bool isVecListThreeDByteIndexed()
const {
2162 if (!isSingleSpacedVectorIndexed())
return false;
2163 return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
2166 bool isVecListThreeDHWordIndexed()
const {
2167 if (!isSingleSpacedVectorIndexed())
return false;
2168 return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
2171 bool isVecListThreeQWordIndexed()
const {
2172 if (!isDoubleSpacedVectorIndexed())
return false;
2173 return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
2176 bool isVecListThreeQHWordIndexed()
const {
2177 if (!isDoubleSpacedVectorIndexed())
return false;
2178 return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
2181 bool isVecListThreeDWordIndexed()
const {
2182 if (!isSingleSpacedVectorIndexed())
return false;
2183 return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
2186 bool isVecListFourDByteIndexed()
const {
2187 if (!isSingleSpacedVectorIndexed())
return false;
2188 return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
2191 bool isVecListFourDHWordIndexed()
const {
2192 if (!isSingleSpacedVectorIndexed())
return false;
2193 return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
2196 bool isVecListFourQWordIndexed()
const {
2197 if (!isDoubleSpacedVectorIndexed())
return false;
2198 return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
2201 bool isVecListFourQHWordIndexed()
const {
2202 if (!isDoubleSpacedVectorIndexed())
return false;
2203 return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
2206 bool isVecListFourDWordIndexed()
const {
2207 if (!isSingleSpacedVectorIndexed())
return false;
2208 return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
2211 bool isVectorIndex()
const {
return Kind == k_VectorIndex; }
2213 template <
unsigned NumLanes>
2214 bool isVectorIndexInRange()
const {
2215 if (
Kind != k_VectorIndex)
return false;
2216 return VectorIndex.Val < NumLanes;
2219 bool isVectorIndex8()
const {
return isVectorIndexInRange<8>(); }
2220 bool isVectorIndex16()
const {
return isVectorIndexInRange<4>(); }
2221 bool isVectorIndex32()
const {
return isVectorIndexInRange<2>(); }
2222 bool isVectorIndex64()
const {
return isVectorIndexInRange<1>(); }
2224 template<
int PermittedValue,
int OtherPermittedValue>
2225 bool isMVEPairVectorIndex()
const {
2226 if (
Kind != k_VectorIndex)
return false;
2227 return VectorIndex.Val == PermittedValue ||
2228 VectorIndex.Val == OtherPermittedValue;
2231 bool isNEONi8splat()
const {
2232 if (!
isImm())
return false;
2235 if (!CE)
return false;
2236 int64_t
Value =
CE->getValue();
2243 if (isNEONByteReplicate(2))
2249 if (!CE)
return false;
2250 unsigned Value =
CE->getValue();
2254 bool isNEONi16splatNot()
const {
2259 if (!CE)
return false;
2260 unsigned Value =
CE->getValue();
2265 if (isNEONByteReplicate(4))
2271 if (!CE)
return false;
2272 unsigned Value =
CE->getValue();
2276 bool isNEONi32splatNot()
const {
2281 if (!CE)
return false;
2282 unsigned Value =
CE->getValue();
  static bool isValidNEONi32vmovImm(int64_t Value) {
    return ((Value & 0xffffffffffffff00) == 0) ||
           ((Value & 0xffffffffffff00ff) == 0) ||
           ((Value & 0xffffffffff00ffff) == 0) ||
           ((Value & 0xffffffff00ffffff) == 0) ||
           ((Value & 0xffffffffffff00ff) == 0xff) ||
           ((Value & 0xffffffffff00ffff) == 0xffff);
  }
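  // The six masks above accept the i32 VMOV immediate shapes: a single
  // non-zero byte in byte lane 0, 1, 2 or 3 of a 32-bit value, or a byte in
  // lane 1 or 2 with all lower bytes equal to 0xff. For example, 0x0000AB00
  // and 0x0000ABFF are accepted while 0x00AB00CD is not.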
2297 bool isNEONReplicate(
unsigned Width,
unsigned NumElems,
bool Inv)
const {
2299 "Invalid element width");
2300 assert(NumElems *
Width <= 64 &&
"Invalid result width");
2308 int64_t
Value =
CE->getValue();
2316 if (
Width == 16 && (Elem & 0x00ff) != 0 && (Elem & 0xff00) != 0)
2318 if (
Width == 32 && !isValidNEONi32vmovImm(Elem))
2321 for (
unsigned i = 1;
i < NumElems; ++
i) {
2329 bool isNEONByteReplicate(
unsigned NumBytes)
const {
2330 return isNEONReplicate(8, NumBytes,
false);
2333 static void checkNeonReplicateArgs(
unsigned FromW,
unsigned ToW) {
2334 assert((FromW == 8 || FromW == 16 || FromW == 32) &&
2335 "Invalid source width");
2336 assert((ToW == 16 || ToW == 32 || ToW == 64) &&
2337 "Invalid destination width");
2338 assert(FromW < ToW &&
"ToW is not less than FromW");
2341 template<
unsigned FromW,
unsigned ToW>
2342 bool isNEONmovReplicate()
const {
2343 checkNeonReplicateArgs(FromW, ToW);
2344 if (ToW == 64 && isNEONi64splat())
2346 return isNEONReplicate(FromW, ToW / FromW,
false);
2349 template<
unsigned FromW,
unsigned ToW>
2350 bool isNEONinvReplicate()
const {
2351 checkNeonReplicateArgs(FromW, ToW);
2352 return isNEONReplicate(FromW, ToW / FromW,
true);
2355 bool isNEONi32vmov()
const {
2356 if (isNEONByteReplicate(4))
2364 return isValidNEONi32vmovImm(
CE->getValue());
2367 bool isNEONi32vmovNeg()
const {
2368 if (!
isImm())
return false;
2371 if (!CE)
return false;
2372 return isValidNEONi32vmovImm(~
CE->getValue());
2375 bool isNEONi64splat()
const {
2376 if (!
isImm())
return false;
2379 if (!CE)
return false;
2382 for (
unsigned i = 0; i < 8; ++i, Value >>= 8)
2383 if ((
Value & 0xff) != 0 && (
Value & 0xff) != 0xff)
return false;
2387 template<
int64_t Angle,
int64_t Remainder>
2388 bool isComplexRotation()
const {
2389 if (!
isImm())
return false;
2392 if (!CE)
return false;
2395 return (
Value % Angle == Remainder &&
Value <= 270);
2398 bool isMVELongShift()
const {
2399 if (!
isImm())
return false;
2402 if (!CE)
return false;
2407 bool isMveSaturateOp()
const {
2408 if (!
isImm())
return false;
2410 if (!CE)
return false;
2415 bool isITCondCodeNoAL()
const {
2416 if (!isITCondCode())
return false;
2421 bool isITCondCodeRestrictedI()
const {
2422 if (!isITCondCode())
2428 bool isITCondCodeRestrictedS()
const {
2429 if (!isITCondCode())
2436 bool isITCondCodeRestrictedU()
const {
2437 if (!isITCondCode())
2443 bool isITCondCodeRestrictedFP()
const {
2444 if (!isITCondCode())
2455 else if (
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
2461 void addARMBranchTargetOperands(
MCInst &Inst,
unsigned N)
const {
2462 assert(
N == 1 &&
"Invalid number of operands!");
2463 addExpr(Inst, getImm());
2466 void addThumbBranchTargetOperands(
MCInst &Inst,
unsigned N)
const {
2467 assert(
N == 1 &&
"Invalid number of operands!");
2468 addExpr(Inst, getImm());
2471 void addCondCodeOperands(
MCInst &Inst,
unsigned N)
const {
2472 assert(
N == 2 &&
"Invalid number of operands!");
2478 void addVPTPredNOperands(
MCInst &Inst,
unsigned N)
const {
2479 assert(
N == 3 &&
"Invalid number of operands!");
2481 unsigned RegNum = getVPTPred() ==
ARMVCC::None ? 0: ARM::P0;
2486 void addVPTPredROperands(
MCInst &Inst,
unsigned N)
const {
2487 assert(
N == 4 &&
"Invalid number of operands!");
2488 addVPTPredNOperands(Inst,
N-1);
2497 "Inactive register in vpred_r is not tied to an output!");
2503 void addCoprocNumOperands(
MCInst &Inst,
unsigned N)
const {
2504 assert(
N == 1 &&
"Invalid number of operands!");
2508 void addCoprocRegOperands(
MCInst &Inst,
unsigned N)
const {
2509 assert(
N == 1 &&
"Invalid number of operands!");
2513 void addCoprocOptionOperands(
MCInst &Inst,
unsigned N)
const {
2514 assert(
N == 1 &&
"Invalid number of operands!");
2518 void addITMaskOperands(
MCInst &Inst,
unsigned N)
const {
2519 assert(
N == 1 &&
"Invalid number of operands!");
2523 void addITCondCodeOperands(
MCInst &Inst,
unsigned N)
const {
2524 assert(
N == 1 &&
"Invalid number of operands!");
2528 void addITCondCodeInvOperands(
MCInst &Inst,
unsigned N)
const {
2529 assert(
N == 1 &&
"Invalid number of operands!");
2533 void addCCOutOperands(
MCInst &Inst,
unsigned N)
const {
2534 assert(
N == 1 &&
"Invalid number of operands!");
2538 void addRegOperands(
MCInst &Inst,
unsigned N)
const {
2539 assert(
N == 1 &&
"Invalid number of operands!");
2543 void addRegShiftedRegOperands(
MCInst &Inst,
unsigned N)
const {
2544 assert(
N == 3 &&
"Invalid number of operands!");
2545 assert(isRegShiftedReg() &&
2546 "addRegShiftedRegOperands() on non-RegShiftedReg!");
2553 void addRegShiftedImmOperands(
MCInst &Inst,
unsigned N)
const {
2554 assert(
N == 2 &&
"Invalid number of operands!");
2555 assert(isRegShiftedImm() &&
2556 "addRegShiftedImmOperands() on non-RegShiftedImm!");
2559 unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm);
2564 void addShifterImmOperands(
MCInst &Inst,
unsigned N)
const {
2565 assert(
N == 1 &&
"Invalid number of operands!");
2570 void addRegListOperands(
MCInst &Inst,
unsigned N)
const {
2571 assert(
N == 1 &&
"Invalid number of operands!");
2573 for (
unsigned Reg : RegList)
2577 void addRegListWithAPSROperands(
MCInst &Inst,
unsigned N)
const {
2578 assert(
N == 1 &&
"Invalid number of operands!");
2580 for (
unsigned Reg : RegList)
2584 void addDPRRegListOperands(
MCInst &Inst,
unsigned N)
const {
2585 addRegListOperands(Inst,
N);
2588 void addSPRRegListOperands(
MCInst &Inst,
unsigned N)
const {
2589 addRegListOperands(Inst,
N);
2592 void addFPSRegListWithVPROperands(
MCInst &Inst,
unsigned N)
const {
2593 addRegListOperands(Inst,
N);
2596 void addFPDRegListWithVPROperands(
MCInst &Inst,
unsigned N)
const {
2597 addRegListOperands(Inst,
N);
2600 void addRotImmOperands(
MCInst &Inst,
unsigned N)
const {
2601 assert(
N == 1 &&
"Invalid number of operands!");
2606 void addModImmOperands(
MCInst &Inst,
unsigned N)
const {
2607 assert(
N == 1 &&
"Invalid number of operands!");
2611 return addImmOperands(Inst,
N);
2616 void addModImmNotOperands(
MCInst &Inst,
unsigned N)
const {
2617 assert(
N == 1 &&
"Invalid number of operands!");
2623 void addModImmNegOperands(
MCInst &Inst,
unsigned N)
const {
2624 assert(
N == 1 &&
"Invalid number of operands!");
2630 void addThumbModImmNeg8_255Operands(
MCInst &Inst,
unsigned N)
const {
2631 assert(
N == 1 &&
"Invalid number of operands!");
2637 void addThumbModImmNeg1_7Operands(
MCInst &Inst,
unsigned N)
const {
2638 assert(
N == 1 &&
"Invalid number of operands!");
2644 void addBitfieldOperands(
MCInst &Inst,
unsigned N)
const {
2645 assert(
N == 1 &&
"Invalid number of operands!");
2651 (32 - (lsb + width)));
2655 void addImmOperands(
MCInst &Inst,
unsigned N)
const {
2656 assert(
N == 1 &&
"Invalid number of operands!");
2657 addExpr(Inst, getImm());
2660 void addFBits16Operands(
MCInst &Inst,
unsigned N)
const {
2661 assert(
N == 1 &&
"Invalid number of operands!");
2666 void addFBits32Operands(
MCInst &Inst,
unsigned N)
const {
2667 assert(
N == 1 &&
"Invalid number of operands!");
2672 void addFPImmOperands(
MCInst &Inst,
unsigned N)
const {
2673 assert(
N == 1 &&
"Invalid number of operands!");
2679 void addImm8s4Operands(
MCInst &Inst,
unsigned N)
const {
2680 assert(
N == 1 &&
"Invalid number of operands!");
2687 void addImm7s4Operands(
MCInst &Inst,
unsigned N)
const {
2688 assert(
N == 1 &&
"Invalid number of operands!");
2695 void addImm7Shift0Operands(
MCInst &Inst,
unsigned N)
const {
2696 assert(
N == 1 &&
"Invalid number of operands!");
2701 void addImm7Shift1Operands(
MCInst &Inst,
unsigned N)
const {
2702 assert(
N == 1 &&
"Invalid number of operands!");
2707 void addImm7Shift2Operands(
MCInst &Inst,
unsigned N)
const {
2708 assert(
N == 1 &&
"Invalid number of operands!");
2713 void addImm7Operands(
MCInst &Inst,
unsigned N)
const {
2714 assert(
N == 1 &&
"Invalid number of operands!");
2719 void addImm0_1020s4Operands(
MCInst &Inst,
unsigned N)
const {
2720 assert(
N == 1 &&
"Invalid number of operands!");
2727 void addImm0_508s4NegOperands(
MCInst &Inst,
unsigned N)
const {
2728 assert(
N == 1 &&
"Invalid number of operands!");
2735 void addImm0_508s4Operands(
MCInst &Inst,
unsigned N)
const {
2736 assert(
N == 1 &&
"Invalid number of operands!");
2743 void addImm1_16Operands(
MCInst &Inst,
unsigned N)
const {
2744 assert(
N == 1 &&
"Invalid number of operands!");
2751 void addImm1_32Operands(
MCInst &Inst,
unsigned N)
const {
2752 assert(
N == 1 &&
"Invalid number of operands!");
2759 void addImmThumbSROperands(
MCInst &Inst,
unsigned N)
const {
2760 assert(
N == 1 &&
"Invalid number of operands!");
2764 unsigned Imm =
CE->getValue();
2768 void addPKHASRImmOperands(
MCInst &Inst,
unsigned N)
const {
2769 assert(
N == 1 &&
"Invalid number of operands!");
2773 int Val =
CE->getValue();
2777 void addT2SOImmNotOperands(
MCInst &Inst,
unsigned N)
const {
2778 assert(
N == 1 &&
"Invalid number of operands!");
2785 void addT2SOImmNegOperands(
MCInst &Inst,
unsigned N)
const {
2786 assert(
N == 1 &&
"Invalid number of operands!");
2793 void addImm0_4095NegOperands(
MCInst &Inst,
unsigned N)
const {
2794 assert(
N == 1 &&
"Invalid number of operands!");
2801 void addUnsignedOffset_b8s2Operands(
MCInst &Inst,
unsigned N)
const {
2802 if(
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
2810 void addThumbMemPCOperands(
MCInst &Inst,
unsigned N)
const {
2811 assert(
N == 1 &&
"Invalid number of operands!");
2823 assert(isGPRMem() &&
"Unknown value type!");
2824 assert(isa<MCConstantExpr>(
Memory.OffsetImm) &&
"Unknown value type!");
2825 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm))
2831 void addMemBarrierOptOperands(
MCInst &Inst,
unsigned N)
const {
2832 assert(
N == 1 &&
"Invalid number of operands!");
2836 void addInstSyncBarrierOptOperands(
MCInst &Inst,
unsigned N)
const {
2837 assert(
N == 1 &&
"Invalid number of operands!");
2841 void addTraceSyncBarrierOptOperands(
MCInst &Inst,
unsigned N)
const {
2842 assert(
N == 1 &&
"Invalid number of operands!");
2846 void addMemNoOffsetOperands(
MCInst &Inst,
unsigned N)
const {
2847 assert(
N == 1 &&
"Invalid number of operands!");
2851 void addMemNoOffsetT2Operands(
MCInst &Inst,
unsigned N)
const {
2852 assert(
N == 1 &&
"Invalid number of operands!");
2856 void addMemNoOffsetT2NoSpOperands(
MCInst &Inst,
unsigned N)
const {
2857 assert(
N == 1 &&
"Invalid number of operands!");
2861 void addMemNoOffsetTOperands(
MCInst &Inst,
unsigned N)
const {
2862 assert(
N == 1 &&
"Invalid number of operands!");
2866 void addMemPCRelImm12Operands(
MCInst &Inst,
unsigned N)
const {
2867 assert(
N == 1 &&
"Invalid number of operands!");
2868 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm))
2874 void addAdrLabelOperands(
MCInst &Inst,
unsigned N)
const {
2875 assert(
N == 1 &&
"Invalid number of operands!");
2880 if (!isa<MCConstantExpr>(getImm())) {
2886 int Val =
CE->getValue();
2890 void addAlignedMemoryOperands(
MCInst &Inst,
unsigned N)
const {
2891 assert(
N == 2 &&
"Invalid number of operands!");
2896 void addDupAlignedMemoryNoneOperands(
MCInst &Inst,
unsigned N)
const {
2897 addAlignedMemoryOperands(Inst,
N);
2900 void addAlignedMemoryNoneOperands(
MCInst &Inst,
unsigned N)
const {
2901 addAlignedMemoryOperands(Inst,
N);
2904 void addAlignedMemory16Operands(
MCInst &Inst,
unsigned N)
const {
2905 addAlignedMemoryOperands(Inst,
N);
2908 void addDupAlignedMemory16Operands(
MCInst &Inst,
unsigned N)
const {
2909 addAlignedMemoryOperands(Inst,
N);
2912 void addAlignedMemory32Operands(
MCInst &Inst,
unsigned N)
const {
2913 addAlignedMemoryOperands(Inst,
N);
2916 void addDupAlignedMemory32Operands(
MCInst &Inst,
unsigned N)
const {
2917 addAlignedMemoryOperands(Inst,
N);
2920 void addAlignedMemory64Operands(
MCInst &Inst,
unsigned N)
const {
2921 addAlignedMemoryOperands(Inst,
N);
2924 void addDupAlignedMemory64Operands(
MCInst &Inst,
unsigned N)
const {
2925 addAlignedMemoryOperands(Inst,
N);
2928 void addAlignedMemory64or128Operands(
MCInst &Inst,
unsigned N)
const {
2929 addAlignedMemoryOperands(Inst,
N);
2932 void addDupAlignedMemory64or128Operands(
MCInst &Inst,
unsigned N)
const {
2933 addAlignedMemoryOperands(Inst,
N);
2936 void addAlignedMemory64or128or256Operands(
MCInst &Inst,
unsigned N)
const {
2937 addAlignedMemoryOperands(Inst,
N);
2940 void addAddrMode2Operands(
MCInst &Inst,
unsigned N)
const {
2941 assert(
N == 3 &&
"Invalid number of operands!");
2944 if (!
Memory.OffsetRegNum) {
2947 else if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
2948 int32_t Val =
CE->getValue();
2969 void addAM2OffsetImmOperands(
MCInst &Inst,
unsigned N)
const {
2970 assert(
N == 2 &&
"Invalid number of operands!");
2972 assert(CE &&
"non-constant AM2OffsetImm operand!");
2973 int32_t Val =
CE->getValue();
2977 if (Val < 0) Val = -Val;
2983 void addAddrMode3Operands(
MCInst &Inst,
unsigned N)
const {
2984 assert(
N == 3 &&
"Invalid number of operands!");
2997 if (!
Memory.OffsetRegNum) {
3000 else if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
3001 int32_t Val =
CE->getValue();
3021 void addAM3OffsetOperands(
MCInst &Inst,
unsigned N)
const {
3022 assert(
N == 2 &&
"Invalid number of operands!");
3023 if (
Kind == k_PostIndexRegister) {
3033 int32_t Val =
CE->getValue();
3037 if (Val < 0) Val = -Val;
3043 void addAddrMode5Operands(
MCInst &Inst,
unsigned N)
const {
3044 assert(
N == 2 &&
"Invalid number of operands!");
3057 else if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
3059 int32_t Val =
CE->getValue() / 4;
3072 void addAddrMode5FP16Operands(
MCInst &Inst,
unsigned N)
const {
3073 assert(
N == 2 &&
"Invalid number of operands!");
3087 else if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
3088 int32_t Val =
CE->getValue() / 2;
3101 void addMemImm8s4OffsetOperands(
MCInst &Inst,
unsigned N)
const {
3102 assert(
N == 2 &&
"Invalid number of operands!");
3113 addExpr(Inst,
Memory.OffsetImm);
3116 void addMemImm7s4OffsetOperands(
MCInst &Inst,
unsigned N)
const {
3117 assert(
N == 2 &&
"Invalid number of operands!");
3128 addExpr(Inst,
Memory.OffsetImm);
3131 void addMemImm0_1020s4OffsetOperands(
MCInst &Inst,
unsigned N)
const {
3132 assert(
N == 2 &&
"Invalid number of operands!");
3136 else if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm))
3143 void addMemImmOffsetOperands(
MCInst &Inst,
unsigned N)
const {
3144 assert(
N == 2 &&
"Invalid number of operands!");
3146 addExpr(Inst,
Memory.OffsetImm);
3149 void addMemRegRQOffsetOperands(
MCInst &Inst,
unsigned N)
const {
3150 assert(
N == 2 &&
"Invalid number of operands!");
3155 void addMemUImm12OffsetOperands(
MCInst &Inst,
unsigned N)
const {
3156 assert(
N == 2 &&
"Invalid number of operands!");
3159 addExpr(Inst, getImm());
3166 addExpr(Inst,
Memory.OffsetImm);
3169 void addMemImm12OffsetOperands(
MCInst &Inst,
unsigned N)
const {
3170 assert(
N == 2 &&
"Invalid number of operands!");
3173 addExpr(Inst, getImm());
3180 addExpr(Inst,
Memory.OffsetImm);
3183 void addConstPoolAsmImmOperands(
MCInst &Inst,
unsigned N)
const {
3184 assert(
N == 1 &&
"Invalid number of operands!");
3187 addExpr(Inst, getConstantPoolImm());
  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // ...
  }

  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // ...
  }

  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    // ...
  }

  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    // ...
  }

  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // ...
  }
  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // ...
    else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
      // ...
    // ...
  }

  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // ...
    else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
      // ...
    // ...
  }

  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // ...
    addExpr(Inst, Memory.OffsetImm);
  }

  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // ...
    else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
      // ...
    // ...
  }
  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // ...
    assert(CE && "non-constant post-idx-imm8 operand!");
    int Imm = CE->getValue();
    bool isAdd = Imm >= 0;
    // ...
    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
    // ...
  }

  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // ...
    assert(CE && "non-constant post-idx-imm8s4 operand!");
    int Imm = CE->getValue();
    bool isAdd = Imm >= 0;
    // ...
    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
    // ...
  }
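  // Editorial sketch (not part of the original source): both post-index forms
  // pack the magnitude into the low byte and the add/subtract direction into
  // bit 8, with the "s4" variant first dividing by the word scale. Working
  // the formulas above through two concrete immediates:
  //   #+20  ->  20      | (1 << 8)  == 0x114   (addPostIdxImm8Operands)
  //   #-16  ->  (16/4)  | (0 << 8)  == 0x004   (addPostIdxImm8s4Operands)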
  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // ...
  }

  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // ...
    //                                  PostIdxReg.ShiftTy);
    // ...
  }

  void addPowerTwoOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // ...
  }
  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // ...
  }

  void addBankedRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // ...
  }

  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // ...
  }

  void addVecListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // ...
  }
  void addMVEVecListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // ...
    const MCRegisterClass *RC_in = &ARMMCRegisterClasses[ARM::MQPRRegClassID];
    // ...
        (VectorList.Count == 2) ? &ARMMCRegisterClasses[ARM::MQQPRRegClassID]
                                : &ARMMCRegisterClasses[ARM::MQQQQPRRegClassID];
    // ...
    for (I = 0; I < E; I++)
      // ...
    assert(I < E && "Invalid vector list start register!");
    // ...
  }

  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // ...
  }
  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // ...
  }

  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // ...
  }

  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // ...
  }

  void addVectorIndex64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // ...
  }

  void addMVEVectorIndexOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // ...
  }

  void addMVEPairVectorIndexOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // ...
  }
  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // ...
  }

  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // ...
    unsigned Value = CE->getValue();
    // ...
  }

  void addNEONi16splatNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // ...
    unsigned Value = CE->getValue();
    // ...
  }

  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // ...
    unsigned Value = CE->getValue();
    // ...
  }

  void addNEONi32splatNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // ...
    unsigned Value = CE->getValue();
    // ...
  }

  void addNEONi8ReplicateOperands(MCInst &Inst, bool Inv) const {
    // ...
    assert(/* ... */ &&
           "All instructions that want to replicate a non-zero byte must be "
           "replaced with VMOVv8i8 or VMOVv16i8.");
    unsigned Value = CE->getValue();
    // ...
    unsigned B = Value & 0xff;
    // ...
  }
  void addNEONinvi8ReplicateOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addNEONi8ReplicateOperands(Inst, true);
  }

  static unsigned encodeNeonVMOVImmediate(unsigned Value) {
    // ...
    else if (Value > 0xffff && Value <= 0xffffff)
      // ...
    else if (Value > 0xffffff)
      // ...
    // ...
  }
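  // Editorial note (not part of the original file): encodeNeonVMOVImmediate
  // reduces a splat constant to the NEON modified-immediate form, and the
  // range checks above decide which byte of Value actually carries the
  // payload (a value above 0xffff cannot be expressed from the low byte
  // alone). The exact bit packing is elided in this excerpt, so any
  // reconstruction of it should be treated as an assumption.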
  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // ...
    unsigned Value = encodeNeonVMOVImmediate(CE->getValue());
    // ...
  }

  void addNEONvmovi8ReplicateOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addNEONi8ReplicateOperands(Inst, false);
  }

  void addNEONvmovi16ReplicateOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // ...
    assert(/* ... */ &&
           "All instructions that want to replicate a non-zero half-word must "
           "be replaced with V{MOV,MVN}v{4,8}i16.");
    // ...
    unsigned Elem = Value & 0xffff;
    // ...
      Elem = (Elem >> 8) | 0x200;
    // ...
  }

  void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // ...
    unsigned Value = encodeNeonVMOVImmediate(~CE->getValue());
    // ...
  }

  void addNEONvmovi32ReplicateOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // ...
    assert(/* ... */ &&
           "All instructions that want to replicate a non-zero word must be "
           "replaced with V{MOV,MVN}v{2,4}i32.");
    // ...
    unsigned Elem = encodeNeonVMOVImmediate(Value & 0xffffffff);
    // ...
  }

  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // ...
    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
      // ...
    }
    // ...
  }
  void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // ...
  }

  void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // ...
  }

  void addMveSaturateOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // ...
    unsigned Imm = CE->getValue();
    assert((Imm == 48 || Imm == 64) && "Invalid saturate operand");
    // ...
  }
  static std::unique_ptr<ARMOperand> CreateITMask(unsigned Mask, SMLoc S) {
    auto Op = std::make_unique<ARMOperand>(k_ITCondMask);
    // ...
  }

  // ... (CreateCondCode, signature elided in this excerpt)
    auto Op = std::make_unique<ARMOperand>(k_CondCode);
    // ...

  // ... (CreateVPTPred, signature elided in this excerpt)
    auto Op = std::make_unique<ARMOperand>(k_VPTPred);
    // ...

  static std::unique_ptr<ARMOperand> CreateCoprocNum(unsigned CopVal, SMLoc S) {
    auto Op = std::make_unique<ARMOperand>(k_CoprocNum);
    Op->Cop.Val = CopVal;
    // ...
  }

  static std::unique_ptr<ARMOperand> CreateCoprocReg(unsigned CopVal, SMLoc S) {
    auto Op = std::make_unique<ARMOperand>(k_CoprocReg);
    Op->Cop.Val = CopVal;
    // ...
  }

  static std::unique_ptr<ARMOperand> CreateCoprocOption(unsigned Val, SMLoc S,
                                                        SMLoc E) {
    auto Op = std::make_unique<ARMOperand>(k_CoprocOption);
    // ...
  }

  static std::unique_ptr<ARMOperand> CreateCCOut(unsigned RegNum, SMLoc S) {
    auto Op = std::make_unique<ARMOperand>(k_CCOut);
    Op->Reg.RegNum = RegNum;
    // ...
  }

  static std::unique_ptr<ARMOperand> CreateToken(StringRef Str, SMLoc S) {
    auto Op = std::make_unique<ARMOperand>(k_Token);
    Op->Tok.Data = Str.data();
    Op->Tok.Length = Str.size();
    // ...
  }

  static std::unique_ptr<ARMOperand> CreateReg(unsigned RegNum, SMLoc S,
                                               SMLoc E) {
    auto Op = std::make_unique<ARMOperand>(k_Register);
    Op->Reg.RegNum = RegNum;
    // ...
  }
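  // Editorial sketch (not part of the original file): parser routines build
  // the operand vector out of these factories. For a line such as
  // "add r0, r1, r2" the mnemonic and the first register would end up roughly
  // as:
  //   Operands.push_back(ARMOperand::CreateToken("add", MnemonicLoc));
  //   Operands.push_back(ARMOperand::CreateReg(ARM::R0, S, E));
  // where MnemonicLoc/S/E stand for the SMLocs reported by the lexer; the
  // names here are illustrative, not identifiers taken from this file.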
  static std::unique_ptr<ARMOperand>
  CreateShiftedRegister(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
                        unsigned ShiftReg, unsigned ShiftImm, SMLoc S,
                        SMLoc E) {
    auto Op = std::make_unique<ARMOperand>(k_ShiftedRegister);
    Op->RegShiftedReg.ShiftTy = ShTy;
    Op->RegShiftedReg.SrcReg = SrcReg;
    Op->RegShiftedReg.ShiftReg = ShiftReg;
    Op->RegShiftedReg.ShiftImm = ShiftImm;
    // ...
  }

  static std::unique_ptr<ARMOperand>
  CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
                         unsigned ShiftImm, SMLoc S, SMLoc E) {
    auto Op = std::make_unique<ARMOperand>(k_ShiftedImmediate);
    Op->RegShiftedImm.ShiftTy = ShTy;
    Op->RegShiftedImm.SrcReg = SrcReg;
    Op->RegShiftedImm.ShiftImm = ShiftImm;
    // ...
  }

  static std::unique_ptr<ARMOperand> CreateShifterImm(bool isASR, unsigned Imm,
                                                      SMLoc S, SMLoc E) {
    auto Op = std::make_unique<ARMOperand>(k_ShifterImmediate);
    Op->ShifterImm.isASR = isASR;
    Op->ShifterImm.Imm = Imm;
    // ...
  }

  static std::unique_ptr<ARMOperand> CreateRotImm(unsigned Imm, SMLoc S,
                                                  SMLoc E) {
    auto Op = std::make_unique<ARMOperand>(k_RotateImmediate);
    Op->RotImm.Imm = Imm;
    // ...
  }

  static std::unique_ptr<ARMOperand> CreateModImm(unsigned Bits, unsigned Rot,
                                                  SMLoc S, SMLoc E) {
    auto Op = std::make_unique<ARMOperand>(k_ModifiedImmediate);
    // ...
    Op->ModImm.Rot = Rot;
    // ...
  }

  static std::unique_ptr<ARMOperand>
  CreateConstantPoolImm(const MCExpr *Val, SMLoc S, SMLoc E) {
    auto Op = std::make_unique<ARMOperand>(k_ConstantPoolImmediate);
    // ...
  }

  static std::unique_ptr<ARMOperand>
  CreateBitfield(unsigned LSB, unsigned Width, SMLoc S, SMLoc E) {
    auto Op = std::make_unique<ARMOperand>(k_BitfieldDescriptor);
    Op->Bitfield.LSB = LSB;
    // ...
  }
  static std::unique_ptr<ARMOperand>
  CreateRegList(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
                SMLoc StartLoc, SMLoc EndLoc) {
    assert(Regs.size() > 0 && "RegList contains no registers?");
    KindTy Kind = k_RegisterList;

    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
            Regs.front().second)) {
      if (Regs.back().second == ARM::VPR)
        Kind = k_FPDRegisterListWithVPR;
      else
        Kind = k_DPRRegisterList;
    } else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(
                   Regs.front().second)) {
      if (Regs.back().second == ARM::VPR)
        Kind = k_FPSRegisterListWithVPR;
      else
        Kind = k_SPRRegisterList;
    }

    if (Kind == k_RegisterList && Regs.back().second == ARM::APSR)
      Kind = k_RegisterListWithAPSR;

    // ...
    auto Op = std::make_unique<ARMOperand>(Kind);
    for (const auto &P : Regs)
      Op->Registers.push_back(P.second);

    Op->StartLoc = StartLoc;
    Op->EndLoc = EndLoc;
    // ...
  }
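  // Editorial note (not in the original source): the classification above is
  // what later tells the matcher which register-list flavour it is looking
  // at. For example, a list whose first register is an S register and whose
  // last entry is VPR becomes k_FPSRegisterListWithVPR, while a core-register
  // list ending in APSR (as some ARMv8.1-M list operands are written) is
  // re-tagged k_RegisterListWithAPSR.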
  static std::unique_ptr<ARMOperand> CreateVectorList(unsigned RegNum,
                                                      unsigned Count,
                                                      bool isDoubleSpaced,
                                                      SMLoc S, SMLoc E) {
    auto Op = std::make_unique<ARMOperand>(k_VectorList);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
    // ...
  }

  static std::unique_ptr<ARMOperand>
  CreateVectorListAllLanes(unsigned RegNum, unsigned Count, bool isDoubleSpaced,
                           SMLoc S, SMLoc E) {
    auto Op = std::make_unique<ARMOperand>(k_VectorListAllLanes);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
    // ...
  }

  static std::unique_ptr<ARMOperand>
  CreateVectorListIndexed(unsigned RegNum, unsigned Count, unsigned Index,
                          bool isDoubleSpaced, SMLoc S, SMLoc E) {
    auto Op = std::make_unique<ARMOperand>(k_VectorListIndexed);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->VectorList.LaneIndex = Index;
    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
    // ...
  }

  static std::unique_ptr<ARMOperand>
  CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E /* ... */) {
    auto Op = std::make_unique<ARMOperand>(k_VectorIndex);
    Op->VectorIndex.Val = Idx;
    // ...
  }

  static std::unique_ptr<ARMOperand> CreateImm(const MCExpr *Val, SMLoc S,
                                               SMLoc E) {
    auto Op = std::make_unique<ARMOperand>(k_Immediate);
    // ...
  }
  static std::unique_ptr<ARMOperand>
  CreateMem(unsigned BaseRegNum, const MCExpr *OffsetImm, unsigned OffsetRegNum,
            ARM_AM::ShiftOpc ShiftType, unsigned ShiftImm, unsigned Alignment,
            bool isNegative, SMLoc S, SMLoc E, SMLoc AlignmentLoc = SMLoc()) {
    auto Op = std::make_unique<ARMOperand>(k_Memory);
    Op->Memory.BaseRegNum = BaseRegNum;
    Op->Memory.OffsetImm = OffsetImm;
    Op->Memory.OffsetRegNum = OffsetRegNum;
    Op->Memory.ShiftType = ShiftType;
    Op->Memory.ShiftImm = ShiftImm;
    // ...
    Op->Memory.isNegative = isNegative;
    // ...
    Op->AlignmentLoc = AlignmentLoc;
    // ...
  }

  static std::unique_ptr<ARMOperand>
  CreatePostIdxReg(unsigned RegNum, bool isAdd, ARM_AM::ShiftOpc ShiftTy,
                   unsigned ShiftImm, SMLoc S, SMLoc E) {
    auto Op = std::make_unique<ARMOperand>(k_PostIndexRegister);
    Op->PostIdxReg.RegNum = RegNum;
    Op->PostIdxReg.isAdd = isAdd;
    Op->PostIdxReg.ShiftTy = ShiftTy;
    Op->PostIdxReg.ShiftImm = ShiftImm;
    // ...
  }

  static std::unique_ptr<ARMOperand> CreateMemBarrierOpt(ARM_MB::MemBOpt Opt,
                                                         SMLoc S) {
    auto Op = std::make_unique<ARMOperand>(k_MemBarrierOpt);
    Op->MBOpt.Val = Opt;
    // ...
  }

  static std::unique_ptr<ARMOperand>
  CreateInstSyncBarrierOpt(ARM_ISB::InstSyncBOpt Opt, SMLoc S) {
    auto Op = std::make_unique<ARMOperand>(k_InstSyncBarrierOpt);
    Op->ISBOpt.Val = Opt;
    // ...
  }

  static std::unique_ptr<ARMOperand>
  CreateTraceSyncBarrierOpt(ARM_TSB::TraceSyncBOpt Opt, SMLoc S) {
    auto Op = std::make_unique<ARMOperand>(k_TraceSyncBarrierOpt);
    Op->TSBOpt.Val = Opt;
    // ...
  }

  // ... (CreateProcIFlags, signature elided in this excerpt)
    auto Op = std::make_unique<ARMOperand>(k_ProcIFlags);
    // ...

  static std::unique_ptr<ARMOperand> CreateMSRMask(unsigned MMask, SMLoc S) {
    auto Op = std::make_unique<ARMOperand>(k_MSRMask);
    Op->MMask.Val = MMask;
    // ...
  }

  static std::unique_ptr<ARMOperand> CreateBankedReg(unsigned Reg, SMLoc S) {
    auto Op = std::make_unique<ARMOperand>(k_BankedReg);
    Op->BankedReg.Val = Reg;
    // ...
  }
  case k_ITCondMask: {
    static const char *const MaskStr[] = {
        "(invalid)", "(tttt)", "(ttt)", "(ttte)",
        "(tt)",      "(ttet)", "(tte)", "(ttee)",
        "(t)",       "(tett)", "(tet)", "(tete)",
        "(te)",      "(teet)", "(tee)", "(teee)",
    };
    assert((ITMask.Mask & 0xf) == ITMask.Mask);
    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
    break;
  }
  case k_CoprocNum:
    OS << "<coprocessor number: " << getCoproc() << ">";
    break;
  case k_CoprocReg:
    OS << "<coprocessor register: " << getCoproc() << ">";
    break;
  case k_CoprocOption:
    OS << "<coprocessor option: " << CoprocOption.Val << ">";
    break;
  case k_MSRMask:
    OS << "<mask: " << getMSRMask() << ">";
    break;
  case k_BankedReg:
    OS << "<banked reg: " << getBankedReg() << ">";
    break;
  // ...
  case k_MemBarrierOpt:
    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt(), false) << ">";
    break;
  case k_InstSyncBarrierOpt:
    // ...
  case k_TraceSyncBarrierOpt:
    // ...
  case k_Memory:
    // ...
    OS << " offset-imm:" << *Memory.OffsetImm;
    // ...
    OS << " offset-reg:" << (Memory.isNegative ? "-" : "")
       /* ... */;
    // ...
    OS << " shift-imm:" << Memory.ShiftImm;
    // ...
    OS << " alignment:" << Memory.Alignment;
    // ...
  case k_PostIndexRegister:
    OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
       << RegName(PostIdxReg.RegNum);
    // ...
       << PostIdxReg.ShiftImm;
    // ...
  case k_ProcIFlags: {
    OS << "<ARM_PROC::";
    unsigned IFlags = getProcIFlags();
    for (int i = 2; i >= 0; --i)
      // ...
    // ...
  }
  case k_ShifterImmediate:
    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl") << " #"
       << ShifterImm.Imm << ">";
    break;
  case k_ShiftedRegister:
    OS << "<so_reg_reg " << RegName(RegShiftedReg.SrcReg) << " "
       /* ... */
       << RegName(RegShiftedReg.ShiftReg) << ">";
    break;
  case k_ShiftedImmediate:
    OS << "<so_reg_imm " << RegName(RegShiftedImm.SrcReg) << " "
       /* ... */
       << RegShiftedImm.ShiftImm << ">";
    break;
  case k_RotateImmediate:
    OS << "<ror #" << (RotImm.Imm * 8) << ">";
    break;
  case k_ModifiedImmediate:
    OS << "<mod_imm #" << ModImm.Bits << ", #" << ModImm.Rot << ">";
    break;
  case k_ConstantPoolImmediate:
    OS << "<constant_pool_imm #" << *getConstantPoolImm();
    break;
  case k_BitfieldDescriptor:
    OS << "<bitfield " << "lsb: " << Bitfield.LSB
       << ", width: " << Bitfield.Width << ">";
    break;
  case k_RegisterList:
  case k_RegisterListWithAPSR:
  case k_DPRRegisterList:
  case k_SPRRegisterList:
  case k_FPSRegisterListWithVPR:
  case k_FPDRegisterListWithVPR: {
    OS << "<register_list ";
    // ...
    for (/* ... */ I = RegList.begin(), E = RegList.end(); I != E;) {
      // ...
      if (++I < E)
        OS << ", ";
    }
    // ...
  }
  case k_VectorList:
    OS << "<vector_list " << VectorList.Count << " * "
       << RegName(VectorList.RegNum) << ">";
    break;
  case k_VectorListAllLanes:
    OS << "<vector_list(all lanes) " << VectorList.Count << " * "
       << RegName(VectorList.RegNum) << ">";
    break;
  case k_VectorListIndexed:
    OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
       << VectorList.Count << " * " << RegName(VectorList.RegNum) << ">";
    break;
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
bool ARMAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
                                 SMLoc &EndLoc) {
  const AsmToken &Tok = getParser().getTok();
  // ...
  RegNo = tryParseRegister();

  return (RegNo == (unsigned)-1);
}

// ...
  if (ParseRegister(RegNo, StartLoc, EndLoc))
    // ...

int ARMAsmParser::tryParseRegister() {
4082 .
Case(
"r13", ARM::SP)
4083 .
Case(
"r14", ARM::LR)
4084 .
Case(
"r15", ARM::PC)
4085 .
Case(
"ip", ARM::R12)
4087 .
Case(
"a1", ARM::R0)
4088 .
Case(
"a2", ARM::R1)
4090 .
Case(
"a4", ARM::R3)
4092 .
Case(
"v2", ARM::R5)
4094 .
Case(
"v4", ARM::R7)
4095 .
Case(
"v5", ARM::R8)
4096 .
Case(
"v6", ARM::R9)
4097 .
Case(
"v7", ARM::R10)
4098 .
Case(
"v8", ARM::R11)
4099 .
Case(
"sb", ARM::R9)
4100 .
Case(
"sl", ARM::R10)
4101 .
Case(
"fp", ARM::R11)
4110 if (Entry == RegisterReqs.
end())
4113 return Entry->getValue();
4117 if (!hasD32() && RegNum >= ARM::D16 && RegNum <= ARM::D31)
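// Editorial note (not from the original file): tryParseRegister() resolves
// the GNU-style register aliases before anything else, so "fp", "sl" and
// "sb" map to r11, r10 and r9, and the a1-a4/v1-v8 names map to r0-r3 and
// r4-r11 respectively; names introduced via ".req" are then looked up in
// RegisterReqs as a fallback.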
  std::unique_ptr<ARMOperand> PrevOp(
      (ARMOperand *)Operands.pop_back_val().release());
  if (!PrevOp->isReg())
    return Error(PrevOp->getStartLoc(), "shift must be of a register");
  int SrcReg = PrevOp->getReg();
  // ...
  const MCExpr *ShiftExpr = nullptr;
  if (getParser().parseExpression(ShiftExpr, EndLoc)) {
    Error(ImmLoc, "invalid immediate shift value");
    // ...
  }
  // ...
  Error(ImmLoc, "invalid immediate shift value");
  // ...
  Imm = CE->getValue();
  // ...
  Error(ImmLoc, "immediate shift value out of range");
  // ...
  ShiftReg = tryParseRegister();
  if (ShiftReg == -1) {
    Error(L, "expected immediate or register in shift operand");
    // ...
  }
  // ...
        "expected immediate or register in shift operand");
  // ...
  Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
                                                       /* ... */));
  // ...
  Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
                                                        /* ... */));
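// Editorial sketch (not part of the original source): this path turns the
// trailing ", <shift> #imm" or ", <shift> rN" of an operand into a single
// shifted-register operand, roughly:
//   add r0, r1, r2, lsl #3   ->  CreateShiftedImmediate(lsl, r2, 3, ...)
//   mov r0, r1, lsr r3       ->  CreateShiftedRegister(lsr, r1, r3, ...)
// The argument lists here are abbreviated for illustration.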
  int RegNo = tryParseRegister();
  // ...
  Operands.push_back(ARMOperand::CreateReg(RegNo, RegStartLoc, RegEndLoc));
  // ...
  if (getParser().parseExpression(ImmVal))
    // ...
  // ...
    return TokError("immediate value expected for vector index");
  if (Name.size() < 2 || Name[0] != CoprocOp)
    // ...
  switch (Name.size()) {
  // ...
    // (inner switch on the second digit of a two-digit coprocessor name,
    // i.e. p10..p15)
    case '0': return 10;
    case '1': return 11;
    case '2': return 12;
    case '3': return 13;
    case '4': return 14;
    case '5': return 15;
  // ...
  Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
  // ...
  Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
  // ...
  if (getParser().parseExpression(Expr)) {
    Error(Loc, "illegal expression");
    // ...
  }
  // ...
  if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
    Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
    // ...
  }
  int Val = CE->getValue();
  // ...
  Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
    // ...
  // ...
  case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
  case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
  case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
  case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
  case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
  case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
  case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
  // ...
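// Editorial note (not in the original file): the switch above walks the core
// registers in encoding order and wraps around, e.g. the register after
// ARM::R12 is ARM::SP and the one after ARM::PC is ARM::R0 again; it is used
// when stepping through shorthand register ranges in list operands.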
// ...
                               unsigned Enc, unsigned Reg) {
  Regs.emplace_back(Enc, Reg);
  for (auto I = Regs.rbegin(), J = I + 1, E = Regs.rend(); J != E; ++I, ++J) {
    if (J->first == Enc) {
      Regs.erase(J.base());
      // ...
    }
    // ...
  }
  // ...
}
    return TokError("Token is not a Left Curly Brace");
  // ...
  int Reg = tryParseRegister();
  // ...
    return Error(RegLoc, "register expected");
  if (!AllowRAAC && Reg == ARM::RA_AUTH_CODE)
    return Error(RegLoc, "pseudo-register not allowed");
  // ...
  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
    Reg = getDRegFromQReg(Reg);
    EReg = MRI->getEncodingValue(Reg);
    // ...
  }
  // ...
  if (Reg == ARM::RA_AUTH_CODE ||
      ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
  else
    return Error(RegLoc, "invalid register in register list");
  // ...
  EReg = MRI->getEncodingValue(Reg);
  // ...
  if (Reg == ARM::RA_AUTH_CODE)
    return Error(RegLoc, "pseudo-register not allowed");
  // ...
  int EndReg = tryParseRegister();
  // ...
    return Error(AfterMinusLoc, "register expected");
  if (EndReg == ARM::RA_AUTH_CODE)
    return Error(AfterMinusLoc, "pseudo-register not allowed");
  // ...
  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
    EndReg = getDRegFromQReg(EndReg) + 1;
  // ...
    return Error(AfterMinusLoc, "invalid register in register list");
  // ...
  if (MRI->getEncodingValue(Reg) > MRI->getEncodingValue(EndReg))
    return Error(AfterMinusLoc, "bad range in register list");
  // ...
  while (Reg != EndReg) {
    // ...
    EReg = MRI->getEncodingValue(Reg);
    // ...
             /* ... */ ") in register list");
    // ...
  }
    // ...
    Reg = tryParseRegister();
    // ...
      return Error(RegLoc, "register expected");
    if (!AllowRAAC && Reg == ARM::RA_AUTH_CODE)
      return Error(RegLoc, "pseudo-register not allowed");
    // ...
    bool isQReg = false;
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      Reg = getDRegFromQReg(Reg);
      // ...
    }
    // ...
        RC->getID() == ARMMCRegisterClasses[ARM::GPRRegClassID].getID() &&
        ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg)) {
      // ...
      RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
    }
    // ...
    if (Reg == ARM::VPR &&
        (RC == &ARMMCRegisterClasses[ARM::SPRRegClassID] ||
         RC == &ARMMCRegisterClasses[ARM::DPRRegClassID] ||
         RC == &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID])) {
      RC = &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID];
      EReg = MRI->getEncodingValue(Reg);
      // ...
               /* ... */ ") in register list");
      // ...
    }
    // ...
    if ((Reg == ARM::RA_AUTH_CODE &&
         RC != &ARMMCRegisterClasses[ARM::GPRRegClassID]) ||
        /* ... */)
      return Error(RegLoc, "invalid register in register list");
    // ...
        MRI->getEncodingValue(Reg) < MRI->getEncodingValue(OldReg)) {
      if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
        Warning(RegLoc, "register list not in ascending order");
      else if (!ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(
                   Reg))
        return Error(RegLoc, "register list not in ascending order");
      // ...
    }
    // ...
    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
        RC != &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID] &&
        /* ... */)
      return Error(RegLoc, "non-contiguous register range");
    // ...
    EReg = MRI->getEncodingValue(Reg);
    // ...
             /* ... */ ") in register list");
    // ...
    EReg = MRI->getEncodingValue(++Reg);
    // ...
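  // Editorial note (not from the original source): taken together, this loop
  // accepts GNU-style lists such as "{r0-r3, r12, lr}", silently expands a
  // NEON Q register into its two D halves (so "{q1}" is handled as d2/d3),
  // warns when core registers are written out of ascending order, and rejects
  // invalid or mismatched registers in the list.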
parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index, SMLoc &EndLoc) {
  // ...
    LaneKind = AllLanes;
    // ...
  // ...
    if (getParser().parseExpression(LaneIndex)) {
      Error(Loc, "illegal expression");
      // ...
    }
    // ...
      Error(Loc, "lane index must be empty or an integer");
    // ...
    int64_t Val = CE->getValue();
    // ...
    if (Val < 0 || Val > 7) {
      // ...
    }
    // ...
    LaneKind = IndexedLane;
    // ...
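// Editorial note (not in the original file): this helper classifies the
// optional lane suffix on a vector register. A bare "d3" reports NoLanes,
// "d3[]" reports AllLanes, and "d3[2]" reports IndexedLane with Index == 2;
// lane numbers outside 0-7 are caught by the range check above.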
  VectorLaneTy LaneKind;
  // ...
  int Reg = tryParseRegister();
  // ...
  if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
    // ...
      Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
    // ...
      Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
                                                              /* ... */));
    // ...
      Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
                                                             /* ... */));
    // ...
  }
  // ...
  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
    Reg = getDRegFromQReg(Reg);
    // ...
      Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
                                     &ARMMCRegisterClasses[ARM::DPairRegClassID]);
      Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
    // ...
      Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
                                     &ARMMCRegisterClasses[ARM::DPairRegClassID]);
      Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
                                                              /* ... */));
    // ...
      Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
                                                             /* ... */));
    // ...
  }
  // ...
  Error(S, "vector register expected");
  // ...
  int Reg = tryParseRegister();
  // ...
    Error(RegLoc, "register expected");
    // ...
  unsigned FirstReg = Reg;
  // ...
  if (hasMVE() && !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Reg)) {
    // ...
  } else if (!hasMVE() &&
             ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
    FirstReg = Reg = getDRegFromQReg(Reg);
    // ...
  }
  // ...
  else if (Spacing == 2) {
    // ...
          "sequential registers in double spaced list");
    // ...
  }
  // ...
  int EndReg = tryParseRegister();
  // ...
    Error(AfterMinusLoc, "register expected");
    // ...
  if (!hasMVE() && ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
    EndReg = getDRegFromQReg(EndReg) + 1;
  // ...
       !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(EndReg)) ||
      // ...
       !ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg))) {
    Error(AfterMinusLoc, "invalid register in register list");
    // ...
  }
  // ...
    Error(AfterMinusLoc, "bad range in register list");
  // ...
  VectorLaneTy NextLaneKind;
  unsigned NextLaneIndex;
  if (parseVectorLane(NextLaneKind, NextLaneIndex, E) != /* ... */)
    // ...
  if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
    Error(AfterMinusLoc, "mismatched lane index in register list");
    // ...
  }
  // ...
  Count += EndReg - Reg;
4839 Error(RegLoc,
"register expected");
4844 if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].
contains(
Reg)) {
4845 Error(RegLoc,
"vector register in range Q0-Q7 expected");
4856 else if (ARMMCRegisterClasses[ARM::QPRRegClassID].
contains(
Reg)) {
4859 else if (Spacing == 2) {
4861 "invalid register in double-spaced list (must be 'D' register')");
4864 Reg = getDRegFromQReg(
Reg);
4865 if (
Reg != OldReg + 1) {
4866 Error(RegLoc,
"non-contiguous register range");
4872 VectorLaneTy NextLaneKind;
4873 unsigned NextLaneIndex;
4875 if (parseVectorLane(NextLaneKind, NextLaneIndex,
E) !=
4878 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
4879 Error(LaneLoc,
"mismatched lane index in register list");
4888 Spacing = 1 + (
Reg == OldReg + 2);
4891 if (
Reg != OldReg + Spacing) {
4892 Error(RegLoc,
"non-contiguous register range");
4897 VectorLaneTy NextLaneKind;
4898 unsigned NextLaneIndex;
4902 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
4903 Error(EndLoc,
"mismatched lane index in register list");
  if (Count == 2 && !hasMVE()) {
    // ...
        &ARMMCRegisterClasses[ARM::DPairRegClassID] :
        &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
    FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
  }
  // ...
  auto Create = (LaneKind == NoLanes ? ARMOperand::CreateVectorList
                                     : ARMOperand::CreateVectorListAllLanes);
  Operands.push_back(Create(FirstReg, Count, (Spacing == 2), S, E));
  // ...
  Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
                                                         /* ... */));
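// Editorial note (not part of the original source): for classic NEON, a
// two-register list is not kept as two operands; once parsing finishes it is
// re-expressed as the matching DPair (or DPairSpc for double-spaced lists)
// super-register, so a list written as "{d0, d1}" reaches the MCInst as a
// single register operand covering both halves.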