File: | llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp |
Warning: | line 394, column 36 The result of the left shift is undefined due to shifting by '32', which is greater or equal to the width of type 'int' |
Press '?' to see keyboard shortcuts
Keyboard shortcuts:
1 | //===- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions -------===// | |||
2 | // | |||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | |||
4 | // See https://llvm.org/LICENSE.txt for license information. | |||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | |||
6 | // | |||
7 | //===----------------------------------------------------------------------===// | |||
8 | ||||
9 | #include "ARMFeatures.h" | |||
10 | #include "ARMBaseInstrInfo.h" | |||
11 | #include "Utils/ARMBaseInfo.h" | |||
12 | #include "MCTargetDesc/ARMAddressingModes.h" | |||
13 | #include "MCTargetDesc/ARMBaseInfo.h" | |||
14 | #include "MCTargetDesc/ARMInstPrinter.h" | |||
15 | #include "MCTargetDesc/ARMMCExpr.h" | |||
16 | #include "MCTargetDesc/ARMMCTargetDesc.h" | |||
17 | #include "TargetInfo/ARMTargetInfo.h" | |||
18 | #include "llvm/ADT/APFloat.h" | |||
19 | #include "llvm/ADT/APInt.h" | |||
20 | #include "llvm/ADT/None.h" | |||
21 | #include "llvm/ADT/STLExtras.h" | |||
22 | #include "llvm/ADT/SmallSet.h" | |||
23 | #include "llvm/ADT/SmallVector.h" | |||
24 | #include "llvm/ADT/StringMap.h" | |||
25 | #include "llvm/ADT/StringSet.h" | |||
26 | #include "llvm/ADT/StringRef.h" | |||
27 | #include "llvm/ADT/StringSwitch.h" | |||
28 | #include "llvm/ADT/Triple.h" | |||
29 | #include "llvm/ADT/Twine.h" | |||
30 | #include "llvm/MC/MCContext.h" | |||
31 | #include "llvm/MC/MCExpr.h" | |||
32 | #include "llvm/MC/MCInst.h" | |||
33 | #include "llvm/MC/MCInstrDesc.h" | |||
34 | #include "llvm/MC/MCInstrInfo.h" | |||
35 | #include "llvm/MC/MCParser/MCAsmLexer.h" | |||
36 | #include "llvm/MC/MCParser/MCAsmParser.h" | |||
37 | #include "llvm/MC/MCParser/MCAsmParserExtension.h" | |||
38 | #include "llvm/MC/MCParser/MCAsmParserUtils.h" | |||
39 | #include "llvm/MC/MCParser/MCParsedAsmOperand.h" | |||
40 | #include "llvm/MC/MCParser/MCTargetAsmParser.h" | |||
41 | #include "llvm/MC/MCRegisterInfo.h" | |||
42 | #include "llvm/MC/MCSection.h" | |||
43 | #include "llvm/MC/MCStreamer.h" | |||
44 | #include "llvm/MC/MCSubtargetInfo.h" | |||
45 | #include "llvm/MC/MCSymbol.h" | |||
46 | #include "llvm/MC/SubtargetFeature.h" | |||
47 | #include "llvm/Support/ARMBuildAttributes.h" | |||
48 | #include "llvm/Support/ARMEHABI.h" | |||
49 | #include "llvm/Support/Casting.h" | |||
50 | #include "llvm/Support/CommandLine.h" | |||
51 | #include "llvm/Support/Compiler.h" | |||
52 | #include "llvm/Support/ErrorHandling.h" | |||
53 | #include "llvm/Support/MathExtras.h" | |||
54 | #include "llvm/Support/SMLoc.h" | |||
55 | #include "llvm/Support/TargetParser.h" | |||
56 | #include "llvm/Support/TargetRegistry.h" | |||
57 | #include "llvm/Support/raw_ostream.h" | |||
58 | #include <algorithm> | |||
59 | #include <cassert> | |||
60 | #include <cstddef> | |||
61 | #include <cstdint> | |||
62 | #include <iterator> | |||
63 | #include <limits> | |||
64 | #include <memory> | |||
65 | #include <string> | |||
66 | #include <utility> | |||
67 | #include <vector> | |||
68 | ||||
69 | #define DEBUG_TYPE"asm-parser" "asm-parser" | |||
70 | ||||
71 | using namespace llvm; | |||
72 | ||||
73 | namespace llvm { | |||
74 | extern const MCInstrDesc ARMInsts[]; | |||
75 | } // end namespace llvm | |||
76 | ||||
77 | namespace { | |||
78 | ||||
// Policy for accepting conditional instructions outside an IT block,
// selected by the "arm-implicit-it" command-line option defined below.
enum class ImplicitItModeTy { Always, Never, ARMOnly, ThumbOnly };
80 | ||||
81 | static cl::opt<ImplicitItModeTy> ImplicitItMode( | |||
82 | "arm-implicit-it", cl::init(ImplicitItModeTy::ARMOnly), | |||
83 | cl::desc("Allow conditional instructions outdside of an IT block"), | |||
84 | cl::values(clEnumValN(ImplicitItModeTy::Always, "always",llvm::cl::OptionEnumValue { "always", int(ImplicitItModeTy::Always ), "Accept in both ISAs, emit implicit ITs in Thumb" } | |||
85 | "Accept in both ISAs, emit implicit ITs in Thumb")llvm::cl::OptionEnumValue { "always", int(ImplicitItModeTy::Always ), "Accept in both ISAs, emit implicit ITs in Thumb" }, | |||
86 | clEnumValN(ImplicitItModeTy::Never, "never",llvm::cl::OptionEnumValue { "never", int(ImplicitItModeTy::Never ), "Warn in ARM, reject in Thumb" } | |||
87 | "Warn in ARM, reject in Thumb")llvm::cl::OptionEnumValue { "never", int(ImplicitItModeTy::Never ), "Warn in ARM, reject in Thumb" }, | |||
88 | clEnumValN(ImplicitItModeTy::ARMOnly, "arm",llvm::cl::OptionEnumValue { "arm", int(ImplicitItModeTy::ARMOnly ), "Accept in ARM, reject in Thumb" } | |||
89 | "Accept in ARM, reject in Thumb")llvm::cl::OptionEnumValue { "arm", int(ImplicitItModeTy::ARMOnly ), "Accept in ARM, reject in Thumb" }, | |||
90 | clEnumValN(ImplicitItModeTy::ThumbOnly, "thumb",llvm::cl::OptionEnumValue { "thumb", int(ImplicitItModeTy::ThumbOnly ), "Warn in ARM, emit implicit ITs in Thumb" } | |||
91 | "Warn in ARM, emit implicit ITs in Thumb")llvm::cl::OptionEnumValue { "thumb", int(ImplicitItModeTy::ThumbOnly ), "Warn in ARM, emit implicit ITs in Thumb" })); | |||
92 | ||||
// Command-line flag; when set, the parser emits ARM build attributes for the
// assembled object (NOTE(review): inferred from the option name — confirm
// against its uses, which are outside this chunk). Off by default.
static cl::opt<bool> AddBuildAttributes("arm-add-build-attributes",
                                        cl::init(false));
95 | ||||
// Result of parsing an optional vector lane specifier: absent, all lanes,
// or a single indexed lane.
enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
97 | ||||
// Extract the state bit of an IT-block mask controlling a given slot.
// Position==0 means we're not in an IT block at all. Position==1 selects
// the first state bit, which is the always-0 bit 4 (the mandatory leading
// Then). Position==2 selects bit 3 of Mask, and so on downwards.
static inline unsigned extractITMaskBit(unsigned Mask, unsigned Position) {
  const unsigned Shift = 5 - Position;
  return (Mask >> Shift) & 1u;
}
107 | ||||
108 | class UnwindContext { | |||
109 | using Locs = SmallVector<SMLoc, 4>; | |||
110 | ||||
111 | MCAsmParser &Parser; | |||
112 | Locs FnStartLocs; | |||
113 | Locs CantUnwindLocs; | |||
114 | Locs PersonalityLocs; | |||
115 | Locs PersonalityIndexLocs; | |||
116 | Locs HandlerDataLocs; | |||
117 | int FPReg; | |||
118 | ||||
119 | public: | |||
120 | UnwindContext(MCAsmParser &P) : Parser(P), FPReg(ARM::SP) {} | |||
121 | ||||
122 | bool hasFnStart() const { return !FnStartLocs.empty(); } | |||
123 | bool cantUnwind() const { return !CantUnwindLocs.empty(); } | |||
124 | bool hasHandlerData() const { return !HandlerDataLocs.empty(); } | |||
125 | ||||
126 | bool hasPersonality() const { | |||
127 | return !(PersonalityLocs.empty() && PersonalityIndexLocs.empty()); | |||
128 | } | |||
129 | ||||
130 | void recordFnStart(SMLoc L) { FnStartLocs.push_back(L); } | |||
131 | void recordCantUnwind(SMLoc L) { CantUnwindLocs.push_back(L); } | |||
132 | void recordPersonality(SMLoc L) { PersonalityLocs.push_back(L); } | |||
133 | void recordHandlerData(SMLoc L) { HandlerDataLocs.push_back(L); } | |||
134 | void recordPersonalityIndex(SMLoc L) { PersonalityIndexLocs.push_back(L); } | |||
135 | ||||
136 | void saveFPReg(int Reg) { FPReg = Reg; } | |||
137 | int getFPReg() const { return FPReg; } | |||
138 | ||||
139 | void emitFnStartLocNotes() const { | |||
140 | for (Locs::const_iterator FI = FnStartLocs.begin(), FE = FnStartLocs.end(); | |||
141 | FI != FE; ++FI) | |||
142 | Parser.Note(*FI, ".fnstart was specified here"); | |||
143 | } | |||
144 | ||||
145 | void emitCantUnwindLocNotes() const { | |||
146 | for (Locs::const_iterator UI = CantUnwindLocs.begin(), | |||
147 | UE = CantUnwindLocs.end(); UI != UE; ++UI) | |||
148 | Parser.Note(*UI, ".cantunwind was specified here"); | |||
149 | } | |||
150 | ||||
151 | void emitHandlerDataLocNotes() const { | |||
152 | for (Locs::const_iterator HI = HandlerDataLocs.begin(), | |||
153 | HE = HandlerDataLocs.end(); HI != HE; ++HI) | |||
154 | Parser.Note(*HI, ".handlerdata was specified here"); | |||
155 | } | |||
156 | ||||
157 | void emitPersonalityLocNotes() const { | |||
158 | for (Locs::const_iterator PI = PersonalityLocs.begin(), | |||
159 | PE = PersonalityLocs.end(), | |||
160 | PII = PersonalityIndexLocs.begin(), | |||
161 | PIE = PersonalityIndexLocs.end(); | |||
162 | PI != PE || PII != PIE;) { | |||
163 | if (PI != PE && (PII == PIE || PI->getPointer() < PII->getPointer())) | |||
164 | Parser.Note(*PI++, ".personality was specified here"); | |||
165 | else if (PII != PIE && (PI == PE || PII->getPointer() < PI->getPointer())) | |||
166 | Parser.Note(*PII++, ".personalityindex was specified here"); | |||
167 | else | |||
168 | llvm_unreachable(".personality and .personalityindex cannot be "::llvm::llvm_unreachable_internal(".personality and .personalityindex cannot be " "at the same location", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 169) | |||
169 | "at the same location")::llvm::llvm_unreachable_internal(".personality and .personalityindex cannot be " "at the same location", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 169); | |||
170 | } | |||
171 | } | |||
172 | ||||
173 | void reset() { | |||
174 | FnStartLocs = Locs(); | |||
175 | CantUnwindLocs = Locs(); | |||
176 | PersonalityLocs = Locs(); | |||
177 | HandlerDataLocs = Locs(); | |||
178 | PersonalityIndexLocs = Locs(); | |||
179 | FPReg = ARM::SP; | |||
180 | } | |||
181 | }; | |||
182 | ||||
183 | // Various sets of ARM instruction mnemonics which are used by the asm parser | |||
184 | class ARMMnemonicSets { | |||
185 | StringSet<> CDE; | |||
186 | StringSet<> CDEWithVPTSuffix; | |||
187 | public: | |||
188 | ARMMnemonicSets(const MCSubtargetInfo &STI); | |||
189 | ||||
190 | /// Returns true iff a given mnemonic is a CDE instruction | |||
191 | bool isCDEInstr(StringRef Mnemonic) { | |||
192 | // Quick check before searching the set | |||
193 | if (!Mnemonic.startswith("cx") && !Mnemonic.startswith("vcx")) | |||
194 | return false; | |||
195 | return CDE.count(Mnemonic); | |||
196 | } | |||
197 | ||||
198 | /// Returns true iff a given mnemonic is a VPT-predicable CDE instruction | |||
199 | /// (possibly with a predication suffix "e" or "t") | |||
200 | bool isVPTPredicableCDEInstr(StringRef Mnemonic) { | |||
201 | if (!Mnemonic.startswith("vcx")) | |||
202 | return false; | |||
203 | return CDEWithVPTSuffix.count(Mnemonic); | |||
204 | } | |||
205 | ||||
206 | /// Returns true iff a given mnemonic is an IT-predicable CDE instruction | |||
207 | /// (possibly with a condition suffix) | |||
208 | bool isITPredicableCDEInstr(StringRef Mnemonic) { | |||
209 | if (!Mnemonic.startswith("cx")) | |||
210 | return false; | |||
211 | return Mnemonic.startswith("cx1a") || Mnemonic.startswith("cx1da") || | |||
212 | Mnemonic.startswith("cx2a") || Mnemonic.startswith("cx2da") || | |||
213 | Mnemonic.startswith("cx3a") || Mnemonic.startswith("cx3da"); | |||
214 | } | |||
215 | ||||
216 | /// Return true iff a given mnemonic is an integer CDE instruction with | |||
217 | /// dual-register destination | |||
218 | bool isCDEDualRegInstr(StringRef Mnemonic) { | |||
219 | if (!Mnemonic.startswith("cx")) | |||
220 | return false; | |||
221 | return Mnemonic == "cx1d" || Mnemonic == "cx1da" || | |||
222 | Mnemonic == "cx2d" || Mnemonic == "cx2da" || | |||
223 | Mnemonic == "cx3d" || Mnemonic == "cx3da"; | |||
224 | } | |||
225 | }; | |||
226 | ||||
227 | ARMMnemonicSets::ARMMnemonicSets(const MCSubtargetInfo &STI) { | |||
228 | for (StringRef Mnemonic: { "cx1", "cx1a", "cx1d", "cx1da", | |||
229 | "cx2", "cx2a", "cx2d", "cx2da", | |||
230 | "cx3", "cx3a", "cx3d", "cx3da", }) | |||
231 | CDE.insert(Mnemonic); | |||
232 | for (StringRef Mnemonic : | |||
233 | {"vcx1", "vcx1a", "vcx2", "vcx2a", "vcx3", "vcx3a"}) { | |||
234 | CDE.insert(Mnemonic); | |||
235 | CDEWithVPTSuffix.insert(Mnemonic); | |||
236 | CDEWithVPTSuffix.insert(std::string(Mnemonic) + "t"); | |||
237 | CDEWithVPTSuffix.insert(std::string(Mnemonic) + "e"); | |||
238 | } | |||
239 | } | |||
240 | ||||
class ARMAsmParser : public MCTargetAsmParser {
  const MCRegisterInfo *MRI; // For subregister queries (see getDRegFromQReg).
  UnwindContext UC;          // State for unwinding directives (.fnstart etc.).
  ARMMnemonicSets MS;        // CDE mnemonic classification sets.
245 | ||||
  // Return the ARM-specific target streamer attached to the current output
  // streamer. Asserts that one is present.
  ARMTargetStreamer &getTargetStreamer() {
    assert(getParser().getStreamer().getTargetStreamer() &&
           "do not have a target streamer");
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<ARMTargetStreamer &>(TS);
  }
252 | ||||
  // Map of register aliases registered via the .req directive.
  StringMap<unsigned> RegisterReqs;

  // NOTE(review): presumably set when a directive marks the next symbol as a
  // Thumb function — confirm against its uses, which are outside this chunk.
  bool NextSymbolIsThumb;

  // True if implicit IT blocks may be created for conditional Thumb
  // instructions (per the "arm-implicit-it" option).
  bool useImplicitITThumb() const {
    return ImplicitItMode == ImplicitItModeTy::Always ||
           ImplicitItMode == ImplicitItModeTy::ThumbOnly;
  }

  // True if conditional instructions are accepted in ARM mode
  // (per the "arm-implicit-it" option).
  bool useImplicitITARM() const {
    return ImplicitItMode == ImplicitItModeTy::Always ||
           ImplicitItMode == ImplicitItModeTy::ARMOnly;
  }

  // State of the IT (If-Then) block currently being parsed, if any.
  struct {
    ARMCC::CondCodes Cond; // Condition for IT block.
    unsigned Mask:4;       // Condition mask for instructions.
                           // Starting at first 1 (from lsb).
                           //   '1' condition as indicated in IT.
                           //   '0' inverse of condition (else).
                           // Count of instructions in IT block is
                           // 4 - trailingzeroes(mask)
                           // Note that this does not have the same encoding
                           // as in the IT instruction, which also depends
                           // on the low bit of the condition code.

    unsigned CurPosition;  // Current position in parsing of IT
                           // block. In range [0,4], with 0 being the IT
                           // instruction itself. Initialized according to
                           // count of instructions in block. ~0U if no
                           // active IT block.

    bool IsExplicit;       // true - The IT instruction was present in the
                           //        input, we should not modify it.
                           // false - The IT instruction was added
                           //         implicitly, we can extend it if that
                           //         would be legal.
  } ITState;

  // Conditional instructions buffered while an implicit IT block is open;
  // emitted (preceded by the IT instruction) by flushPendingInstructions().
  SmallVector<MCInst, 4> PendingConditionalInsts;
294 | ||||
  // Emit the buffered conditional instructions of an open implicit IT block,
  // preceded by the IT instruction itself, then reset the IT state. No-op
  // (asserting an empty buffer) when no implicit IT block is active.
  void flushPendingInstructions(MCStreamer &Out) override {
    if (!inImplicitITBlock()) {
      assert(PendingConditionalInsts.size() == 0);
      return;
    }

    // Emit the IT instruction
    MCInst ITInst;
    ITInst.setOpcode(ARM::t2IT);
    ITInst.addOperand(MCOperand::createImm(ITState.Cond));
    ITInst.addOperand(MCOperand::createImm(ITState.Mask));
    Out.emitInstruction(ITInst, getSTI());

    // Emit the conditional instructions
    assert(PendingConditionalInsts.size() <= 4);
    for (const MCInst &Inst : PendingConditionalInsts) {
      Out.emitInstruction(Inst, getSTI());
    }
    PendingConditionalInsts.clear();

    // Clear the IT state
    ITState.Mask = 0;
    ITState.CurPosition = ~0U;
  }
319 | ||||
  // IT-block queries: a block is active while ITState.CurPosition != ~0U.
  bool inITBlock() { return ITState.CurPosition != ~0U; }
  bool inExplicitITBlock() { return inITBlock() && ITState.IsExplicit; }
  bool inImplicitITBlock() { return inITBlock() && !ITState.IsExplicit; }

  // True when the current slot is the last in the IT block; block length is
  // 4 - countTrailingZeros(Mask) (see the ITState.Mask encoding above).
  bool lastInITBlock() {
    return ITState.CurPosition == 4 - countTrailingZeros(ITState.Mask);
  }
327 | ||||
  // Advance to the next slot of the active IT block, if any.
  void forwardITPosition() {
    if (!inITBlock()) return;
    // Move to the next instruction in the IT block, if there is one. If not,
    // mark the block as done, except for implicit IT blocks, which we leave
    // open until we find an instruction that can't be added to it.
    unsigned TZ = countTrailingZeros(ITState.Mask);
    if (++ITState.CurPosition == 5 - TZ && ITState.IsExplicit)
      ITState.CurPosition = ~0U; // Done with the IT block after this.
  }
337 | ||||
338 | // Rewind the state of the current IT block, removing the last slot from it. | |||
339 | void rewindImplicitITPosition() { | |||
340 | assert(inImplicitITBlock())(static_cast <bool> (inImplicitITBlock()) ? void (0) : __assert_fail ("inImplicitITBlock()", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 340, __extension__ __PRETTY_FUNCTION__)); | |||
341 | assert(ITState.CurPosition > 1)(static_cast <bool> (ITState.CurPosition > 1) ? void (0) : __assert_fail ("ITState.CurPosition > 1", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 341, __extension__ __PRETTY_FUNCTION__)); | |||
342 | ITState.CurPosition--; | |||
343 | unsigned TZ = countTrailingZeros(ITState.Mask); | |||
344 | unsigned NewMask = 0; | |||
345 | NewMask |= ITState.Mask & (0xC << TZ); | |||
346 | NewMask |= 0x2 << TZ; | |||
347 | ITState.Mask = NewMask; | |||
348 | } | |||
349 | ||||
  // Rewind the state of the current IT block, removing the last slot from it.
  // If we were at the first slot, this closes the IT block. Only legal at
  // the first slot (asserted below).
  void discardImplicitITBlock() {
    assert(inImplicitITBlock());
    assert(ITState.CurPosition == 1);
    ITState.CurPosition = ~0U;
  }
357 | ||||
  // Return the low D subregister (dsub_0) of a given Q register.
  unsigned getDRegFromQReg(unsigned QReg) const {
    return MRI->getSubReg(QReg, ARM::dsub_0);
  }

  // Get the condition code corresponding to the current IT block slot:
  // the block's condition, or its opposite when the slot's mask bit is set.
  ARMCC::CondCodes currentITCond() {
    unsigned MaskBit = extractITMaskBit(ITState.Mask, ITState.CurPosition);
    return MaskBit ? ARMCC::getOppositeCondition(ITState.Cond) : ITState.Cond;
  }
368 | ||||
  // Invert the condition of the current IT block slot without changing any
  // other slots in the same block.
  void invertCurrentITCondition() {
    if (ITState.CurPosition == 1) {
      // The first slot's state bit is the always-0 bit (see extractITMaskBit),
      // so flip the base condition instead of a mask bit.
      ITState.Cond = ARMCC::getOppositeCondition(ITState.Cond);
    } else {
      ITState.Mask ^= 1 << (5 - ITState.CurPosition);
    }
  }

  // Returns true if the current IT block is full (all 4 slots used), i.e.
  // the terminating 1 has reached bit 0 of the mask.
  bool isITBlockFull() {
    return inITBlock() && (ITState.Mask & 1);
  }
383 | ||||
384 | // Extend the current implicit IT block to have one more slot with the given | |||
385 | // condition code. | |||
386 | void extendImplicitITBlock(ARMCC::CondCodes Cond) { | |||
387 | assert(inImplicitITBlock())(static_cast <bool> (inImplicitITBlock()) ? void (0) : __assert_fail ("inImplicitITBlock()", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 387, __extension__ __PRETTY_FUNCTION__)); | |||
388 | assert(!isITBlockFull())(static_cast <bool> (!isITBlockFull()) ? void (0) : __assert_fail ("!isITBlockFull()", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 388, __extension__ __PRETTY_FUNCTION__)); | |||
389 | assert(Cond == ITState.Cond ||(static_cast <bool> (Cond == ITState.Cond || Cond == ARMCC ::getOppositeCondition(ITState.Cond)) ? void (0) : __assert_fail ("Cond == ITState.Cond || Cond == ARMCC::getOppositeCondition(ITState.Cond)" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 390, __extension__ __PRETTY_FUNCTION__)) | |||
390 | Cond == ARMCC::getOppositeCondition(ITState.Cond))(static_cast <bool> (Cond == ITState.Cond || Cond == ARMCC ::getOppositeCondition(ITState.Cond)) ? void (0) : __assert_fail ("Cond == ITState.Cond || Cond == ARMCC::getOppositeCondition(ITState.Cond)" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 390, __extension__ __PRETTY_FUNCTION__)); | |||
391 | unsigned TZ = countTrailingZeros(ITState.Mask); | |||
392 | unsigned NewMask = 0; | |||
393 | // Keep any existing condition bits. | |||
394 | NewMask |= ITState.Mask & (0xE << TZ); | |||
| ||||
395 | // Insert the new condition bit. | |||
396 | NewMask |= (Cond != ITState.Cond) << TZ; | |||
397 | // Move the trailing 1 down one bit. | |||
398 | NewMask |= 1 << (TZ - 1); | |||
399 | ITState.Mask = NewMask; | |||
400 | } | |||
401 | ||||
402 | // Create a new implicit IT block with a dummy condition code. | |||
403 | void startImplicitITBlock() { | |||
404 | assert(!inITBlock())(static_cast <bool> (!inITBlock()) ? void (0) : __assert_fail ("!inITBlock()", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 404, __extension__ __PRETTY_FUNCTION__)); | |||
405 | ITState.Cond = ARMCC::AL; | |||
406 | ITState.Mask = 8; | |||
407 | ITState.CurPosition = 1; | |||
408 | ITState.IsExplicit = false; | |||
409 | } | |||
410 | ||||
411 | // Create a new explicit IT block with the given condition and mask. | |||
412 | // The mask should be in the format used in ARMOperand and | |||
413 | // MCOperand, with a 1 implying 'e', regardless of the low bit of | |||
414 | // the condition. | |||
415 | void startExplicitITBlock(ARMCC::CondCodes Cond, unsigned Mask) { | |||
416 | assert(!inITBlock())(static_cast <bool> (!inITBlock()) ? void (0) : __assert_fail ("!inITBlock()", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 416, __extension__ __PRETTY_FUNCTION__)); | |||
417 | ITState.Cond = Cond; | |||
418 | ITState.Mask = Mask; | |||
419 | ITState.CurPosition = 0; | |||
420 | ITState.IsExplicit = true; | |||
421 | } | |||
422 | ||||
  // VPT (vector predication) block state. The mask uses the same
  // trailing-1 length encoding as ITState.Mask (length = 4 - trailing
  // zeros); CurPosition is ~0U when no VPT block is active.
  struct {
    unsigned Mask : 4;
    unsigned CurPosition;
  } VPTState;
  bool inVPTBlock() { return VPTState.CurPosition != ~0U; }
  // Advance to the next slot of the active VPT block; close it at the end.
  void forwardVPTPosition() {
    if (!inVPTBlock()) return;
    unsigned TZ = countTrailingZeros(VPTState.Mask);
    if (++VPTState.CurPosition == 5 - TZ)
      VPTState.CurPosition = ~0U;
  }

  // Diagnostic helpers forwarding to the underlying MCAsmParser.
  void Note(SMLoc L, const Twine &Msg, SMRange Range = None) {
    return getParser().Note(L, Msg, Range);
  }

  bool Warning(SMLoc L, const Twine &Msg, SMRange Range = None) {
    return getParser().Warning(L, Msg, Range);
  }

  bool Error(SMLoc L, const Twine &Msg, SMRange Range = None) {
    return getParser().Error(L, Msg, Range);
  }
446 | ||||
  // Register-list validation for load/store-multiple instructions.
  bool validatetLDMRegList(const MCInst &Inst, const OperandVector &Operands,
                           unsigned ListNo, bool IsARPop = false);
  bool validatetSTMRegList(const MCInst &Inst, const OperandVector &Operands,
                           unsigned ListNo);

  // Operand-parsing helpers.
  int tryParseRegister();
  bool tryParseRegisterWithWriteBack(OperandVector &);
  int tryParseShiftRegister(OperandVector &);
  bool parseRegisterList(OperandVector &, bool EnforceOrder = true);
  bool parseMemory(OperandVector &);
  bool parseOperand(OperandVector &, StringRef Mnemonic);
  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
                              unsigned &ShiftAmount);
  bool parseLiteralValues(unsigned Size, SMLoc L);

  // Assembler directive handlers (.thumb, .arch, .fnstart, ...).
  bool parseDirectiveThumb(SMLoc L);
  bool parseDirectiveARM(SMLoc L);
  bool parseDirectiveThumbFunc(SMLoc L);
  bool parseDirectiveCode(SMLoc L);
  bool parseDirectiveSyntax(SMLoc L);
  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);
  bool parseDirectiveArch(SMLoc L);
  bool parseDirectiveEabiAttr(SMLoc L);
  bool parseDirectiveCPU(SMLoc L);
  bool parseDirectiveFPU(SMLoc L);
  bool parseDirectiveFnStart(SMLoc L);
  bool parseDirectiveFnEnd(SMLoc L);
  bool parseDirectiveCantUnwind(SMLoc L);
  bool parseDirectivePersonality(SMLoc L);
  bool parseDirectiveHandlerData(SMLoc L);
  bool parseDirectiveSetFP(SMLoc L);
  bool parseDirectivePad(SMLoc L);
  bool parseDirectiveRegSave(SMLoc L, bool IsVector);
  bool parseDirectiveInst(SMLoc L, char Suffix = '\0');
  bool parseDirectiveLtorg(SMLoc L);
  bool parseDirectiveEven(SMLoc L);
  bool parseDirectivePersonalityIndex(SMLoc L);
  bool parseDirectiveUnwindRaw(SMLoc L);
  bool parseDirectiveTLSDescSeq(SMLoc L);
  bool parseDirectiveMovSP(SMLoc L);
  bool parseDirectiveObjectArch(SMLoc L);
  bool parseDirectiveArchExtension(SMLoc L);
  bool parseDirectiveAlign(SMLoc L);
  bool parseDirectiveThumbSet(SMLoc L);

  // Mnemonic analysis: split predication/carry/IT-mask suffixes off a
  // mnemonic and query which suffixes a mnemonic can accept.
  bool isMnemonicVPTPredicable(StringRef Mnemonic, StringRef ExtraToken);
  StringRef splitMnemonic(StringRef Mnemonic, StringRef ExtraToken,
                          unsigned &PredicationCode,
                          unsigned &VPTPredicationCode, bool &CarrySetting,
                          unsigned &ProcessorIMod, StringRef &ITMask);
  void getMnemonicAcceptInfo(StringRef Mnemonic, StringRef ExtraToken,
                             StringRef FullInst, bool &CanAcceptCarrySet,
                             bool &CanAcceptPredicationCode,
                             bool &CanAcceptVPTPredicationCode);
  bool enableArchExtFeature(StringRef Name, SMLoc &ExtLoc);

  // Instruction-form rewrites applied during matching.
  void tryConvertingToTwoOperandForm(StringRef Mnemonic, bool CarrySetting,
                                     OperandVector &Operands);
  bool CDEConvertDualRegOperand(StringRef Mnemonic, OperandVector &Operands);
507 | ||||
  // --- Subtarget feature predicates, read from the current STI -----------

  bool isThumb() const {
    // FIXME: Can tablegen auto-generate this?
    return getSTI().getFeatureBits()[ARM::ModeThumb];
  }

  bool isThumbOne() const {
    return isThumb() && !getSTI().getFeatureBits()[ARM::FeatureThumb2];
  }

  bool isThumbTwo() const {
    return isThumb() && getSTI().getFeatureBits()[ARM::FeatureThumb2];
  }

  bool hasThumb() const {
    return getSTI().getFeatureBits()[ARM::HasV4TOps];
  }

  bool hasThumb2() const {
    return getSTI().getFeatureBits()[ARM::FeatureThumb2];
  }

  bool hasV6Ops() const {
    return getSTI().getFeatureBits()[ARM::HasV6Ops];
  }

  bool hasV6T2Ops() const {
    return getSTI().getFeatureBits()[ARM::HasV6T2Ops];
  }

  bool hasV6MOps() const {
    return getSTI().getFeatureBits()[ARM::HasV6MOps];
  }

  bool hasV7Ops() const {
    return getSTI().getFeatureBits()[ARM::HasV7Ops];
  }

  bool hasV8Ops() const {
    return getSTI().getFeatureBits()[ARM::HasV8Ops];
  }

  bool hasV8MBaseline() const {
    return getSTI().getFeatureBits()[ARM::HasV8MBaselineOps];
  }

  bool hasV8MMainline() const {
    return getSTI().getFeatureBits()[ARM::HasV8MMainlineOps];
  }
  bool hasV8_1MMainline() const {
    return getSTI().getFeatureBits()[ARM::HasV8_1MMainlineOps];
  }
  bool hasMVE() const {
    return getSTI().getFeatureBits()[ARM::HasMVEIntegerOps];
  }
  bool hasMVEFloat() const {
    return getSTI().getFeatureBits()[ARM::HasMVEFloatOps];
  }
  bool hasCDE() const {
    return getSTI().getFeatureBits()[ARM::HasCDEOps];
  }
  bool has8MSecExt() const {
    return getSTI().getFeatureBits()[ARM::Feature8MSecExt];
  }

  bool hasARM() const {
    return !getSTI().getFeatureBits()[ARM::FeatureNoARM];
  }

  bool hasDSP() const {
    return getSTI().getFeatureBits()[ARM::FeatureDSP];
  }

  bool hasD32() const {
    return getSTI().getFeatureBits()[ARM::FeatureD32];
  }

  bool hasV8_1aOps() const {
    return getSTI().getFeatureBits()[ARM::HasV8_1aOps];
  }

  bool hasRAS() const {
    return getSTI().getFeatureBits()[ARM::FeatureRAS];
  }

  // Toggle between ARM and Thumb mode and recompute the available features.
  void SwitchMode() {
    MCSubtargetInfo &STI = copySTI();
    auto FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
    setAvailableFeatures(FB);
  }

  // Adjust the parser's ISA mode after an arch change (defined out of line).
  void FixModeAfterArchChange(bool WasThumb, SMLoc Loc);

  bool isMClass() const {
    return getSTI().getFeatureBits()[ARM::FeatureMClass];
  }
603 | ||||
  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "ARMGenAsmMatcher.inc"

  /// }

  // Custom operand parsers, invoked via the auto-generated matcher tables.
  OperandMatchResultTy parseITCondCode(OperandVector &);
  OperandMatchResultTy parseCoprocNumOperand(OperandVector &);
  OperandMatchResultTy parseCoprocRegOperand(OperandVector &);
  OperandMatchResultTy parseCoprocOptionOperand(OperandVector &);
  OperandMatchResultTy parseMemBarrierOptOperand(OperandVector &);
  OperandMatchResultTy parseTraceSyncBarrierOptOperand(OperandVector &);
  OperandMatchResultTy parseInstSyncBarrierOptOperand(OperandVector &);
  OperandMatchResultTy parseProcIFlagsOperand(OperandVector &);
  OperandMatchResultTy parseMSRMaskOperand(OperandVector &);
  OperandMatchResultTy parseBankedRegOperand(OperandVector &);
  OperandMatchResultTy parsePKHImm(OperandVector &O, StringRef Op, int Low,
                                   int High);
  // PKH "lsl #imm" accepts 0..31; "asr #imm" accepts 1..32.
  OperandMatchResultTy parsePKHLSLImm(OperandVector &O) {
    return parsePKHImm(O, "lsl", 0, 31);
  }
  OperandMatchResultTy parsePKHASRImm(OperandVector &O) {
    return parsePKHImm(O, "asr", 1, 32);
  }
  OperandMatchResultTy parseSetEndImm(OperandVector &);
  OperandMatchResultTy parseShifterImm(OperandVector &);
  OperandMatchResultTy parseRotImm(OperandVector &);
  OperandMatchResultTy parseModImm(OperandVector &);
  OperandMatchResultTy parseBitfield(OperandVector &);
  OperandMatchResultTy parsePostIdxReg(OperandVector &);
  OperandMatchResultTy parseAM3Offset(OperandVector &);
  OperandMatchResultTy parseFPImm(OperandVector &);
  OperandMatchResultTy parseVectorList(OperandVector &);
  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index,
                                       SMLoc &EndLoc);

  // Asm Match Converter Methods
  void cvtThumbMultiply(MCInst &Inst, const OperandVector &);
  void cvtThumbBranches(MCInst &Inst, const OperandVector &);
  void cvtMVEVMOVQtoDReg(MCInst &Inst, const OperandVector &);

  // Post-match validation and instruction rewriting helpers.
  bool validateInstruction(MCInst &Inst, const OperandVector &Ops);
  bool processInstruction(MCInst &Inst, const OperandVector &Ops, MCStreamer &Out);
  bool shouldOmitCCOutOperand(StringRef Mnemonic, OperandVector &Operands);
  bool shouldOmitPredicateOperand(StringRef Mnemonic, OperandVector &Operands);
  bool shouldOmitVectorPredicateOperand(StringRef Mnemonic, OperandVector &Operands);
  bool isITBlockTerminator(MCInst &Inst) const;
  void fixupGNULDRDAlias(StringRef Mnemonic, OperandVector &Operands);
  bool validateLDRDSTRD(MCInst &Inst, const OperandVector &Operands,
                        bool Load, bool ARMMode, bool Writeback);
656 | ||||
public:
  /// Target-specific match result codes for more precise diagnostics than
  /// the generic matcher results; extended by tablegen-generated entries.
  enum ARMMatchResultTy {
    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
    Match_RequiresNotITBlock,
    Match_RequiresV6,
    Match_RequiresThumb2,
    Match_RequiresV8,
    Match_RequiresFlagSetting,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "ARMGenAsmMatcher.inc"

  };

  ARMAsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
               const MCInstrInfo &MII, const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, STI, MII), UC(Parser), MS(STI) {
    // Must run before getContext()/getTargetStreamer() are usable below.
    MCAsmParserExtension::Initialize(Parser);

    // Cache the MCRegisterInfo.
    MRI = getContext().getRegisterInfo();

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));

    // Add build attributes based on the selected target.
    if (AddBuildAttributes)
      getTargetStreamer().emitTargetAttributes(STI);

    // Not in an ITBlock to start with.
    ITState.CurPosition = ~0U;

    // Likewise, not inside a VPT block.
    VPTState.CurPosition = ~0U;

    NextSymbolIsThumb = false;
  }
692 | ||||
  // Implementation of the MCTargetAsmParser interface:
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  OperandMatchResultTy tryParseRegister(unsigned &RegNo, SMLoc &StartLoc,
                                        SMLoc &EndLoc) override;
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;
  bool ParseDirective(AsmToken DirectiveID) override;

  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;
  unsigned checkTargetMatchPredicate(MCInst &Inst) override;

  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  unsigned MatchInstruction(OperandVector &Operands, MCInst &Inst,
                            SmallVectorImpl<NearMissInfo> &NearMisses,
                            bool MatchingInlineAsm, bool &EmitInITBlock,
                            MCStreamer &Out);

  /// A rendered near-miss diagnostic and the location to report it at.
  struct NearMissMessage {
    SMLoc Loc;
    SmallString<128> Message;
  };

  const char *getCustomOperandDiag(ARMMatchResultTy MatchError);

  // Near-miss reporting: filter the raw list, then emit diagnostics.
  void FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
                        SmallVectorImpl<NearMissMessage> &NearMissesOut,
                        SMLoc IDLoc, OperandVector &Operands);
  void ReportNearMisses(SmallVectorImpl<NearMissInfo> &NearMisses, SMLoc IDLoc,
                        OperandVector &Operands);

  // Label hooks, used to track Thumb-function symbol annotation.
  void doBeforeLabelEmit(MCSymbol *Symbol) override;

  void onLabelParsed(MCSymbol *Symbol) override;
};
731 | ||||
732 | /// ARMOperand - Instances of this class represent a parsed ARM machine | |||
733 | /// operand. | |||
734 | class ARMOperand : public MCParsedAsmOperand { | |||
  /// Discriminator for the payload union below; selects which operand form
  /// this ARMOperand holds.
  enum KindTy {
    k_CondCode,
    k_VPTPred,
    k_CCOut,
    k_ITCondMask,
    k_CoprocNum,
    k_CoprocReg,
    k_CoprocOption,
    k_Immediate,
    k_MemBarrierOpt,
    k_InstSyncBarrierOpt,
    k_TraceSyncBarrierOpt,
    k_Memory,
    k_PostIndexRegister,
    k_MSRMask,
    k_BankedReg,
    k_ProcIFlags,
    k_VectorIndex,
    k_Register,
    k_RegisterList,
    k_RegisterListWithAPSR,
    k_DPRRegisterList,
    k_SPRRegisterList,
    k_FPSRegisterListWithVPR,
    k_FPDRegisterListWithVPR,
    k_VectorList,
    k_VectorListAllLanes,
    k_VectorListIndexed,
    k_ShiftedRegister,
    k_ShiftedImmediate,
    k_ShifterImmediate,
    k_RotateImmediate,
    k_ModifiedImmediate,
    k_ConstantPoolImmediate,
    k_BitfieldDescriptor,
    k_Token,
  } Kind;
772 | ||||
  SMLoc StartLoc, EndLoc, AlignmentLoc;
  // Register list payload (used by the k_*RegisterList* kinds); kept
  // outside the union because SmallVector is not trivially destructible.
  SmallVector<unsigned, 8> Registers;

  // Per-kind payload structs. Exactly one of these lives in the anonymous
  // union at the bottom; Kind says which member is active.

  struct CCOp {
    ARMCC::CondCodes Val;
  };

  struct VCCOp {
    ARMVCC::VPTCodes Val;
  };

  struct CopOp {
    unsigned Val;
  };

  struct CoprocOptionOp {
    unsigned Val;
  };

  struct ITMaskOp {
    unsigned Mask:4;
  };

  struct MBOptOp {
    ARM_MB::MemBOpt Val;
  };

  struct ISBOptOp {
    ARM_ISB::InstSyncBOpt Val;
  };

  struct TSBOptOp {
    ARM_TSB::TraceSyncBOpt Val;
  };

  struct IFlagsOp {
    ARM_PROC::IFlags Val;
  };

  struct MMaskOp {
    unsigned Val;
  };

  struct BankedRegOp {
    unsigned Val;
  };

  // Non-owning view of the token text (backed by the source buffer).
  struct TokOp {
    const char *Data;
    unsigned Length;
  };

  struct RegOp {
    unsigned RegNum;
  };

  // A vector register list is a sequential list of 1 to 4 registers.
  struct VectorListOp {
    unsigned RegNum;
    unsigned Count;
    unsigned LaneIndex;
    bool isDoubleSpaced;
  };

  struct VectorIndexOp {
    unsigned Val;
  };

  struct ImmOp {
    const MCExpr *Val;
  };

  /// Combined record for all forms of ARM address expressions.
  struct MemoryOp {
    unsigned BaseRegNum;
    // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
    // was specified.
    const MCExpr *OffsetImm;  // Offset immediate value
    unsigned OffsetRegNum;    // Offset register num, when OffsetImm == NULL
    ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
    unsigned ShiftImm;        // shift for OffsetReg.
    unsigned Alignment;       // 0 = no alignment specified
                              // n = alignment in bytes (2, 4, 8, 16, or 32)
    unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
  };

  struct PostIdxRegOp {
    unsigned RegNum;
    bool isAdd;
    ARM_AM::ShiftOpc ShiftTy;
    unsigned ShiftImm;
  };

  struct ShifterImmOp {
    bool isASR;
    unsigned Imm;
  };

  struct RegShiftedRegOp {
    ARM_AM::ShiftOpc ShiftTy;
    unsigned SrcReg;
    unsigned ShiftReg;
    unsigned ShiftImm;
  };

  struct RegShiftedImmOp {
    ARM_AM::ShiftOpc ShiftTy;
    unsigned SrcReg;
    unsigned ShiftImm;
  };

  struct RotImmOp {
    unsigned Imm;
  };

  struct ModImmOp {
    unsigned Bits;
    unsigned Rot;
  };

  struct BitfieldOp {
    unsigned LSB;
    unsigned Width;
  };

  // Payload storage; the active member is selected by Kind.
  union {
    struct CCOp CC;
    struct VCCOp VCC;
    struct CopOp Cop;
    struct CoprocOptionOp CoprocOption;
    struct MBOptOp MBOpt;
    struct ISBOptOp ISBOpt;
    struct TSBOptOp TSBOpt;
    struct ITMaskOp ITMask;
    struct IFlagsOp IFlags;
    struct MMaskOp MMask;
    struct BankedRegOp BankedReg;
    struct TokOp Tok;
    struct RegOp Reg;
    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ImmOp Imm;
    struct MemoryOp Memory;
    struct PostIdxRegOp PostIdxReg;
    struct ShifterImmOp ShifterImm;
    struct RegShiftedRegOp RegShiftedReg;
    struct RegShiftedImmOp RegShiftedImm;
    struct RotImmOp RotImm;
    struct ModImmOp ModImm;
    struct BitfieldOp Bitfield;
  };
924 | ||||
925 | public: | |||
926 | ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {} | |||
927 | ||||
928 | /// getStartLoc - Get the location of the first token of this operand. | |||
929 | SMLoc getStartLoc() const override { return StartLoc; } | |||
930 | ||||
931 | /// getEndLoc - Get the location of the last token of this operand. | |||
932 | SMLoc getEndLoc() const override { return EndLoc; } | |||
933 | ||||
934 | /// getLocRange - Get the range between the first and last token of this | |||
935 | /// operand. | |||
936 | SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); } | |||
937 | ||||
938 | /// getAlignmentLoc - Get the location of the Alignment token of this operand. | |||
939 | SMLoc getAlignmentLoc() const { | |||
940 | assert(Kind == k_Memory && "Invalid access!")(static_cast <bool> (Kind == k_Memory && "Invalid access!" ) ? void (0) : __assert_fail ("Kind == k_Memory && \"Invalid access!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 940, __extension__ __PRETTY_FUNCTION__)); | |||
941 | return AlignmentLoc; | |||
942 | } | |||
943 | ||||
944 | ARMCC::CondCodes getCondCode() const { | |||
945 | assert(Kind == k_CondCode && "Invalid access!")(static_cast <bool> (Kind == k_CondCode && "Invalid access!" ) ? void (0) : __assert_fail ("Kind == k_CondCode && \"Invalid access!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 945, __extension__ __PRETTY_FUNCTION__)); | |||
946 | return CC.Val; | |||
947 | } | |||
948 | ||||
949 | ARMVCC::VPTCodes getVPTPred() const { | |||
950 | assert(isVPTPred() && "Invalid access!")(static_cast <bool> (isVPTPred() && "Invalid access!" ) ? void (0) : __assert_fail ("isVPTPred() && \"Invalid access!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 950, __extension__ __PRETTY_FUNCTION__)); | |||
951 | return VCC.Val; | |||
952 | } | |||
953 | ||||
954 | unsigned getCoproc() const { | |||
955 | assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!")(static_cast <bool> ((Kind == k_CoprocNum || Kind == k_CoprocReg ) && "Invalid access!") ? void (0) : __assert_fail ("(Kind == k_CoprocNum || Kind == k_CoprocReg) && \"Invalid access!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 955, __extension__ __PRETTY_FUNCTION__)); | |||
956 | return Cop.Val; | |||
957 | } | |||
958 | ||||
959 | StringRef getToken() const { | |||
960 | assert(Kind == k_Token && "Invalid access!")(static_cast <bool> (Kind == k_Token && "Invalid access!" ) ? void (0) : __assert_fail ("Kind == k_Token && \"Invalid access!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 960, __extension__ __PRETTY_FUNCTION__)); | |||
961 | return StringRef(Tok.Data, Tok.Length); | |||
962 | } | |||
963 | ||||
964 | unsigned getReg() const override { | |||
965 | assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!")(static_cast <bool> ((Kind == k_Register || Kind == k_CCOut ) && "Invalid access!") ? void (0) : __assert_fail ("(Kind == k_Register || Kind == k_CCOut) && \"Invalid access!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 965, __extension__ __PRETTY_FUNCTION__)); | |||
966 | return Reg.RegNum; | |||
967 | } | |||
968 | ||||
969 | const SmallVectorImpl<unsigned> &getRegList() const { | |||
970 | assert((Kind == k_RegisterList || Kind == k_RegisterListWithAPSR ||(static_cast <bool> ((Kind == k_RegisterList || Kind == k_RegisterListWithAPSR || Kind == k_DPRRegisterList || Kind == k_SPRRegisterList || Kind == k_FPSRegisterListWithVPR || Kind == k_FPDRegisterListWithVPR) && "Invalid access!") ? void (0) : __assert_fail ("(Kind == k_RegisterList || Kind == k_RegisterListWithAPSR || Kind == k_DPRRegisterList || Kind == k_SPRRegisterList || Kind == k_FPSRegisterListWithVPR || Kind == k_FPDRegisterListWithVPR) && \"Invalid access!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 974, __extension__ __PRETTY_FUNCTION__)) | |||
971 | Kind == k_DPRRegisterList || Kind == k_SPRRegisterList ||(static_cast <bool> ((Kind == k_RegisterList || Kind == k_RegisterListWithAPSR || Kind == k_DPRRegisterList || Kind == k_SPRRegisterList || Kind == k_FPSRegisterListWithVPR || Kind == k_FPDRegisterListWithVPR) && "Invalid access!") ? void (0) : __assert_fail ("(Kind == k_RegisterList || Kind == k_RegisterListWithAPSR || Kind == k_DPRRegisterList || Kind == k_SPRRegisterList || Kind == k_FPSRegisterListWithVPR || Kind == k_FPDRegisterListWithVPR) && \"Invalid access!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 974, __extension__ __PRETTY_FUNCTION__)) | |||
972 | Kind == k_FPSRegisterListWithVPR ||(static_cast <bool> ((Kind == k_RegisterList || Kind == k_RegisterListWithAPSR || Kind == k_DPRRegisterList || Kind == k_SPRRegisterList || Kind == k_FPSRegisterListWithVPR || Kind == k_FPDRegisterListWithVPR) && "Invalid access!") ? void (0) : __assert_fail ("(Kind == k_RegisterList || Kind == k_RegisterListWithAPSR || Kind == k_DPRRegisterList || Kind == k_SPRRegisterList || Kind == k_FPSRegisterListWithVPR || Kind == k_FPDRegisterListWithVPR) && \"Invalid access!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 974, __extension__ __PRETTY_FUNCTION__)) | |||
973 | Kind == k_FPDRegisterListWithVPR) &&(static_cast <bool> ((Kind == k_RegisterList || Kind == k_RegisterListWithAPSR || Kind == k_DPRRegisterList || Kind == k_SPRRegisterList || Kind == k_FPSRegisterListWithVPR || Kind == k_FPDRegisterListWithVPR) && "Invalid access!") ? void (0) : __assert_fail ("(Kind == k_RegisterList || Kind == k_RegisterListWithAPSR || Kind == k_DPRRegisterList || Kind == k_SPRRegisterList || Kind == k_FPSRegisterListWithVPR || Kind == k_FPDRegisterListWithVPR) && \"Invalid access!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 974, __extension__ __PRETTY_FUNCTION__)) | |||
974 | "Invalid access!")(static_cast <bool> ((Kind == k_RegisterList || Kind == k_RegisterListWithAPSR || Kind == k_DPRRegisterList || Kind == k_SPRRegisterList || Kind == k_FPSRegisterListWithVPR || Kind == k_FPDRegisterListWithVPR) && "Invalid access!") ? void (0) : __assert_fail ("(Kind == k_RegisterList || Kind == k_RegisterListWithAPSR || Kind == k_DPRRegisterList || Kind == k_SPRRegisterList || Kind == k_FPSRegisterListWithVPR || Kind == k_FPDRegisterListWithVPR) && \"Invalid access!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 974, __extension__ __PRETTY_FUNCTION__)); | |||
975 | return Registers; | |||
976 | } | |||
977 | ||||
978 | const MCExpr *getImm() const { | |||
979 | assert(isImm() && "Invalid access!")(static_cast <bool> (isImm() && "Invalid access!" ) ? void (0) : __assert_fail ("isImm() && \"Invalid access!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 979, __extension__ __PRETTY_FUNCTION__)); | |||
980 | return Imm.Val; | |||
981 | } | |||
982 | ||||
983 | const MCExpr *getConstantPoolImm() const { | |||
984 | assert(isConstantPoolImm() && "Invalid access!")(static_cast <bool> (isConstantPoolImm() && "Invalid access!" ) ? void (0) : __assert_fail ("isConstantPoolImm() && \"Invalid access!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 984, __extension__ __PRETTY_FUNCTION__)); | |||
985 | return Imm.Val; | |||
986 | } | |||
987 | ||||
988 | unsigned getVectorIndex() const { | |||
989 | assert(Kind == k_VectorIndex && "Invalid access!")(static_cast <bool> (Kind == k_VectorIndex && "Invalid access!" ) ? void (0) : __assert_fail ("Kind == k_VectorIndex && \"Invalid access!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 989, __extension__ __PRETTY_FUNCTION__)); | |||
990 | return VectorIndex.Val; | |||
991 | } | |||
992 | ||||
993 | ARM_MB::MemBOpt getMemBarrierOpt() const { | |||
994 | assert(Kind == k_MemBarrierOpt && "Invalid access!")(static_cast <bool> (Kind == k_MemBarrierOpt && "Invalid access!") ? void (0) : __assert_fail ("Kind == k_MemBarrierOpt && \"Invalid access!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 994, __extension__ __PRETTY_FUNCTION__)); | |||
995 | return MBOpt.Val; | |||
996 | } | |||
997 | ||||
998 | ARM_ISB::InstSyncBOpt getInstSyncBarrierOpt() const { | |||
999 | assert(Kind == k_InstSyncBarrierOpt && "Invalid access!")(static_cast <bool> (Kind == k_InstSyncBarrierOpt && "Invalid access!") ? void (0) : __assert_fail ("Kind == k_InstSyncBarrierOpt && \"Invalid access!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 999, __extension__ __PRETTY_FUNCTION__)); | |||
1000 | return ISBOpt.Val; | |||
1001 | } | |||
1002 | ||||
1003 | ARM_TSB::TraceSyncBOpt getTraceSyncBarrierOpt() const { | |||
1004 | assert(Kind == k_TraceSyncBarrierOpt && "Invalid access!")(static_cast <bool> (Kind == k_TraceSyncBarrierOpt && "Invalid access!") ? void (0) : __assert_fail ("Kind == k_TraceSyncBarrierOpt && \"Invalid access!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 1004, __extension__ __PRETTY_FUNCTION__)); | |||
1005 | return TSBOpt.Val; | |||
1006 | } | |||
1007 | ||||
1008 | ARM_PROC::IFlags getProcIFlags() const { | |||
1009 | assert(Kind == k_ProcIFlags && "Invalid access!")(static_cast <bool> (Kind == k_ProcIFlags && "Invalid access!" ) ? void (0) : __assert_fail ("Kind == k_ProcIFlags && \"Invalid access!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 1009, __extension__ __PRETTY_FUNCTION__)); | |||
1010 | return IFlags.Val; | |||
1011 | } | |||
1012 | ||||
1013 | unsigned getMSRMask() const { | |||
1014 | assert(Kind == k_MSRMask && "Invalid access!")(static_cast <bool> (Kind == k_MSRMask && "Invalid access!" ) ? void (0) : __assert_fail ("Kind == k_MSRMask && \"Invalid access!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 1014, __extension__ __PRETTY_FUNCTION__)); | |||
1015 | return MMask.Val; | |||
1016 | } | |||
1017 | ||||
1018 | unsigned getBankedReg() const { | |||
1019 | assert(Kind == k_BankedReg && "Invalid access!")(static_cast <bool> (Kind == k_BankedReg && "Invalid access!" ) ? void (0) : __assert_fail ("Kind == k_BankedReg && \"Invalid access!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 1019, __extension__ __PRETTY_FUNCTION__)); | |||
1020 | return BankedReg.Val; | |||
1021 | } | |||
1022 | ||||
  // Simple kind predicates used by the auto-generated matcher.
  bool isCoprocNum() const { return Kind == k_CoprocNum; }
  bool isCoprocReg() const { return Kind == k_CoprocReg; }
  bool isCoprocOption() const { return Kind == k_CoprocOption; }
  bool isCondCode() const { return Kind == k_CondCode; }
  bool isVPTPred() const { return Kind == k_VPTPred; }
  bool isCCOut() const { return Kind == k_CCOut; }
  bool isITMask() const { return Kind == k_ITCondMask; }
  // An IT condition code is represented with the same kind as a plain
  // condition code.
  bool isITCondCode() const { return Kind == k_CondCode; }
  bool isImm() const override {
    return Kind == k_Immediate;
  }
1034 | ||||
1035 | bool isARMBranchTarget() const { | |||
1036 | if (!isImm()) return false; | |||
1037 | ||||
1038 | if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) | |||
1039 | return CE->getValue() % 4 == 0; | |||
1040 | return true; | |||
1041 | } | |||
1042 | ||||
1043 | ||||
1044 | bool isThumbBranchTarget() const { | |||
1045 | if (!isImm()) return false; | |||
1046 | ||||
1047 | if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) | |||
1048 | return CE->getValue() % 2 == 0; | |||
1049 | return true; | |||
1050 | } | |||
1051 | ||||
1052 | // checks whether this operand is an unsigned offset which fits is a field | |||
1053 | // of specified width and scaled by a specific number of bits | |||
1054 | template<unsigned width, unsigned scale> | |||
1055 | bool isUnsignedOffset() const { | |||
1056 | if (!isImm()) return false; | |||
1057 | if (isa<MCSymbolRefExpr>(Imm.Val)) return true; | |||
1058 | if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) { | |||
1059 | int64_t Val = CE->getValue(); | |||
1060 | int64_t Align = 1LL << scale; | |||
1061 | int64_t Max = Align * ((1LL << width) - 1); | |||
1062 | return ((Val % Align) == 0) && (Val >= 0) && (Val <= Max); | |||
1063 | } | |||
1064 | return false; | |||
1065 | } | |||
1066 | ||||
1067 | // checks whether this operand is an signed offset which fits is a field | |||
1068 | // of specified width and scaled by a specific number of bits | |||
1069 | template<unsigned width, unsigned scale> | |||
1070 | bool isSignedOffset() const { | |||
1071 | if (!isImm()) return false; | |||
1072 | if (isa<MCSymbolRefExpr>(Imm.Val)) return true; | |||
1073 | if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) { | |||
1074 | int64_t Val = CE->getValue(); | |||
1075 | int64_t Align = 1LL << scale; | |||
1076 | int64_t Max = Align * ((1LL << (width-1)) - 1); | |||
1077 | int64_t Min = -Align * (1LL << (width-1)); | |||
1078 | return ((Val % Align) == 0) && (Val >= Min) && (Val <= Max); | |||
1079 | } | |||
1080 | return false; | |||
1081 | } | |||
1082 | ||||
1083 | // checks whether this operand is an offset suitable for the LE / | |||
1084 | // LETP instructions in Arm v8.1M | |||
1085 | bool isLEOffset() const { | |||
1086 | if (!isImm()) return false; | |||
1087 | if (isa<MCSymbolRefExpr>(Imm.Val)) return true; | |||
1088 | if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) { | |||
1089 | int64_t Val = CE->getValue(); | |||
1090 | return Val < 0 && Val >= -4094 && (Val & 1) == 0; | |||
1091 | } | |||
1092 | return false; | |||
1093 | } | |||
1094 | ||||
  // checks whether this operand is a memory operand computed as an offset
  // applied to PC. the offset may have 8 bits of magnitude and is represented
  // with two bits of shift. textually it may be either [pc, #imm], #imm or
  // relocable expression...
  bool isThumbMemPC() const {
    int64_t Val = 0;
    if (isImm()) {
      // A symbolic immediate is accepted as-is; it becomes a fixup later.
      if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
      if (!CE) return false;
      Val = CE->getValue();
    }
    else if (isGPRMem()) {
      // Memory form: must be [pc, #imm] with a constant immediate offset
      // and no offset register.
      if(!Memory.OffsetImm || Memory.OffsetRegNum) return false;
      if(Memory.BaseRegNum != ARM::PC) return false;
      if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
        Val = CE->getValue();
      else
        return false;
    }
    else return false;
    // 8-bit magnitude scaled by 4: non-negative multiples of 4 up to 1020.
    return ((Val % 4) == 0) && (Val >= 0) && (Val <= 1020);
  }
1118 | ||||
  /// True when the operand is a constant that is encodable as a VFP
  /// 8-bit floating-point immediate (getFP32Imm returns -1 if not).
  bool isFPImm() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    // The constant holds the raw 32-bit float bit pattern.
    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
    return Val != -1;
  }
1126 | ||||
1127 | template<int64_t N, int64_t M> | |||
1128 | bool isImmediate() const { | |||
1129 | if (!isImm()) return false; | |||
1130 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); | |||
1131 | if (!CE) return false; | |||
1132 | int64_t Value = CE->getValue(); | |||
1133 | return Value >= N && Value <= M; | |||
1134 | } | |||
1135 | ||||
1136 | template<int64_t N, int64_t M> | |||
1137 | bool isImmediateS4() const { | |||
1138 | if (!isImm()) return false; | |||
1139 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); | |||
1140 | if (!CE) return false; | |||
1141 | int64_t Value = CE->getValue(); | |||
1142 | return ((Value & 3) == 0) && Value >= N && Value <= M; | |||
1143 | } | |||
1144 | template<int64_t N, int64_t M> | |||
1145 | bool isImmediateS2() const { | |||
1146 | if (!isImm()) return false; | |||
1147 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); | |||
1148 | if (!CE) return false; | |||
1149 | int64_t Value = CE->getValue(); | |||
1150 | return ((Value & 1) == 0) && Value >= N && Value <= M; | |||
1151 | } | |||
  // Named immediate-range predicates, thin wrappers over the templates
  // above. Several ranges look off-by-one (e.g. <0, 17> for 16 fractional
  // bits); NOTE(review): these bounds match the checked-in upstream values —
  // confirm against the encoders before "fixing" any of them.
  bool isFBits16() const {
    return isImmediate<0, 17>();
  }
  bool isFBits32() const {
    return isImmediate<1, 33>();
  }
  bool isImm8s4() const {
    return isImmediateS4<-1020, 1020>();
  }
  bool isImm7s4() const {
    return isImmediateS4<-508, 508>();
  }
  bool isImm7Shift0() const {
    return isImmediate<-127, 127>();
  }
  bool isImm7Shift1() const {
    return isImmediateS2<-255, 255>();
  }
  bool isImm7Shift2() const {
    return isImmediateS4<-511, 511>();
  }
  bool isImm7() const {
    return isImmediate<-127, 127>();
  }
  bool isImm0_1020s4() const {
    return isImmediateS4<0, 1020>();
  }
  bool isImm0_508s4() const {
    return isImmediateS4<0, 508>();
  }
  bool isImm0_508s4Neg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = -CE->getValue();
    // explicitly exclude zero. we want that to use the normal 0_508 version.
    return ((Value & 3) == 0) && Value > 0 && Value <= 508;
  }

  bool isImm0_4095Neg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    // isImm0_4095Neg is used with 32-bit immediates only.
    // 32-bit immediates are zero extended to 64-bit when parsed,
    // thus simple -CE->getValue() results in a big negative number,
    // not a small positive number as intended
    if ((CE->getValue() >> 32) > 0) return false;
    uint32_t Value = -static_cast<uint32_t>(CE->getValue());
    return Value > 0 && Value < 4096;
  }

  bool isImm0_7() const {
    return isImmediate<0, 7>();
  }

  bool isImm1_16() const {
    return isImmediate<1, 16>();
  }

  bool isImm1_32() const {
    return isImmediate<1, 32>();
  }

  bool isImm8_255() const {
    return isImmediate<8, 255>();
  }

  bool isImm256_65535Expr() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // If it's not a constant expression, it'll generate a fixup and be
    // handled later.
    if (!CE) return true;
    int64_t Value = CE->getValue();
    return Value >= 256 && Value < 65536;
  }

  bool isImm0_65535Expr() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // If it's not a constant expression, it'll generate a fixup and be
    // handled later.
    if (!CE) return true;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 65536;
  }

  // NOTE(review): upper bound is 0xffffff + 1 (one past the 24-bit max),
  // matching upstream; confirm intended before changing.
  bool isImm24bit() const {
    return isImmediate<0, 0xffffff + 1>();
  }

  // Thumb shift-right amounts are 1..32 (33 is the exclusive-style bound
  // used throughout this file for SR immediates).
  bool isImmThumbSR() const {
    return isImmediate<1, 33>();
  }
1247 | ||||
1248 | template<int shift> | |||
1249 | bool isExpImmValue(uint64_t Value) const { | |||
1250 | uint64_t mask = (1 << shift) - 1; | |||
1251 | if ((Value & mask) != 0 || (Value >> shift) > 0xff) | |||
1252 | return false; | |||
1253 | return true; | |||
1254 | } | |||
1255 | ||||
  // True if the operand is a constant accepted by isExpImmValue<shift>.
  template<int shift>
  bool isExpImm() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    return isExpImmValue<shift>(CE->getValue());
  }

  // Like isExpImm, but tests the constant with its low 'size' bits inverted.
  template<int shift, int size>
  bool isInvertedExpImm() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    uint64_t OriginalValue = CE->getValue();
    uint64_t InvertedValue = OriginalValue ^ (((uint64_t)1 << size) - 1);
    return isExpImmValue<shift>(InvertedValue);
  }
1275 | ||||
  // PKH instruction shift amounts.
  bool isPKHLSLImm() const {
    return isImmediate<0, 32>();
  }

  bool isPKHASRImm() const {
    return isImmediate<0, 33>();
  }

  // ADR pseudo-instruction label operand: either a symbolic expression
  // (fixed up later) or a constant representable as a modified immediate
  // (directly or negated).
  bool isAdrLabel() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;

    // If it is a constant, it must fit into a modified immediate encoding.
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return (ARM_AM::getSOImmVal(Value) != -1 ||
            ARM_AM::getSOImmVal(-Value) != -1);
  }

  // Thumb2 modified immediate (T2 so_imm encoding).
  bool isT2SOImm() const {
    // If we have an immediate that's not a constant, treat it as an expression
    // needing a fixup.
    if (isImm() && !isa<MCConstantExpr>(getImm())) {
      // We want to avoid matching :upper16: and :lower16: as we want these
      // expressions to match in isImm0_65535Expr()
      const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(getImm());
      return (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 &&
                             ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16));
    }
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getT2SOImmVal(Value) != -1;
  }

  // Constant that is a T2 modified immediate only after bitwise inversion
  // (used to flip e.g. AND <-> BIC).
  bool isT2SOImmNot() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getT2SOImmVal(Value) == -1 &&
           ARM_AM::getT2SOImmVal(~Value) != -1;
  }

  // Constant that is a T2 modified immediate only after negation
  // (used to flip e.g. ADD <-> SUB).
  bool isT2SOImmNeg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // Only use this when not representable as a plain so_imm.
    return ARM_AM::getT2SOImmVal(Value) == -1 &&
           ARM_AM::getT2SOImmVal(-Value) != -1;
  }

  // SETEND takes a single-bit immediate: 0 (LE) or 1 (BE).
  bool isSetEndImm() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value == 1 || Value == 0;
  }
1342 | ||||
  // Simple operand-kind predicates.
  bool isReg() const override { return Kind == k_Register; }
  bool isRegList() const { return Kind == k_RegisterList; }
  bool isRegListWithAPSR() const {
    return Kind == k_RegisterListWithAPSR || Kind == k_RegisterList;
  }
  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
  bool isFPSRegListWithVPR() const { return Kind == k_FPSRegisterListWithVPR; }
  bool isFPDRegListWithVPR() const { return Kind == k_FPDRegisterListWithVPR; }
  bool isToken() const override { return Kind == k_Token; }
  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
  bool isInstSyncBarrierOpt() const { return Kind == k_InstSyncBarrierOpt; }
  bool isTraceSyncBarrierOpt() const { return Kind == k_TraceSyncBarrierOpt; }
  bool isMem() const override {
    return isGPRMem() || isMVEMem();
  }
  // Memory operand whose base is GPR or MVE Q-register and whose register
  // offset (if any) is an MVE Q-register.
  bool isMVEMem() const {
    if (Kind != k_Memory)
      return false;
    if (Memory.BaseRegNum &&
        !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.BaseRegNum) &&
        !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Memory.BaseRegNum))
      return false;
    if (Memory.OffsetRegNum &&
        !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
            Memory.OffsetRegNum))
      return false;
    return true;
  }
  // Memory operand whose base and register offset (if any) are both GPRs.
  bool isGPRMem() const {
    if (Kind != k_Memory)
      return false;
    if (Memory.BaseRegNum &&
        !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.BaseRegNum))
      return false;
    if (Memory.OffsetRegNum &&
        !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.OffsetRegNum))
      return false;
    return true;
  }
  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
  // Register shifted by register; both registers must be GPRs.
  bool isRegShiftedReg() const {
    return Kind == k_ShiftedRegister &&
           ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
               RegShiftedReg.SrcReg) &&
           ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
               RegShiftedReg.ShiftReg);
  }
  // Register shifted by immediate; the source register must be a GPR.
  bool isRegShiftedImm() const {
    return Kind == k_ShiftedImmediate &&
           ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
               RegShiftedImm.SrcReg);
  }
  bool isRotImm() const { return Kind == k_RotateImmediate; }
1397 | ||||
  // Constant immediate that is a power of two in [Min, Max] (inclusive).
  template<unsigned Min, unsigned Max>
  bool isPowerTwoInRange() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && countPopulation((uint64_t)Value) == 1 &&
           Value >= Min && Value <= Max;
  }
  bool isModImm() const { return Kind == k_ModifiedImmediate; }

  // Constant encodable as an ARM modified immediate only after bitwise
  // inversion (allows e.g. MOV <-> MVN rewriting).
  bool isModImmNot() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getSOImmVal(~Value) != -1;
  }

  // Constant encodable as an ARM modified immediate only after negation
  // (allows e.g. ADD <-> SUB rewriting).
  bool isModImmNeg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getSOImmVal(Value) == -1 &&
           ARM_AM::getSOImmVal(-Value) != -1;
  }

  // Constant whose negation lies in [1, 7] (Thumb ADD<->SUB trick).
  bool isThumbModImmNeg1_7() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int32_t Value = -(int32_t)CE->getValue();
    return 0 < Value && Value < 8;
  }

  // Constant whose negation lies in [8, 255].
  bool isThumbModImmNeg8_255() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int32_t Value = -(int32_t)CE->getValue();
    return 7 < Value && Value < 256;
  }

  bool isConstantPoolImm() const { return Kind == k_ConstantPoolImmediate; }
  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
  // Post-indexed register, possibly with a shift applied; must be a GPR.
  bool isPostIdxRegShifted() const {
    return Kind == k_PostIndexRegister &&
           ARMMCRegisterClasses[ARM::GPRRegClassID].contains(PostIdxReg.RegNum);
  }
  // Post-indexed register with no shift.
  bool isPostIdxReg() const {
    return isPostIdxRegShifted() && PostIdxReg.ShiftTy == ARM_AM::no_shift;
  }
  // Memory operand with no offset at all; optionally require an exact
  // alignment specifier (alignOK accepts any alignment).
  bool isMemNoOffset(bool alignOK = false, unsigned Alignment = 0) const {
    if (!isGPRMem())
      return false;
    // No offset of any kind.
    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
           (alignOK || Memory.Alignment == Alignment);
  }
  // As isMemNoOffset, but the base must be a GPR other than PC.
  bool isMemNoOffsetT2(bool alignOK = false, unsigned Alignment = 0) const {
    if (!isGPRMem())
      return false;

    if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
            Memory.BaseRegNum))
      return false;

    // No offset of any kind.
    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
           (alignOK || Memory.Alignment == Alignment);
  }
  // As isMemNoOffset, but the base must be in the restricted GPR class
  // (no SP, no PC).
  bool isMemNoOffsetT2NoSp(bool alignOK = false, unsigned Alignment = 0) const {
    if (!isGPRMem())
      return false;

    if (!ARMMCRegisterClasses[ARM::rGPRRegClassID].contains(
            Memory.BaseRegNum))
      return false;

    // No offset of any kind.
    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
           (alignOK || Memory.Alignment == Alignment);
  }
  // As isMemNoOffset, but the base must be a low (Thumb) GPR.
  bool isMemNoOffsetT(bool alignOK = false, unsigned Alignment = 0) const {
    if (!isGPRMem())
      return false;

    if (!ARMMCRegisterClasses[ARM::tGPRRegClassID].contains(
            Memory.BaseRegNum))
      return false;

    // No offset of any kind.
    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
           (alignOK || Memory.Alignment == Alignment);
  }
  // PC-relative load/store with a 12-bit immediate offset.
  bool isMemPCRelImm12() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Base register must be PC.
    if (Memory.BaseRegNum != ARM::PC)
      return false;
    // Immediate offset in range [-4095, 4095].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      // INT32_MIN encodes #-0 (subtract bit set, zero offset).
      return (Val > -4096 && Val < 4096) ||
             (Val == std::numeric_limits<int32_t>::min());
    }
    return false;
  }
1509 | ||||
1510 | bool isAlignedMemory() const { | |||
1511 | return isMemNoOffset(true); | |||
1512 | } | |||
1513 | ||||
1514 | bool isAlignedMemoryNone() const { | |||
1515 | return isMemNoOffset(false, 0); | |||
1516 | } | |||
1517 | ||||
1518 | bool isDupAlignedMemoryNone() const { | |||
1519 | return isMemNoOffset(false, 0); | |||
1520 | } | |||
1521 | ||||
1522 | bool isAlignedMemory16() const { | |||
1523 | if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2. | |||
1524 | return true; | |||
1525 | return isMemNoOffset(false, 0); | |||
1526 | } | |||
1527 | ||||
1528 | bool isDupAlignedMemory16() const { | |||
1529 | if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2. | |||
1530 | return true; | |||
1531 | return isMemNoOffset(false, 0); | |||
1532 | } | |||
1533 | ||||
1534 | bool isAlignedMemory32() const { | |||
1535 | if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4. | |||
1536 | return true; | |||
1537 | return isMemNoOffset(false, 0); | |||
1538 | } | |||
1539 | ||||
1540 | bool isDupAlignedMemory32() const { | |||
1541 | if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4. | |||
1542 | return true; | |||
1543 | return isMemNoOffset(false, 0); | |||
1544 | } | |||
1545 | ||||
1546 | bool isAlignedMemory64() const { | |||
1547 | if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8. | |||
1548 | return true; | |||
1549 | return isMemNoOffset(false, 0); | |||
1550 | } | |||
1551 | ||||
1552 | bool isDupAlignedMemory64() const { | |||
1553 | if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8. | |||
1554 | return true; | |||
1555 | return isMemNoOffset(false, 0); | |||
1556 | } | |||
1557 | ||||
1558 | bool isAlignedMemory64or128() const { | |||
1559 | if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8. | |||
1560 | return true; | |||
1561 | if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16. | |||
1562 | return true; | |||
1563 | return isMemNoOffset(false, 0); | |||
1564 | } | |||
1565 | ||||
1566 | bool isDupAlignedMemory64or128() const { | |||
1567 | if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8. | |||
1568 | return true; | |||
1569 | if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16. | |||
1570 | return true; | |||
1571 | return isMemNoOffset(false, 0); | |||
1572 | } | |||
1573 | ||||
1574 | bool isAlignedMemory64or128or256() const { | |||
1575 | if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8. | |||
1576 | return true; | |||
1577 | if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16. | |||
1578 | return true; | |||
1579 | if (isMemNoOffset(false, 32)) // alignment in bytes for 256-bits is 32. | |||
1580 | return true; | |||
1581 | return isMemNoOffset(false, 0); | |||
1582 | } | |||
1583 | ||||
  // ARM addressing mode 2: [Rn, Rm] or [Rn, #imm] with imm in
  // [-4095, 4095], no alignment specifier.
  bool isAddrMode2() const {
    if (!isGPRMem() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return true;
    // Immediate offset in range [-4095, 4095].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return Val > -4096 && Val < 4096;
    }
    return false;
  }

  // Stand-alone immediate offset for addressing mode 2 post/pre-indexing.
  bool isAM2OffsetImm() const {
    if (!isImm()) return false;
    // Immediate offset in range [-4095, 4095].
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    // INT32_MIN encodes #-0.
    return (Val == std::numeric_limits<int32_t>::min()) ||
           (Val > -4096 && Val < 4096);
  }
1606 | ||||
  // ARM addressing mode 3: [Rn, Rm] (no shift) or [Rn, #imm] with imm in
  // [-255, 255].
  bool isAddrMode3() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isGPRMem() || Memory.Alignment != 0) return false;
    // No shifts are legal for AM3.
    if (Memory.ShiftType != ARM_AM::no_shift) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return true;
    // Immediate offset in range [-255, 255].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      // The #-0 offset is encoded as std::numeric_limits<int32_t>::min(), and
      // we have to check for this too.
      return (Val > -256 && Val < 256) ||
             Val == std::numeric_limits<int32_t>::min();
    }
    return false;
  }

  // Stand-alone offset (register or immediate) for addressing mode 3
  // post/pre-indexing.
  bool isAM3Offset() const {
    if (isPostIdxReg())
      return true;
    if (!isImm())
      return false;
    // Immediate offset in range [-255, 255].
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    // Special case, #-0 is std::numeric_limits<int32_t>::min().
    return (Val > -256 && Val < 256) ||
           Val == std::numeric_limits<int32_t>::min();
  }
1643 | ||||
  // ARM addressing mode 5 (VFP load/store): [Rn, #imm] with imm a multiple
  // of 4 in [-1020, 1020]; no register offset allowed.
  bool isAddrMode5() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isGPRMem() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return false;
    // Immediate offset in range [-1020, 1020] and a multiple of 4.
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      // INT32_MIN encodes #-0.
      return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
             Val == std::numeric_limits<int32_t>::min();
    }
    return false;
  }

  // FP16 variant of addressing mode 5: offset is a multiple of 2 in
  // [-510, 510].
  bool isAddrMode5FP16() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isGPRMem() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return false;
    // Immediate offset in range [-510, 510] and a multiple of 2.
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      // INT32_MIN encodes #-0.
      return (Val >= -510 && Val <= 510 && ((Val & 1) == 0)) ||
             Val == std::numeric_limits<int32_t>::min();
    }
    return false;
  }
1681 | ||||
1682 | bool isMemTBB() const { | |||
1683 | if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative || | |||
1684 | Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0) | |||
1685 | return false; | |||
1686 | return true; | |||
1687 | } | |||
1688 | ||||
1689 | bool isMemTBH() const { | |||
1690 | if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative || | |||
1691 | Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 || | |||
1692 | Memory.Alignment != 0 ) | |||
1693 | return false; | |||
1694 | return true; | |||
1695 | } | |||
1696 | ||||
1697 | bool isMemRegOffset() const { | |||
1698 | if (!isGPRMem() || !Memory.OffsetRegNum || Memory.Alignment != 0) | |||
1699 | return false; | |||
1700 | return true; | |||
1701 | } | |||
1702 | ||||
1703 | bool isT2MemRegOffset() const { | |||
1704 | if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative || | |||
1705 | Memory.Alignment != 0 || Memory.BaseRegNum == ARM::PC) | |||
1706 | return false; | |||
1707 | // Only lsl #{0, 1, 2, 3} allowed. | |||
1708 | if (Memory.ShiftType == ARM_AM::no_shift) | |||
1709 | return true; | |||
1710 | if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3) | |||
1711 | return false; | |||
1712 | return true; | |||
1713 | } | |||
1714 | ||||
  bool isMemThumbRR() const {
    // Thumb reg+reg addressing is simple. Just two registers, a base and
    // an offset. No shifts, negations or any other complicating factors.
    if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
      return false;
    // Both registers must be low (r0-r7) for the 16-bit encoding.
    return isARMLowRegister(Memory.BaseRegNum) &&
           (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
  }

  // Thumb [Rn, #imm] with the immediate scaled by 4.
  bool isMemThumbRIs4() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 124].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return Val >= 0 && Val <= 124 && (Val % 4) == 0;
    }
    return false;
  }

  // Thumb [Rn, #imm] with the immediate scaled by 2.
  bool isMemThumbRIs2() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 2 in range [0, 62].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return Val >= 0 && Val <= 62 && (Val % 2) == 0;
    }
    return false;
  }

  // Thumb [Rn, #imm] with an unscaled immediate.
  bool isMemThumbRIs1() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 31].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return Val >= 0 && Val <= 31;
    }
    return false;
  }

  // Thumb SP-relative [sp, #imm] with the immediate scaled by 4.
  bool isMemThumbSPI() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
    }
    return false;
  }
1776 | ||||
  // [Rn, #imm] with imm an 8-bit value scaled by 4: a multiple of 4 in
  // [-1020, 1020].
  bool isMemImm8s4Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [-1020, 1020].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      // Special case, #-0 is std::numeric_limits<int32_t>::min().
      return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) ||
             Val == std::numeric_limits<int32_t>::min();
    }
    return false;
  }
1795 | ||||
1796 | bool isMemImm7s4Offset() const { | |||
1797 | // If we have an immediate that's not a constant, treat it as a label | |||
1798 | // reference needing a fixup. If it is a constant, it's something else | |||
1799 | // and we reject it. | |||
1800 | if (isImm() && !isa<MCConstantExpr>(getImm())) | |||
1801 | return true; | |||
1802 | if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0 || | |||
1803 | !ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains( | |||
1804 | Memory.BaseRegNum)) | |||
1805 | return false; | |||
1806 | // Immediate offset a multiple of 4 in range [-508, 508]. | |||
1807 | if (!Memory.OffsetImm) return true; | |||
1808 | if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) { | |||
1809 | int64_t Val = CE->getValue(); | |||
1810 | // Special case, #-0 is INT32_MIN. | |||
1811 | return (Val >= -508 && Val <= 508 && (Val & 3) == 0) || Val == INT32_MIN(-2147483647-1); | |||
1812 | } | |||
1813 | return false; | |||
1814 | } | |||
1815 | ||||
  // [Rn, #imm] with imm a non-negative multiple of 4 up to 1020.
  bool isMemImm0_1020s4Offset() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
    }
    return false;
  }

  // [Rn, #imm] with an 8-bit signed immediate in [-255, 255]; base != PC.
  bool isMemImm8Offset() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Base reg of PC isn't allowed for these encodings.
    if (Memory.BaseRegNum == ARM::PC) return false;
    // Immediate offset in range [-255, 255].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      // INT32_MIN encodes #-0.
      return (Val == std::numeric_limits<int32_t>::min()) ||
             (Val > -256 && Val < 256);
    }
    return false;
  }
1842 | ||||
1843 | template<unsigned Bits, unsigned RegClassID> | |||
1844 | bool isMemImm7ShiftedOffset() const { | |||
1845 | if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0 || | |||
1846 | !ARMMCRegisterClasses[RegClassID].contains(Memory.BaseRegNum)) | |||
1847 | return false; | |||
1848 | ||||
1849 | // Expect an immediate offset equal to an element of the range | |||
1850 | // [-127, 127], shifted left by Bits. | |||
1851 | ||||
1852 | if (!Memory.OffsetImm) return true; | |||
1853 | if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) { | |||
1854 | int64_t Val = CE->getValue(); | |||
1855 | ||||
1856 | // INT32_MIN is a special-case value (indicating the encoding with | |||
1857 | // zero offset and the subtract bit set) | |||
1858 | if (Val == INT32_MIN(-2147483647-1)) | |||
1859 | return true; | |||
1860 | ||||
1861 | unsigned Divisor = 1U << Bits; | |||
1862 | ||||
1863 | // Check that the low bits are zero | |||
1864 | if (Val % Divisor != 0) | |||
1865 | return false; | |||
1866 | ||||
1867 | // Check that the remaining offset is within range. | |||
1868 | Val /= Divisor; | |||
1869 | return (Val >= -127 && Val <= 127); | |||
1870 | } | |||
1871 | return false; | |||
1872 | } | |||
1873 | ||||
  // MVE scatter/gather form [Rn, Qm] (optionally with uxtw #shift): GPR
  // base (not PC), MVE Q-register offset, shift must match exactly.
  template <int shift> bool isMemRegRQOffset() const {
    if (!isMVEMem() || Memory.OffsetImm != 0 || Memory.Alignment != 0)
      return false;

    if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
            Memory.BaseRegNum))
      return false;
    if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
            Memory.OffsetRegNum))
      return false;

    // shift == 0 requires no shift at all on the offset register.
    if (shift == 0 && Memory.ShiftType != ARM_AM::no_shift)
      return false;

    // Otherwise the offset must be uxtw-shifted by exactly 'shift'.
    if (shift > 0 &&
        (Memory.ShiftType != ARM_AM::uxtw || Memory.ShiftImm != shift))
      return false;

    return true;
  }
1894 | ||||
1895 | template <int shift> bool isMemRegQOffset() const { | |||
1896 | if (!isMVEMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) | |||
1897 | return false; | |||
1898 | ||||
1899 | if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains( | |||
1900 | Memory.BaseRegNum)) | |||
1901 | return false; | |||
1902 | ||||
1903 | if (!Memory.OffsetImm) | |||
1904 | return true; | |||
1905 | static_assert(shift < 56, | |||
1906 | "Such that we dont shift by a value higher than 62"); | |||
1907 | if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) { | |||
1908 | int64_t Val = CE->getValue(); | |||
1909 | ||||
1910 | // The value must be a multiple of (1 << shift) | |||
1911 | if ((Val & ((1U << shift) - 1)) != 0) | |||
1912 | return false; | |||
1913 | ||||
1914 | // And be in the right range, depending on the amount that it is shifted | |||
1915 | // by. Shift 0, is equal to 7 unsigned bits, the sign bit is set | |||
1916 | // separately. | |||
1917 | int64_t Range = (1U << (7 + shift)) - 1; | |||
1918 | return (Val == INT32_MIN(-2147483647-1)) || (Val > -Range && Val < Range); | |||
1919 | } | |||
1920 | return false; | |||
1921 | } | |||
1922 | ||||
  // [Rn, #imm] with an unsigned 8-bit immediate in [0, 255].
  bool isMemPosImm8Offset() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 255].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return Val >= 0 && Val < 256;
    }
    return false;
  }

  // [Rn, #-imm] with a strictly negative 8-bit immediate; base != PC.
  bool isMemNegImm8Offset() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Base reg of PC isn't allowed for these encodings.
    if (Memory.BaseRegNum == ARM::PC) return false;
    // Immediate offset in range [-255, -1].
    if (!Memory.OffsetImm) return false;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      // INT32_MIN encodes #-0.
      return (Val == std::numeric_limits<int32_t>::min()) ||
             (Val > -256 && Val < 0);
    }
    return false;
  }

  // [Rn, #imm] with an unsigned 12-bit immediate in [0, 4095].
  bool isMemUImm12Offset() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 4095].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return (Val >= 0 && Val < 4096);
    }
    return false;
  }
1961 | ||||
  // [Rn, #imm] with a signed 12-bit immediate in [-4095, 4095], or a
  // symbolic expression resolved by a later fixup.
  bool isMemImm12Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.

    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;

    if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [-4095, 4095].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      // INT32_MIN encodes #-0.
      return (Val > -4096 && Val < 4096) ||
             (Val == std::numeric_limits<int32_t>::min());
    }
    // If we have an immediate that's not a constant, treat it as a
    // symbolic expression needing a fixup.
    return true;
  }
1983 | ||||
  bool isConstPoolAsmImm() const {
    // Delay processing of Constant Pool Immediate, this will turn into
    // a constant. Match no other operand
    return (isConstantPoolImm());
  }

  // Post-indexed immediate in [-255, 255] (INT32_MIN encodes #-0).
  bool isPostIdxImm8() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return (Val > -256 && Val < 256) ||
           (Val == std::numeric_limits<int32_t>::min());
  }

  // Post-indexed immediate, multiple of 4 in [-1020, 1020]
  // (INT32_MIN encodes #-0).
  bool isPostIdxImm8s4() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
           (Val == std::numeric_limits<int32_t>::min());
  }
2007 | ||||
  // Simple kind predicates for MSR-mask, banked-register and processor
  // iflags operands.
  bool isMSRMask() const { return Kind == k_MSRMask; }
  bool isBankedReg() const { return Kind == k_BankedReg; }
  bool isProcIFlags() const { return Kind == k_ProcIFlags; }

  // NEON operands.
  // Vector-list operands carry an isDoubleSpaced flag; these two predicates
  // partition k_VectorList operands by that flag.
  bool isSingleSpacedVectorList() const {
    return Kind == k_VectorList && !VectorList.isDoubleSpaced;
  }

  bool isDoubleSpacedVectorList() const {
    return Kind == k_VectorList && VectorList.isDoubleSpaced;
  }
2020 | ||||
2021 | bool isVecListOneD() const { | |||
2022 | if (!isSingleSpacedVectorList()) return false; | |||
2023 | return VectorList.Count == 1; | |||
2024 | } | |||
2025 | ||||
2026 | bool isVecListTwoMQ() const { | |||
2027 | return isSingleSpacedVectorList() && VectorList.Count == 2 && | |||
2028 | ARMMCRegisterClasses[ARM::MQPRRegClassID].contains( | |||
2029 | VectorList.RegNum); | |||
2030 | } | |||
2031 | ||||
2032 | bool isVecListDPair() const { | |||
2033 | if (!isSingleSpacedVectorList()) return false; | |||
2034 | return (ARMMCRegisterClasses[ARM::DPairRegClassID] | |||
2035 | .contains(VectorList.RegNum)); | |||
2036 | } | |||
2037 | ||||
2038 | bool isVecListThreeD() const { | |||
2039 | if (!isSingleSpacedVectorList()) return false; | |||
2040 | return VectorList.Count == 3; | |||
2041 | } | |||
2042 | ||||
2043 | bool isVecListFourD() const { | |||
2044 | if (!isSingleSpacedVectorList()) return false; | |||
2045 | return VectorList.Count == 4; | |||
2046 | } | |||
2047 | ||||
2048 | bool isVecListDPairSpaced() const { | |||
2049 | if (Kind != k_VectorList) return false; | |||
2050 | if (isSingleSpacedVectorList()) return false; | |||
2051 | return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID] | |||
2052 | .contains(VectorList.RegNum)); | |||
2053 | } | |||
2054 | ||||
2055 | bool isVecListThreeQ() const { | |||
2056 | if (!isDoubleSpacedVectorList()) return false; | |||
2057 | return VectorList.Count == 3; | |||
2058 | } | |||
2059 | ||||
2060 | bool isVecListFourQ() const { | |||
2061 | if (!isDoubleSpacedVectorList()) return false; | |||
2062 | return VectorList.Count == 4; | |||
2063 | } | |||
2064 | ||||
2065 | bool isVecListFourMQ() const { | |||
2066 | return isSingleSpacedVectorList() && VectorList.Count == 4 && | |||
2067 | ARMMCRegisterClasses[ARM::MQPRRegClassID].contains( | |||
2068 | VectorList.RegNum); | |||
2069 | } | |||
2070 | ||||
2071 | bool isSingleSpacedVectorAllLanes() const { | |||
2072 | return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced; | |||
2073 | } | |||
2074 | ||||
2075 | bool isDoubleSpacedVectorAllLanes() const { | |||
2076 | return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced; | |||
2077 | } | |||
2078 | ||||
2079 | bool isVecListOneDAllLanes() const { | |||
2080 | if (!isSingleSpacedVectorAllLanes()) return false; | |||
2081 | return VectorList.Count == 1; | |||
2082 | } | |||
2083 | ||||
2084 | bool isVecListDPairAllLanes() const { | |||
2085 | if (!isSingleSpacedVectorAllLanes()) return false; | |||
2086 | return (ARMMCRegisterClasses[ARM::DPairRegClassID] | |||
2087 | .contains(VectorList.RegNum)); | |||
2088 | } | |||
2089 | ||||
2090 | bool isVecListDPairSpacedAllLanes() const { | |||
2091 | if (!isDoubleSpacedVectorAllLanes()) return false; | |||
2092 | return VectorList.Count == 2; | |||
2093 | } | |||
2094 | ||||
2095 | bool isVecListThreeDAllLanes() const { | |||
2096 | if (!isSingleSpacedVectorAllLanes()) return false; | |||
2097 | return VectorList.Count == 3; | |||
2098 | } | |||
2099 | ||||
2100 | bool isVecListThreeQAllLanes() const { | |||
2101 | if (!isDoubleSpacedVectorAllLanes()) return false; | |||
2102 | return VectorList.Count == 3; | |||
2103 | } | |||
2104 | ||||
2105 | bool isVecListFourDAllLanes() const { | |||
2106 | if (!isSingleSpacedVectorAllLanes()) return false; | |||
2107 | return VectorList.Count == 4; | |||
2108 | } | |||
2109 | ||||
2110 | bool isVecListFourQAllLanes() const { | |||
2111 | if (!isDoubleSpacedVectorAllLanes()) return false; | |||
2112 | return VectorList.Count == 4; | |||
2113 | } | |||
2114 | ||||
2115 | bool isSingleSpacedVectorIndexed() const { | |||
2116 | return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced; | |||
2117 | } | |||
2118 | ||||
2119 | bool isDoubleSpacedVectorIndexed() const { | |||
2120 | return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced; | |||
2121 | } | |||
2122 | ||||
2123 | bool isVecListOneDByteIndexed() const { | |||
2124 | if (!isSingleSpacedVectorIndexed()) return false; | |||
2125 | return VectorList.Count == 1 && VectorList.LaneIndex <= 7; | |||
2126 | } | |||
2127 | ||||
2128 | bool isVecListOneDHWordIndexed() const { | |||
2129 | if (!isSingleSpacedVectorIndexed()) return false; | |||
2130 | return VectorList.Count == 1 && VectorList.LaneIndex <= 3; | |||
2131 | } | |||
2132 | ||||
2133 | bool isVecListOneDWordIndexed() const { | |||
2134 | if (!isSingleSpacedVectorIndexed()) return false; | |||
2135 | return VectorList.Count == 1 && VectorList.LaneIndex <= 1; | |||
2136 | } | |||
2137 | ||||
2138 | bool isVecListTwoDByteIndexed() const { | |||
2139 | if (!isSingleSpacedVectorIndexed()) return false; | |||
2140 | return VectorList.Count == 2 && VectorList.LaneIndex <= 7; | |||
2141 | } | |||
2142 | ||||
2143 | bool isVecListTwoDHWordIndexed() const { | |||
2144 | if (!isSingleSpacedVectorIndexed()) return false; | |||
2145 | return VectorList.Count == 2 && VectorList.LaneIndex <= 3; | |||
2146 | } | |||
2147 | ||||
2148 | bool isVecListTwoQWordIndexed() const { | |||
2149 | if (!isDoubleSpacedVectorIndexed()) return false; | |||
2150 | return VectorList.Count == 2 && VectorList.LaneIndex <= 1; | |||
2151 | } | |||
2152 | ||||
2153 | bool isVecListTwoQHWordIndexed() const { | |||
2154 | if (!isDoubleSpacedVectorIndexed()) return false; | |||
2155 | return VectorList.Count == 2 && VectorList.LaneIndex <= 3; | |||
2156 | } | |||
2157 | ||||
2158 | bool isVecListTwoDWordIndexed() const { | |||
2159 | if (!isSingleSpacedVectorIndexed()) return false; | |||
2160 | return VectorList.Count == 2 && VectorList.LaneIndex <= 1; | |||
2161 | } | |||
2162 | ||||
2163 | bool isVecListThreeDByteIndexed() const { | |||
2164 | if (!isSingleSpacedVectorIndexed()) return false; | |||
2165 | return VectorList.Count == 3 && VectorList.LaneIndex <= 7; | |||
2166 | } | |||
2167 | ||||
2168 | bool isVecListThreeDHWordIndexed() const { | |||
2169 | if (!isSingleSpacedVectorIndexed()) return false; | |||
2170 | return VectorList.Count == 3 && VectorList.LaneIndex <= 3; | |||
2171 | } | |||
2172 | ||||
2173 | bool isVecListThreeQWordIndexed() const { | |||
2174 | if (!isDoubleSpacedVectorIndexed()) return false; | |||
2175 | return VectorList.Count == 3 && VectorList.LaneIndex <= 1; | |||
2176 | } | |||
2177 | ||||
2178 | bool isVecListThreeQHWordIndexed() const { | |||
2179 | if (!isDoubleSpacedVectorIndexed()) return false; | |||
2180 | return VectorList.Count == 3 && VectorList.LaneIndex <= 3; | |||
2181 | } | |||
2182 | ||||
2183 | bool isVecListThreeDWordIndexed() const { | |||
2184 | if (!isSingleSpacedVectorIndexed()) return false; | |||
2185 | return VectorList.Count == 3 && VectorList.LaneIndex <= 1; | |||
2186 | } | |||
2187 | ||||
2188 | bool isVecListFourDByteIndexed() const { | |||
2189 | if (!isSingleSpacedVectorIndexed()) return false; | |||
2190 | return VectorList.Count == 4 && VectorList.LaneIndex <= 7; | |||
2191 | } | |||
2192 | ||||
2193 | bool isVecListFourDHWordIndexed() const { | |||
2194 | if (!isSingleSpacedVectorIndexed()) return false; | |||
2195 | return VectorList.Count == 4 && VectorList.LaneIndex <= 3; | |||
2196 | } | |||
2197 | ||||
2198 | bool isVecListFourQWordIndexed() const { | |||
2199 | if (!isDoubleSpacedVectorIndexed()) return false; | |||
2200 | return VectorList.Count == 4 && VectorList.LaneIndex <= 1; | |||
2201 | } | |||
2202 | ||||
2203 | bool isVecListFourQHWordIndexed() const { | |||
2204 | if (!isDoubleSpacedVectorIndexed()) return false; | |||
2205 | return VectorList.Count == 4 && VectorList.LaneIndex <= 3; | |||
2206 | } | |||
2207 | ||||
2208 | bool isVecListFourDWordIndexed() const { | |||
2209 | if (!isSingleSpacedVectorIndexed()) return false; | |||
2210 | return VectorList.Count == 4 && VectorList.LaneIndex <= 1; | |||
2211 | } | |||
2212 | ||||
2213 | bool isVectorIndex() const { return Kind == k_VectorIndex; } | |||
2214 | ||||
2215 | template <unsigned NumLanes> | |||
2216 | bool isVectorIndexInRange() const { | |||
2217 | if (Kind != k_VectorIndex) return false; | |||
2218 | return VectorIndex.Val < NumLanes; | |||
2219 | } | |||
2220 | ||||
2221 | bool isVectorIndex8() const { return isVectorIndexInRange<8>(); } | |||
2222 | bool isVectorIndex16() const { return isVectorIndexInRange<4>(); } | |||
2223 | bool isVectorIndex32() const { return isVectorIndexInRange<2>(); } | |||
2224 | bool isVectorIndex64() const { return isVectorIndexInRange<1>(); } | |||
2225 | ||||
2226 | template<int PermittedValue, int OtherPermittedValue> | |||
2227 | bool isMVEPairVectorIndex() const { | |||
2228 | if (Kind != k_VectorIndex) return false; | |||
2229 | return VectorIndex.Val == PermittedValue || | |||
2230 | VectorIndex.Val == OtherPermittedValue; | |||
2231 | } | |||
2232 | ||||
2233 | bool isNEONi8splat() const { | |||
2234 | if (!isImm()) return false; | |||
2235 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); | |||
2236 | // Must be a constant. | |||
2237 | if (!CE) return false; | |||
2238 | int64_t Value = CE->getValue(); | |||
2239 | // i8 value splatted across 8 bytes. The immediate is just the 8 byte | |||
2240 | // value. | |||
2241 | return Value >= 0 && Value < 256; | |||
2242 | } | |||
2243 | ||||
2244 | bool isNEONi16splat() const { | |||
2245 | if (isNEONByteReplicate(2)) | |||
2246 | return false; // Leave that for bytes replication and forbid by default. | |||
2247 | if (!isImm()) | |||
2248 | return false; | |||
2249 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); | |||
2250 | // Must be a constant. | |||
2251 | if (!CE) return false; | |||
2252 | unsigned Value = CE->getValue(); | |||
2253 | return ARM_AM::isNEONi16splat(Value); | |||
2254 | } | |||
2255 | ||||
2256 | bool isNEONi16splatNot() const { | |||
2257 | if (!isImm()) | |||
2258 | return false; | |||
2259 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); | |||
2260 | // Must be a constant. | |||
2261 | if (!CE) return false; | |||
2262 | unsigned Value = CE->getValue(); | |||
2263 | return ARM_AM::isNEONi16splat(~Value & 0xffff); | |||
2264 | } | |||
2265 | ||||
2266 | bool isNEONi32splat() const { | |||
2267 | if (isNEONByteReplicate(4)) | |||
2268 | return false; // Leave that for bytes replication and forbid by default. | |||
2269 | if (!isImm()) | |||
2270 | return false; | |||
2271 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); | |||
2272 | // Must be a constant. | |||
2273 | if (!CE) return false; | |||
2274 | unsigned Value = CE->getValue(); | |||
2275 | return ARM_AM::isNEONi32splat(Value); | |||
2276 | } | |||
2277 | ||||
2278 | bool isNEONi32splatNot() const { | |||
2279 | if (!isImm()) | |||
2280 | return false; | |||
2281 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); | |||
2282 | // Must be a constant. | |||
2283 | if (!CE) return false; | |||
2284 | unsigned Value = CE->getValue(); | |||
2285 | return ARM_AM::isNEONi32splat(~Value); | |||
2286 | } | |||
2287 | ||||
2288 | static bool isValidNEONi32vmovImm(int64_t Value) { | |||
2289 | // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X, | |||
2290 | // for VMOV/VMVN only, 00Xf or 0Xff are also accepted. | |||
2291 | return ((Value & 0xffffffffffffff00) == 0) || | |||
2292 | ((Value & 0xffffffffffff00ff) == 0) || | |||
2293 | ((Value & 0xffffffffff00ffff) == 0) || | |||
2294 | ((Value & 0xffffffff00ffffff) == 0) || | |||
2295 | ((Value & 0xffffffffffff00ff) == 0xff) || | |||
2296 | ((Value & 0xffffffffff00ffff) == 0xffff); | |||
2297 | } | |||
2298 | ||||
2299 | bool isNEONReplicate(unsigned Width, unsigned NumElems, bool Inv) const { | |||
2300 | assert((Width == 8 || Width == 16 || Width == 32) &&(static_cast <bool> ((Width == 8 || Width == 16 || Width == 32) && "Invalid element width") ? void (0) : __assert_fail ("(Width == 8 || Width == 16 || Width == 32) && \"Invalid element width\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2301, __extension__ __PRETTY_FUNCTION__)) | |||
2301 | "Invalid element width")(static_cast <bool> ((Width == 8 || Width == 16 || Width == 32) && "Invalid element width") ? void (0) : __assert_fail ("(Width == 8 || Width == 16 || Width == 32) && \"Invalid element width\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2301, __extension__ __PRETTY_FUNCTION__)); | |||
2302 | assert(NumElems * Width <= 64 && "Invalid result width")(static_cast <bool> (NumElems * Width <= 64 && "Invalid result width") ? void (0) : __assert_fail ("NumElems * Width <= 64 && \"Invalid result width\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2302, __extension__ __PRETTY_FUNCTION__)); | |||
2303 | ||||
2304 | if (!isImm()) | |||
2305 | return false; | |||
2306 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); | |||
2307 | // Must be a constant. | |||
2308 | if (!CE) | |||
2309 | return false; | |||
2310 | int64_t Value = CE->getValue(); | |||
2311 | if (!Value) | |||
2312 | return false; // Don't bother with zero. | |||
2313 | if (Inv) | |||
2314 | Value = ~Value; | |||
2315 | ||||
2316 | uint64_t Mask = (1ull << Width) - 1; | |||
2317 | uint64_t Elem = Value & Mask; | |||
2318 | if (Width == 16 && (Elem & 0x00ff) != 0 && (Elem & 0xff00) != 0) | |||
2319 | return false; | |||
2320 | if (Width == 32 && !isValidNEONi32vmovImm(Elem)) | |||
2321 | return false; | |||
2322 | ||||
2323 | for (unsigned i = 1; i < NumElems; ++i) { | |||
2324 | Value >>= Width; | |||
2325 | if ((Value & Mask) != Elem) | |||
2326 | return false; | |||
2327 | } | |||
2328 | return true; | |||
2329 | } | |||
2330 | ||||
2331 | bool isNEONByteReplicate(unsigned NumBytes) const { | |||
2332 | return isNEONReplicate(8, NumBytes, false); | |||
2333 | } | |||
2334 | ||||
2335 | static void checkNeonReplicateArgs(unsigned FromW, unsigned ToW) { | |||
2336 | assert((FromW == 8 || FromW == 16 || FromW == 32) &&(static_cast <bool> ((FromW == 8 || FromW == 16 || FromW == 32) && "Invalid source width") ? void (0) : __assert_fail ("(FromW == 8 || FromW == 16 || FromW == 32) && \"Invalid source width\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2337, __extension__ __PRETTY_FUNCTION__)) | |||
2337 | "Invalid source width")(static_cast <bool> ((FromW == 8 || FromW == 16 || FromW == 32) && "Invalid source width") ? void (0) : __assert_fail ("(FromW == 8 || FromW == 16 || FromW == 32) && \"Invalid source width\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2337, __extension__ __PRETTY_FUNCTION__)); | |||
2338 | assert((ToW == 16 || ToW == 32 || ToW == 64) &&(static_cast <bool> ((ToW == 16 || ToW == 32 || ToW == 64 ) && "Invalid destination width") ? void (0) : __assert_fail ("(ToW == 16 || ToW == 32 || ToW == 64) && \"Invalid destination width\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2339, __extension__ __PRETTY_FUNCTION__)) | |||
2339 | "Invalid destination width")(static_cast <bool> ((ToW == 16 || ToW == 32 || ToW == 64 ) && "Invalid destination width") ? void (0) : __assert_fail ("(ToW == 16 || ToW == 32 || ToW == 64) && \"Invalid destination width\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2339, __extension__ __PRETTY_FUNCTION__)); | |||
2340 | assert(FromW < ToW && "ToW is not less than FromW")(static_cast <bool> (FromW < ToW && "ToW is not less than FromW" ) ? void (0) : __assert_fail ("FromW < ToW && \"ToW is not less than FromW\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2340, __extension__ __PRETTY_FUNCTION__)); | |||
2341 | } | |||
2342 | ||||
2343 | template<unsigned FromW, unsigned ToW> | |||
2344 | bool isNEONmovReplicate() const { | |||
2345 | checkNeonReplicateArgs(FromW, ToW); | |||
2346 | if (ToW == 64 && isNEONi64splat()) | |||
2347 | return false; | |||
2348 | return isNEONReplicate(FromW, ToW / FromW, false); | |||
2349 | } | |||
2350 | ||||
2351 | template<unsigned FromW, unsigned ToW> | |||
2352 | bool isNEONinvReplicate() const { | |||
2353 | checkNeonReplicateArgs(FromW, ToW); | |||
2354 | return isNEONReplicate(FromW, ToW / FromW, true); | |||
2355 | } | |||
2356 | ||||
2357 | bool isNEONi32vmov() const { | |||
2358 | if (isNEONByteReplicate(4)) | |||
2359 | return false; // Let it to be classified as byte-replicate case. | |||
2360 | if (!isImm()) | |||
2361 | return false; | |||
2362 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); | |||
2363 | // Must be a constant. | |||
2364 | if (!CE) | |||
2365 | return false; | |||
2366 | return isValidNEONi32vmovImm(CE->getValue()); | |||
2367 | } | |||
2368 | ||||
2369 | bool isNEONi32vmovNeg() const { | |||
2370 | if (!isImm()) return false; | |||
2371 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); | |||
2372 | // Must be a constant. | |||
2373 | if (!CE) return false; | |||
2374 | return isValidNEONi32vmovImm(~CE->getValue()); | |||
2375 | } | |||
2376 | ||||
2377 | bool isNEONi64splat() const { | |||
2378 | if (!isImm()) return false; | |||
2379 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); | |||
2380 | // Must be a constant. | |||
2381 | if (!CE) return false; | |||
2382 | uint64_t Value = CE->getValue(); | |||
2383 | // i64 value with each byte being either 0 or 0xff. | |||
2384 | for (unsigned i = 0; i < 8; ++i, Value >>= 8) | |||
2385 | if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false; | |||
2386 | return true; | |||
2387 | } | |||
2388 | ||||
2389 | template<int64_t Angle, int64_t Remainder> | |||
2390 | bool isComplexRotation() const { | |||
2391 | if (!isImm()) return false; | |||
2392 | ||||
2393 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); | |||
2394 | if (!CE) return false; | |||
2395 | uint64_t Value = CE->getValue(); | |||
2396 | ||||
2397 | return (Value % Angle == Remainder && Value <= 270); | |||
2398 | } | |||
2399 | ||||
2400 | bool isMVELongShift() const { | |||
2401 | if (!isImm()) return false; | |||
2402 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); | |||
2403 | // Must be a constant. | |||
2404 | if (!CE) return false; | |||
2405 | uint64_t Value = CE->getValue(); | |||
2406 | return Value >= 1 && Value <= 32; | |||
2407 | } | |||
2408 | ||||
2409 | bool isMveSaturateOp() const { | |||
2410 | if (!isImm()) return false; | |||
2411 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); | |||
2412 | if (!CE) return false; | |||
2413 | uint64_t Value = CE->getValue(); | |||
2414 | return Value == 48 || Value == 64; | |||
2415 | } | |||
2416 | ||||
2417 | bool isITCondCodeNoAL() const { | |||
2418 | if (!isITCondCode()) return false; | |||
2419 | ARMCC::CondCodes CC = getCondCode(); | |||
2420 | return CC != ARMCC::AL; | |||
2421 | } | |||
2422 | ||||
2423 | bool isITCondCodeRestrictedI() const { | |||
2424 | if (!isITCondCode()) | |||
2425 | return false; | |||
2426 | ARMCC::CondCodes CC = getCondCode(); | |||
2427 | return CC == ARMCC::EQ || CC == ARMCC::NE; | |||
2428 | } | |||
2429 | ||||
2430 | bool isITCondCodeRestrictedS() const { | |||
2431 | if (!isITCondCode()) | |||
2432 | return false; | |||
2433 | ARMCC::CondCodes CC = getCondCode(); | |||
2434 | return CC == ARMCC::LT || CC == ARMCC::GT || CC == ARMCC::LE || | |||
2435 | CC == ARMCC::GE; | |||
2436 | } | |||
2437 | ||||
2438 | bool isITCondCodeRestrictedU() const { | |||
2439 | if (!isITCondCode()) | |||
2440 | return false; | |||
2441 | ARMCC::CondCodes CC = getCondCode(); | |||
2442 | return CC == ARMCC::HS || CC == ARMCC::HI; | |||
2443 | } | |||
2444 | ||||
2445 | bool isITCondCodeRestrictedFP() const { | |||
2446 | if (!isITCondCode()) | |||
2447 | return false; | |||
2448 | ARMCC::CondCodes CC = getCondCode(); | |||
2449 | return CC == ARMCC::EQ || CC == ARMCC::NE || CC == ARMCC::LT || | |||
2450 | CC == ARMCC::GT || CC == ARMCC::LE || CC == ARMCC::GE; | |||
2451 | } | |||
2452 | ||||
2453 | void addExpr(MCInst &Inst, const MCExpr *Expr) const { | |||
2454 | // Add as immediates when possible. Null MCExpr = 0. | |||
2455 | if (!Expr) | |||
2456 | Inst.addOperand(MCOperand::createImm(0)); | |||
2457 | else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr)) | |||
2458 | Inst.addOperand(MCOperand::createImm(CE->getValue())); | |||
2459 | else | |||
2460 | Inst.addOperand(MCOperand::createExpr(Expr)); | |||
2461 | } | |||
2462 | ||||
2463 | void addARMBranchTargetOperands(MCInst &Inst, unsigned N) const { | |||
2464 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2464, __extension__ __PRETTY_FUNCTION__)); | |||
2465 | addExpr(Inst, getImm()); | |||
2466 | } | |||
2467 | ||||
2468 | void addThumbBranchTargetOperands(MCInst &Inst, unsigned N) const { | |||
2469 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2469, __extension__ __PRETTY_FUNCTION__)); | |||
2470 | addExpr(Inst, getImm()); | |||
2471 | } | |||
2472 | ||||
2473 | void addCondCodeOperands(MCInst &Inst, unsigned N) const { | |||
2474 | assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2474, __extension__ __PRETTY_FUNCTION__)); | |||
2475 | Inst.addOperand(MCOperand::createImm(unsigned(getCondCode()))); | |||
2476 | unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR; | |||
2477 | Inst.addOperand(MCOperand::createReg(RegNum)); | |||
2478 | } | |||
2479 | ||||
2480 | void addVPTPredNOperands(MCInst &Inst, unsigned N) const { | |||
2481 | assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2481, __extension__ __PRETTY_FUNCTION__)); | |||
2482 | Inst.addOperand(MCOperand::createImm(unsigned(getVPTPred()))); | |||
2483 | unsigned RegNum = getVPTPred() == ARMVCC::None ? 0: ARM::P0; | |||
2484 | Inst.addOperand(MCOperand::createReg(RegNum)); | |||
2485 | } | |||
2486 | ||||
2487 | void addVPTPredROperands(MCInst &Inst, unsigned N) const { | |||
2488 | assert(N == 3 && "Invalid number of operands!")(static_cast <bool> (N == 3 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 3 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2488, __extension__ __PRETTY_FUNCTION__)); | |||
2489 | addVPTPredNOperands(Inst, N-1); | |||
2490 | unsigned RegNum; | |||
2491 | if (getVPTPred() == ARMVCC::None) { | |||
2492 | RegNum = 0; | |||
2493 | } else { | |||
2494 | unsigned NextOpIndex = Inst.getNumOperands(); | |||
2495 | const MCInstrDesc &MCID = ARMInsts[Inst.getOpcode()]; | |||
2496 | int TiedOp = MCID.getOperandConstraint(NextOpIndex, MCOI::TIED_TO); | |||
2497 | assert(TiedOp >= 0 &&(static_cast <bool> (TiedOp >= 0 && "Inactive register in vpred_r is not tied to an output!" ) ? void (0) : __assert_fail ("TiedOp >= 0 && \"Inactive register in vpred_r is not tied to an output!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2498, __extension__ __PRETTY_FUNCTION__)) | |||
2498 | "Inactive register in vpred_r is not tied to an output!")(static_cast <bool> (TiedOp >= 0 && "Inactive register in vpred_r is not tied to an output!" ) ? void (0) : __assert_fail ("TiedOp >= 0 && \"Inactive register in vpred_r is not tied to an output!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2498, __extension__ __PRETTY_FUNCTION__)); | |||
2499 | RegNum = Inst.getOperand(TiedOp).getReg(); | |||
2500 | } | |||
2501 | Inst.addOperand(MCOperand::createReg(RegNum)); | |||
2502 | } | |||
2503 | ||||
2504 | void addCoprocNumOperands(MCInst &Inst, unsigned N) const { | |||
2505 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2505, __extension__ __PRETTY_FUNCTION__)); | |||
2506 | Inst.addOperand(MCOperand::createImm(getCoproc())); | |||
2507 | } | |||
2508 | ||||
2509 | void addCoprocRegOperands(MCInst &Inst, unsigned N) const { | |||
2510 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2510, __extension__ __PRETTY_FUNCTION__)); | |||
2511 | Inst.addOperand(MCOperand::createImm(getCoproc())); | |||
2512 | } | |||
2513 | ||||
2514 | void addCoprocOptionOperands(MCInst &Inst, unsigned N) const { | |||
2515 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2515, __extension__ __PRETTY_FUNCTION__)); | |||
2516 | Inst.addOperand(MCOperand::createImm(CoprocOption.Val)); | |||
2517 | } | |||
2518 | ||||
2519 | void addITMaskOperands(MCInst &Inst, unsigned N) const { | |||
2520 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2520, __extension__ __PRETTY_FUNCTION__)); | |||
2521 | Inst.addOperand(MCOperand::createImm(ITMask.Mask)); | |||
2522 | } | |||
2523 | ||||
2524 | void addITCondCodeOperands(MCInst &Inst, unsigned N) const { | |||
2525 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2525, __extension__ __PRETTY_FUNCTION__)); | |||
2526 | Inst.addOperand(MCOperand::createImm(unsigned(getCondCode()))); | |||
2527 | } | |||
2528 | ||||
2529 | void addITCondCodeInvOperands(MCInst &Inst, unsigned N) const { | |||
2530 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2530, __extension__ __PRETTY_FUNCTION__)); | |||
2531 | Inst.addOperand(MCOperand::createImm(unsigned(ARMCC::getOppositeCondition(getCondCode())))); | |||
2532 | } | |||
2533 | ||||
2534 | void addCCOutOperands(MCInst &Inst, unsigned N) const { | |||
2535 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2535, __extension__ __PRETTY_FUNCTION__)); | |||
2536 | Inst.addOperand(MCOperand::createReg(getReg())); | |||
2537 | } | |||
2538 | ||||
2539 | void addRegOperands(MCInst &Inst, unsigned N) const { | |||
2540 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2540, __extension__ __PRETTY_FUNCTION__)); | |||
2541 | Inst.addOperand(MCOperand::createReg(getReg())); | |||
2542 | } | |||
2543 | ||||
2544 | void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const { | |||
2545 | assert(N == 3 && "Invalid number of operands!")(static_cast <bool> (N == 3 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 3 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2545, __extension__ __PRETTY_FUNCTION__)); | |||
2546 | assert(isRegShiftedReg() &&(static_cast <bool> (isRegShiftedReg() && "addRegShiftedRegOperands() on non-RegShiftedReg!" ) ? void (0) : __assert_fail ("isRegShiftedReg() && \"addRegShiftedRegOperands() on non-RegShiftedReg!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2547, __extension__ __PRETTY_FUNCTION__)) | |||
2547 | "addRegShiftedRegOperands() on non-RegShiftedReg!")(static_cast <bool> (isRegShiftedReg() && "addRegShiftedRegOperands() on non-RegShiftedReg!" ) ? void (0) : __assert_fail ("isRegShiftedReg() && \"addRegShiftedRegOperands() on non-RegShiftedReg!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2547, __extension__ __PRETTY_FUNCTION__)); | |||
2548 | Inst.addOperand(MCOperand::createReg(RegShiftedReg.SrcReg)); | |||
2549 | Inst.addOperand(MCOperand::createReg(RegShiftedReg.ShiftReg)); | |||
2550 | Inst.addOperand(MCOperand::createImm( | |||
2551 | ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm))); | |||
2552 | } | |||
2553 | ||||
2554 | void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const { | |||
2555 | assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2555, __extension__ __PRETTY_FUNCTION__)); | |||
2556 | assert(isRegShiftedImm() &&(static_cast <bool> (isRegShiftedImm() && "addRegShiftedImmOperands() on non-RegShiftedImm!" ) ? void (0) : __assert_fail ("isRegShiftedImm() && \"addRegShiftedImmOperands() on non-RegShiftedImm!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2557, __extension__ __PRETTY_FUNCTION__)) | |||
2557 | "addRegShiftedImmOperands() on non-RegShiftedImm!")(static_cast <bool> (isRegShiftedImm() && "addRegShiftedImmOperands() on non-RegShiftedImm!" ) ? void (0) : __assert_fail ("isRegShiftedImm() && \"addRegShiftedImmOperands() on non-RegShiftedImm!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2557, __extension__ __PRETTY_FUNCTION__)); | |||
2558 | Inst.addOperand(MCOperand::createReg(RegShiftedImm.SrcReg)); | |||
2559 | // Shift of #32 is encoded as 0 where permitted | |||
2560 | unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm); | |||
2561 | Inst.addOperand(MCOperand::createImm( | |||
2562 | ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, Imm))); | |||
2563 | } | |||
2564 | ||||
2565 | void addShifterImmOperands(MCInst &Inst, unsigned N) const { | |||
2566 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2566, __extension__ __PRETTY_FUNCTION__)); | |||
2567 | Inst.addOperand(MCOperand::createImm((ShifterImm.isASR << 5) | | |||
2568 | ShifterImm.Imm)); | |||
2569 | } | |||
2570 | ||||
2571 | void addRegListOperands(MCInst &Inst, unsigned N) const { | |||
2572 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2572, __extension__ __PRETTY_FUNCTION__)); | |||
2573 | const SmallVectorImpl<unsigned> &RegList = getRegList(); | |||
2574 | for (SmallVectorImpl<unsigned>::const_iterator | |||
2575 | I = RegList.begin(), E = RegList.end(); I != E; ++I) | |||
2576 | Inst.addOperand(MCOperand::createReg(*I)); | |||
2577 | } | |||
2578 | ||||
2579 | void addRegListWithAPSROperands(MCInst &Inst, unsigned N) const { | |||
2580 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2580, __extension__ __PRETTY_FUNCTION__)); | |||
2581 | const SmallVectorImpl<unsigned> &RegList = getRegList(); | |||
2582 | for (SmallVectorImpl<unsigned>::const_iterator | |||
2583 | I = RegList.begin(), E = RegList.end(); I != E; ++I) | |||
2584 | Inst.addOperand(MCOperand::createReg(*I)); | |||
2585 | } | |||
2586 | ||||
  // DPR register lists use the same MCInst expansion as plain register
  // lists: one register operand per element.
  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }
2590 | ||||
  // SPR register lists use the same MCInst expansion as plain register
  // lists: one register operand per element.
  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }
2594 | ||||
  // FP single-precision register lists (possibly including VPR) expand
  // exactly like plain register lists.
  void addFPSRegListWithVPROperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }
2598 | ||||
  // FP double-precision register lists (possibly including VPR) expand
  // exactly like plain register lists.
  void addFPDRegListWithVPROperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }
2602 | ||||
2603 | void addRotImmOperands(MCInst &Inst, unsigned N) const { | |||
2604 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2604, __extension__ __PRETTY_FUNCTION__)); | |||
2605 | // Encoded as val>>3. The printer handles display as 8, 16, 24. | |||
2606 | Inst.addOperand(MCOperand::createImm(RotImm.Imm >> 3)); | |||
2607 | } | |||
2608 | ||||
2609 | void addModImmOperands(MCInst &Inst, unsigned N) const { | |||
2610 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2610, __extension__ __PRETTY_FUNCTION__)); | |||
2611 | ||||
2612 | // Support for fixups (MCFixup) | |||
2613 | if (isImm()) | |||
2614 | return addImmOperands(Inst, N); | |||
2615 | ||||
2616 | Inst.addOperand(MCOperand::createImm(ModImm.Bits | (ModImm.Rot << 7))); | |||
2617 | } | |||
2618 | ||||
2619 | void addModImmNotOperands(MCInst &Inst, unsigned N) const { | |||
2620 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2620, __extension__ __PRETTY_FUNCTION__)); | |||
2621 | const MCConstantExpr *CE = cast<MCConstantExpr>(getImm()); | |||
2622 | uint32_t Enc = ARM_AM::getSOImmVal(~CE->getValue()); | |||
2623 | Inst.addOperand(MCOperand::createImm(Enc)); | |||
2624 | } | |||
2625 | ||||
2626 | void addModImmNegOperands(MCInst &Inst, unsigned N) const { | |||
2627 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2627, __extension__ __PRETTY_FUNCTION__)); | |||
2628 | const MCConstantExpr *CE = cast<MCConstantExpr>(getImm()); | |||
2629 | uint32_t Enc = ARM_AM::getSOImmVal(-CE->getValue()); | |||
2630 | Inst.addOperand(MCOperand::createImm(Enc)); | |||
2631 | } | |||
2632 | ||||
2633 | void addThumbModImmNeg8_255Operands(MCInst &Inst, unsigned N) const { | |||
2634 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2634, __extension__ __PRETTY_FUNCTION__)); | |||
2635 | const MCConstantExpr *CE = cast<MCConstantExpr>(getImm()); | |||
2636 | uint32_t Val = -CE->getValue(); | |||
2637 | Inst.addOperand(MCOperand::createImm(Val)); | |||
2638 | } | |||
2639 | ||||
2640 | void addThumbModImmNeg1_7Operands(MCInst &Inst, unsigned N) const { | |||
2641 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2641, __extension__ __PRETTY_FUNCTION__)); | |||
2642 | const MCConstantExpr *CE = cast<MCConstantExpr>(getImm()); | |||
2643 | uint32_t Val = -CE->getValue(); | |||
2644 | Inst.addOperand(MCOperand::createImm(Val)); | |||
2645 | } | |||
2646 | ||||
2647 | void addBitfieldOperands(MCInst &Inst, unsigned N) const { | |||
2648 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2648, __extension__ __PRETTY_FUNCTION__)); | |||
2649 | // Munge the lsb/width into a bitfield mask. | |||
2650 | unsigned lsb = Bitfield.LSB; | |||
2651 | unsigned width = Bitfield.Width; | |||
2652 | // Make a 32-bit mask w/ the referenced bits clear and all other bits set. | |||
2653 | uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >> | |||
2654 | (32 - (lsb + width))); | |||
2655 | Inst.addOperand(MCOperand::createImm(Mask)); | |||
2656 | } | |||
2657 | ||||
2658 | void addImmOperands(MCInst &Inst, unsigned N) const { | |||
2659 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2659, __extension__ __PRETTY_FUNCTION__)); | |||
2660 | addExpr(Inst, getImm()); | |||
2661 | } | |||
2662 | ||||
2663 | void addFBits16Operands(MCInst &Inst, unsigned N) const { | |||
2664 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2664, __extension__ __PRETTY_FUNCTION__)); | |||
2665 | const MCConstantExpr *CE = cast<MCConstantExpr>(getImm()); | |||
2666 | Inst.addOperand(MCOperand::createImm(16 - CE->getValue())); | |||
2667 | } | |||
2668 | ||||
2669 | void addFBits32Operands(MCInst &Inst, unsigned N) const { | |||
2670 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2670, __extension__ __PRETTY_FUNCTION__)); | |||
2671 | const MCConstantExpr *CE = cast<MCConstantExpr>(getImm()); | |||
2672 | Inst.addOperand(MCOperand::createImm(32 - CE->getValue())); | |||
2673 | } | |||
2674 | ||||
2675 | void addFPImmOperands(MCInst &Inst, unsigned N) const { | |||
2676 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2676, __extension__ __PRETTY_FUNCTION__)); | |||
2677 | const MCConstantExpr *CE = cast<MCConstantExpr>(getImm()); | |||
2678 | int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue())); | |||
2679 | Inst.addOperand(MCOperand::createImm(Val)); | |||
2680 | } | |||
2681 | ||||
2682 | void addImm8s4Operands(MCInst &Inst, unsigned N) const { | |||
2683 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2683, __extension__ __PRETTY_FUNCTION__)); | |||
2684 | // FIXME: We really want to scale the value here, but the LDRD/STRD | |||
2685 | // instruction don't encode operands that way yet. | |||
2686 | const MCConstantExpr *CE = cast<MCConstantExpr>(getImm()); | |||
2687 | Inst.addOperand(MCOperand::createImm(CE->getValue())); | |||
2688 | } | |||
2689 | ||||
2690 | void addImm7s4Operands(MCInst &Inst, unsigned N) const { | |||
2691 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2691, __extension__ __PRETTY_FUNCTION__)); | |||
2692 | // FIXME: We really want to scale the value here, but the VSTR/VLDR_VSYSR | |||
2693 | // instruction don't encode operands that way yet. | |||
2694 | const MCConstantExpr *CE = cast<MCConstantExpr>(getImm()); | |||
2695 | Inst.addOperand(MCOperand::createImm(CE->getValue())); | |||
2696 | } | |||
2697 | ||||
2698 | void addImm7Shift0Operands(MCInst &Inst, unsigned N) const { | |||
2699 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2699, __extension__ __PRETTY_FUNCTION__)); | |||
2700 | const MCConstantExpr *CE = cast<MCConstantExpr>(getImm()); | |||
2701 | Inst.addOperand(MCOperand::createImm(CE->getValue())); | |||
2702 | } | |||
2703 | ||||
2704 | void addImm7Shift1Operands(MCInst &Inst, unsigned N) const { | |||
2705 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2705, __extension__ __PRETTY_FUNCTION__)); | |||
2706 | const MCConstantExpr *CE = cast<MCConstantExpr>(getImm()); | |||
2707 | Inst.addOperand(MCOperand::createImm(CE->getValue())); | |||
2708 | } | |||
2709 | ||||
2710 | void addImm7Shift2Operands(MCInst &Inst, unsigned N) const { | |||
2711 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2711, __extension__ __PRETTY_FUNCTION__)); | |||
2712 | const MCConstantExpr *CE = cast<MCConstantExpr>(getImm()); | |||
2713 | Inst.addOperand(MCOperand::createImm(CE->getValue())); | |||
2714 | } | |||
2715 | ||||
2716 | void addImm7Operands(MCInst &Inst, unsigned N) const { | |||
2717 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2717, __extension__ __PRETTY_FUNCTION__)); | |||
2718 | const MCConstantExpr *CE = cast<MCConstantExpr>(getImm()); | |||
2719 | Inst.addOperand(MCOperand::createImm(CE->getValue())); | |||
2720 | } | |||
2721 | ||||
2722 | void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const { | |||
2723 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2723, __extension__ __PRETTY_FUNCTION__)); | |||
2724 | // The immediate is scaled by four in the encoding and is stored | |||
2725 | // in the MCInst as such. Lop off the low two bits here. | |||
2726 | const MCConstantExpr *CE = cast<MCConstantExpr>(getImm()); | |||
2727 | Inst.addOperand(MCOperand::createImm(CE->getValue() / 4)); | |||
2728 | } | |||
2729 | ||||
2730 | void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const { | |||
2731 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2731, __extension__ __PRETTY_FUNCTION__)); | |||
2732 | // The immediate is scaled by four in the encoding and is stored | |||
2733 | // in the MCInst as such. Lop off the low two bits here. | |||
2734 | const MCConstantExpr *CE = cast<MCConstantExpr>(getImm()); | |||
2735 | Inst.addOperand(MCOperand::createImm(-(CE->getValue() / 4))); | |||
2736 | } | |||
2737 | ||||
2738 | void addImm0_508s4Operands(MCInst &Inst, unsigned N) const { | |||
2739 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2739, __extension__ __PRETTY_FUNCTION__)); | |||
2740 | // The immediate is scaled by four in the encoding and is stored | |||
2741 | // in the MCInst as such. Lop off the low two bits here. | |||
2742 | const MCConstantExpr *CE = cast<MCConstantExpr>(getImm()); | |||
2743 | Inst.addOperand(MCOperand::createImm(CE->getValue() / 4)); | |||
2744 | } | |||
2745 | ||||
2746 | void addImm1_16Operands(MCInst &Inst, unsigned N) const { | |||
2747 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2747, __extension__ __PRETTY_FUNCTION__)); | |||
2748 | // The constant encodes as the immediate-1, and we store in the instruction | |||
2749 | // the bits as encoded, so subtract off one here. | |||
2750 | const MCConstantExpr *CE = cast<MCConstantExpr>(getImm()); | |||
2751 | Inst.addOperand(MCOperand::createImm(CE->getValue() - 1)); | |||
2752 | } | |||
2753 | ||||
2754 | void addImm1_32Operands(MCInst &Inst, unsigned N) const { | |||
2755 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2755, __extension__ __PRETTY_FUNCTION__)); | |||
2756 | // The constant encodes as the immediate-1, and we store in the instruction | |||
2757 | // the bits as encoded, so subtract off one here. | |||
2758 | const MCConstantExpr *CE = cast<MCConstantExpr>(getImm()); | |||
2759 | Inst.addOperand(MCOperand::createImm(CE->getValue() - 1)); | |||
2760 | } | |||
2761 | ||||
2762 | void addImmThumbSROperands(MCInst &Inst, unsigned N) const { | |||
2763 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2763, __extension__ __PRETTY_FUNCTION__)); | |||
2764 | // The constant encodes as the immediate, except for 32, which encodes as | |||
2765 | // zero. | |||
2766 | const MCConstantExpr *CE = cast<MCConstantExpr>(getImm()); | |||
2767 | unsigned Imm = CE->getValue(); | |||
2768 | Inst.addOperand(MCOperand::createImm((Imm == 32 ? 0 : Imm))); | |||
2769 | } | |||
2770 | ||||
2771 | void addPKHASRImmOperands(MCInst &Inst, unsigned N) const { | |||
2772 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2772, __extension__ __PRETTY_FUNCTION__)); | |||
2773 | // An ASR value of 32 encodes as 0, so that's how we want to add it to | |||
2774 | // the instruction as well. | |||
2775 | const MCConstantExpr *CE = cast<MCConstantExpr>(getImm()); | |||
2776 | int Val = CE->getValue(); | |||
2777 | Inst.addOperand(MCOperand::createImm(Val == 32 ? 0 : Val)); | |||
2778 | } | |||
2779 | ||||
2780 | void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const { | |||
2781 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2781, __extension__ __PRETTY_FUNCTION__)); | |||
2782 | // The operand is actually a t2_so_imm, but we have its bitwise | |||
2783 | // negation in the assembly source, so twiddle it here. | |||
2784 | const MCConstantExpr *CE = cast<MCConstantExpr>(getImm()); | |||
2785 | Inst.addOperand(MCOperand::createImm(~(uint32_t)CE->getValue())); | |||
2786 | } | |||
2787 | ||||
2788 | void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const { | |||
2789 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2789, __extension__ __PRETTY_FUNCTION__)); | |||
2790 | // The operand is actually a t2_so_imm, but we have its | |||
2791 | // negation in the assembly source, so twiddle it here. | |||
2792 | const MCConstantExpr *CE = cast<MCConstantExpr>(getImm()); | |||
2793 | Inst.addOperand(MCOperand::createImm(-(uint32_t)CE->getValue())); | |||
2794 | } | |||
2795 | ||||
2796 | void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const { | |||
2797 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2797, __extension__ __PRETTY_FUNCTION__)); | |||
2798 | // The operand is actually an imm0_4095, but we have its | |||
2799 | // negation in the assembly source, so twiddle it here. | |||
2800 | const MCConstantExpr *CE = cast<MCConstantExpr>(getImm()); | |||
2801 | Inst.addOperand(MCOperand::createImm(-(uint32_t)CE->getValue())); | |||
2802 | } | |||
2803 | ||||
2804 | void addUnsignedOffset_b8s2Operands(MCInst &Inst, unsigned N) const { | |||
2805 | if(const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) { | |||
2806 | Inst.addOperand(MCOperand::createImm(CE->getValue() >> 2)); | |||
2807 | return; | |||
2808 | } | |||
2809 | const MCSymbolRefExpr *SR = cast<MCSymbolRefExpr>(Imm.Val); | |||
2810 | Inst.addOperand(MCOperand::createExpr(SR)); | |||
2811 | } | |||
2812 | ||||
2813 | void addThumbMemPCOperands(MCInst &Inst, unsigned N) const { | |||
2814 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2814, __extension__ __PRETTY_FUNCTION__)); | |||
2815 | if (isImm()) { | |||
2816 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); | |||
2817 | if (CE) { | |||
2818 | Inst.addOperand(MCOperand::createImm(CE->getValue())); | |||
2819 | return; | |||
2820 | } | |||
2821 | const MCSymbolRefExpr *SR = cast<MCSymbolRefExpr>(Imm.Val); | |||
2822 | Inst.addOperand(MCOperand::createExpr(SR)); | |||
2823 | return; | |||
2824 | } | |||
2825 | ||||
2826 | assert(isGPRMem() && "Unknown value type!")(static_cast <bool> (isGPRMem() && "Unknown value type!" ) ? void (0) : __assert_fail ("isGPRMem() && \"Unknown value type!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2826, __extension__ __PRETTY_FUNCTION__)); | |||
2827 | assert(isa<MCConstantExpr>(Memory.OffsetImm) && "Unknown value type!")(static_cast <bool> (isa<MCConstantExpr>(Memory.OffsetImm ) && "Unknown value type!") ? void (0) : __assert_fail ("isa<MCConstantExpr>(Memory.OffsetImm) && \"Unknown value type!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2827, __extension__ __PRETTY_FUNCTION__)); | |||
2828 | if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) | |||
2829 | Inst.addOperand(MCOperand::createImm(CE->getValue())); | |||
2830 | else | |||
2831 | Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm)); | |||
2832 | } | |||
2833 | ||||
2834 | void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const { | |||
2835 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2835, __extension__ __PRETTY_FUNCTION__)); | |||
2836 | Inst.addOperand(MCOperand::createImm(unsigned(getMemBarrierOpt()))); | |||
2837 | } | |||
2838 | ||||
2839 | void addInstSyncBarrierOptOperands(MCInst &Inst, unsigned N) const { | |||
2840 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2840, __extension__ __PRETTY_FUNCTION__)); | |||
2841 | Inst.addOperand(MCOperand::createImm(unsigned(getInstSyncBarrierOpt()))); | |||
2842 | } | |||
2843 | ||||
2844 | void addTraceSyncBarrierOptOperands(MCInst &Inst, unsigned N) const { | |||
2845 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2845, __extension__ __PRETTY_FUNCTION__)); | |||
2846 | Inst.addOperand(MCOperand::createImm(unsigned(getTraceSyncBarrierOpt()))); | |||
2847 | } | |||
2848 | ||||
2849 | void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const { | |||
2850 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2850, __extension__ __PRETTY_FUNCTION__)); | |||
2851 | Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum)); | |||
2852 | } | |||
2853 | ||||
2854 | void addMemNoOffsetT2Operands(MCInst &Inst, unsigned N) const { | |||
2855 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2855, __extension__ __PRETTY_FUNCTION__)); | |||
2856 | Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum)); | |||
2857 | } | |||
2858 | ||||
2859 | void addMemNoOffsetT2NoSpOperands(MCInst &Inst, unsigned N) const { | |||
2860 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2860, __extension__ __PRETTY_FUNCTION__)); | |||
2861 | Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum)); | |||
2862 | } | |||
2863 | ||||
2864 | void addMemNoOffsetTOperands(MCInst &Inst, unsigned N) const { | |||
2865 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2865, __extension__ __PRETTY_FUNCTION__)); | |||
2866 | Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum)); | |||
2867 | } | |||
2868 | ||||
2869 | void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const { | |||
2870 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2870, __extension__ __PRETTY_FUNCTION__)); | |||
2871 | if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) | |||
2872 | Inst.addOperand(MCOperand::createImm(CE->getValue())); | |||
2873 | else | |||
2874 | Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm)); | |||
2875 | } | |||
2876 | ||||
2877 | void addAdrLabelOperands(MCInst &Inst, unsigned N) const { | |||
2878 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2878, __extension__ __PRETTY_FUNCTION__)); | |||
2879 | assert(isImm() && "Not an immediate!")(static_cast <bool> (isImm() && "Not an immediate!" ) ? void (0) : __assert_fail ("isImm() && \"Not an immediate!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2879, __extension__ __PRETTY_FUNCTION__)); | |||
2880 | ||||
2881 | // If we have an immediate that's not a constant, treat it as a label | |||
2882 | // reference needing a fixup. | |||
2883 | if (!isa<MCConstantExpr>(getImm())) { | |||
2884 | Inst.addOperand(MCOperand::createExpr(getImm())); | |||
2885 | return; | |||
2886 | } | |||
2887 | ||||
2888 | const MCConstantExpr *CE = cast<MCConstantExpr>(getImm()); | |||
2889 | int Val = CE->getValue(); | |||
2890 | Inst.addOperand(MCOperand::createImm(Val)); | |||
2891 | } | |||
2892 | ||||
2893 | void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const { | |||
2894 | assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2894, __extension__ __PRETTY_FUNCTION__)); | |||
2895 | Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum)); | |||
2896 | Inst.addOperand(MCOperand::createImm(Memory.Alignment)); | |||
2897 | } | |||
2898 | ||||
  // All alignment-qualified memory variants lower identically:
  // (base register, alignment) via addAlignedMemoryOperands.
  void addDupAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2902 | ||||
  // Lowered as (base register, alignment) via addAlignedMemoryOperands.
  void addAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2906 | ||||
  // Lowered as (base register, alignment) via addAlignedMemoryOperands.
  void addAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2910 | ||||
  // Lowered as (base register, alignment) via addAlignedMemoryOperands.
  void addDupAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2914 | ||||
  // Lowered as (base register, alignment) via addAlignedMemoryOperands.
  void addAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2918 | ||||
2919 | void addDupAlignedMemory32Operands(MCInst &Inst, unsigned N) const { | |||
2920 | addAlignedMemoryOperands(Inst, N); | |||
2921 | } | |||
2922 | ||||
2923 | void addAlignedMemory64Operands(MCInst &Inst, unsigned N) const { | |||
2924 | addAlignedMemoryOperands(Inst, N); | |||
2925 | } | |||
2926 | ||||
2927 | void addDupAlignedMemory64Operands(MCInst &Inst, unsigned N) const { | |||
2928 | addAlignedMemoryOperands(Inst, N); | |||
2929 | } | |||
2930 | ||||
2931 | void addAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const { | |||
2932 | addAlignedMemoryOperands(Inst, N); | |||
2933 | } | |||
2934 | ||||
2935 | void addDupAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const { | |||
2936 | addAlignedMemoryOperands(Inst, N); | |||
2937 | } | |||
2938 | ||||
2939 | void addAlignedMemory64or128or256Operands(MCInst &Inst, unsigned N) const { | |||
2940 | addAlignedMemoryOperands(Inst, N); | |||
2941 | } | |||
2942 | ||||
2943 | void addAddrMode2Operands(MCInst &Inst, unsigned N) const { | |||
2944 | assert(N == 3 && "Invalid number of operands!")(static_cast <bool> (N == 3 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 3 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2944, __extension__ __PRETTY_FUNCTION__)); | |||
2945 | Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum)); | |||
2946 | Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum)); | |||
2947 | if (!Memory.OffsetRegNum) { | |||
2948 | if (!Memory.OffsetImm) | |||
2949 | Inst.addOperand(MCOperand::createImm(0)); | |||
2950 | else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) { | |||
2951 | int32_t Val = CE->getValue(); | |||
2952 | ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; | |||
2953 | // Special case for #-0 | |||
2954 | if (Val == std::numeric_limits<int32_t>::min()) | |||
2955 | Val = 0; | |||
2956 | if (Val < 0) | |||
2957 | Val = -Val; | |||
2958 | Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift); | |||
2959 | Inst.addOperand(MCOperand::createImm(Val)); | |||
2960 | } else | |||
2961 | Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm)); | |||
2962 | } else { | |||
2963 | // For register offset, we encode the shift type and negation flag | |||
2964 | // here. | |||
2965 | int32_t Val = | |||
2966 | ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, | |||
2967 | Memory.ShiftImm, Memory.ShiftType); | |||
2968 | Inst.addOperand(MCOperand::createImm(Val)); | |||
2969 | } | |||
2970 | } | |||
2971 | ||||
2972 | void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const { | |||
2973 | assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2973, __extension__ __PRETTY_FUNCTION__)); | |||
2974 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); | |||
2975 | assert(CE && "non-constant AM2OffsetImm operand!")(static_cast <bool> (CE && "non-constant AM2OffsetImm operand!" ) ? void (0) : __assert_fail ("CE && \"non-constant AM2OffsetImm operand!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2975, __extension__ __PRETTY_FUNCTION__)); | |||
2976 | int32_t Val = CE->getValue(); | |||
2977 | ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; | |||
2978 | // Special case for #-0 | |||
2979 | if (Val == std::numeric_limits<int32_t>::min()) Val = 0; | |||
2980 | if (Val < 0) Val = -Val; | |||
2981 | Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift); | |||
2982 | Inst.addOperand(MCOperand::createReg(0)); | |||
2983 | Inst.addOperand(MCOperand::createImm(Val)); | |||
2984 | } | |||
2985 | ||||
2986 | void addAddrMode3Operands(MCInst &Inst, unsigned N) const { | |||
2987 | assert(N == 3 && "Invalid number of operands!")(static_cast <bool> (N == 3 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 3 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 2987, __extension__ __PRETTY_FUNCTION__)); | |||
2988 | // If we have an immediate that's not a constant, treat it as a label | |||
2989 | // reference needing a fixup. If it is a constant, it's something else | |||
2990 | // and we reject it. | |||
2991 | if (isImm()) { | |||
2992 | Inst.addOperand(MCOperand::createExpr(getImm())); | |||
2993 | Inst.addOperand(MCOperand::createReg(0)); | |||
2994 | Inst.addOperand(MCOperand::createImm(0)); | |||
2995 | return; | |||
2996 | } | |||
2997 | ||||
2998 | Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum)); | |||
2999 | Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum)); | |||
3000 | if (!Memory.OffsetRegNum) { | |||
3001 | if (!Memory.OffsetImm) | |||
3002 | Inst.addOperand(MCOperand::createImm(0)); | |||
3003 | else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) { | |||
3004 | int32_t Val = CE->getValue(); | |||
3005 | ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; | |||
3006 | // Special case for #-0 | |||
3007 | if (Val == std::numeric_limits<int32_t>::min()) | |||
3008 | Val = 0; | |||
3009 | if (Val < 0) | |||
3010 | Val = -Val; | |||
3011 | Val = ARM_AM::getAM3Opc(AddSub, Val); | |||
3012 | Inst.addOperand(MCOperand::createImm(Val)); | |||
3013 | } else | |||
3014 | Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm)); | |||
3015 | } else { | |||
3016 | // For register offset, we encode the shift type and negation flag | |||
3017 | // here. | |||
3018 | int32_t Val = | |||
3019 | ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0); | |||
3020 | Inst.addOperand(MCOperand::createImm(Val)); | |||
3021 | } | |||
3022 | } | |||
3023 | ||||
3024 | void addAM3OffsetOperands(MCInst &Inst, unsigned N) const { | |||
3025 | assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3025, __extension__ __PRETTY_FUNCTION__)); | |||
3026 | if (Kind == k_PostIndexRegister) { | |||
3027 | int32_t Val = | |||
3028 | ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0); | |||
3029 | Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum)); | |||
3030 | Inst.addOperand(MCOperand::createImm(Val)); | |||
3031 | return; | |||
3032 | } | |||
3033 | ||||
3034 | // Constant offset. | |||
3035 | const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm()); | |||
3036 | int32_t Val = CE->getValue(); | |||
3037 | ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; | |||
3038 | // Special case for #-0 | |||
3039 | if (Val == std::numeric_limits<int32_t>::min()) Val = 0; | |||
3040 | if (Val < 0) Val = -Val; | |||
3041 | Val = ARM_AM::getAM3Opc(AddSub, Val); | |||
3042 | Inst.addOperand(MCOperand::createReg(0)); | |||
3043 | Inst.addOperand(MCOperand::createImm(Val)); | |||
3044 | } | |||
3045 | ||||
3046 | void addAddrMode5Operands(MCInst &Inst, unsigned N) const { | |||
3047 | assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3047, __extension__ __PRETTY_FUNCTION__)); | |||
3048 | // If we have an immediate that's not a constant, treat it as a label | |||
3049 | // reference needing a fixup. If it is a constant, it's something else | |||
3050 | // and we reject it. | |||
3051 | if (isImm()) { | |||
3052 | Inst.addOperand(MCOperand::createExpr(getImm())); | |||
3053 | Inst.addOperand(MCOperand::createImm(0)); | |||
3054 | return; | |||
3055 | } | |||
3056 | ||||
3057 | Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum)); | |||
3058 | if (!Memory.OffsetImm) | |||
3059 | Inst.addOperand(MCOperand::createImm(0)); | |||
3060 | else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) { | |||
3061 | // The lower two bits are always zero and as such are not encoded. | |||
3062 | int32_t Val = CE->getValue() / 4; | |||
3063 | ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; | |||
3064 | // Special case for #-0 | |||
3065 | if (Val == std::numeric_limits<int32_t>::min()) | |||
3066 | Val = 0; | |||
3067 | if (Val < 0) | |||
3068 | Val = -Val; | |||
3069 | Val = ARM_AM::getAM5Opc(AddSub, Val); | |||
3070 | Inst.addOperand(MCOperand::createImm(Val)); | |||
3071 | } else | |||
3072 | Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm)); | |||
3073 | } | |||
3074 | ||||
3075 | void addAddrMode5FP16Operands(MCInst &Inst, unsigned N) const { | |||
3076 | assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3076, __extension__ __PRETTY_FUNCTION__)); | |||
3077 | // If we have an immediate that's not a constant, treat it as a label | |||
3078 | // reference needing a fixup. If it is a constant, it's something else | |||
3079 | // and we reject it. | |||
3080 | if (isImm()) { | |||
3081 | Inst.addOperand(MCOperand::createExpr(getImm())); | |||
3082 | Inst.addOperand(MCOperand::createImm(0)); | |||
3083 | return; | |||
3084 | } | |||
3085 | ||||
3086 | Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum)); | |||
3087 | // The lower bit is always zero and as such is not encoded. | |||
3088 | if (!Memory.OffsetImm) | |||
3089 | Inst.addOperand(MCOperand::createImm(0)); | |||
3090 | else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) { | |||
3091 | int32_t Val = CE->getValue() / 2; | |||
3092 | ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; | |||
3093 | // Special case for #-0 | |||
3094 | if (Val == std::numeric_limits<int32_t>::min()) | |||
3095 | Val = 0; | |||
3096 | if (Val < 0) | |||
3097 | Val = -Val; | |||
3098 | Val = ARM_AM::getAM5FP16Opc(AddSub, Val); | |||
3099 | Inst.addOperand(MCOperand::createImm(Val)); | |||
3100 | } else | |||
3101 | Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm)); | |||
3102 | } | |||
3103 | ||||
3104 | void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const { | |||
3105 | assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3105, __extension__ __PRETTY_FUNCTION__)); | |||
3106 | // If we have an immediate that's not a constant, treat it as a label | |||
3107 | // reference needing a fixup. If it is a constant, it's something else | |||
3108 | // and we reject it. | |||
3109 | if (isImm()) { | |||
3110 | Inst.addOperand(MCOperand::createExpr(getImm())); | |||
3111 | Inst.addOperand(MCOperand::createImm(0)); | |||
3112 | return; | |||
3113 | } | |||
3114 | ||||
3115 | Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum)); | |||
3116 | addExpr(Inst, Memory.OffsetImm); | |||
3117 | } | |||
3118 | ||||
3119 | void addMemImm7s4OffsetOperands(MCInst &Inst, unsigned N) const { | |||
3120 | assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3120, __extension__ __PRETTY_FUNCTION__)); | |||
3121 | // If we have an immediate that's not a constant, treat it as a label | |||
3122 | // reference needing a fixup. If it is a constant, it's something else | |||
3123 | // and we reject it. | |||
3124 | if (isImm()) { | |||
3125 | Inst.addOperand(MCOperand::createExpr(getImm())); | |||
3126 | Inst.addOperand(MCOperand::createImm(0)); | |||
3127 | return; | |||
3128 | } | |||
3129 | ||||
3130 | Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum)); | |||
3131 | addExpr(Inst, Memory.OffsetImm); | |||
3132 | } | |||
3133 | ||||
3134 | void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const { | |||
3135 | assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3135, __extension__ __PRETTY_FUNCTION__)); | |||
3136 | Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum)); | |||
3137 | if (!Memory.OffsetImm) | |||
3138 | Inst.addOperand(MCOperand::createImm(0)); | |||
3139 | else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) | |||
3140 | // The lower two bits are always zero and as such are not encoded. | |||
3141 | Inst.addOperand(MCOperand::createImm(CE->getValue() / 4)); | |||
3142 | else | |||
3143 | Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm)); | |||
3144 | } | |||
3145 | ||||
3146 | void addMemImmOffsetOperands(MCInst &Inst, unsigned N) const { | |||
3147 | assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3147, __extension__ __PRETTY_FUNCTION__)); | |||
3148 | Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum)); | |||
3149 | addExpr(Inst, Memory.OffsetImm); | |||
3150 | } | |||
3151 | ||||
3152 | void addMemRegRQOffsetOperands(MCInst &Inst, unsigned N) const { | |||
3153 | assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3153, __extension__ __PRETTY_FUNCTION__)); | |||
3154 | Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum)); | |||
3155 | Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum)); | |||
3156 | } | |||
3157 | ||||
3158 | void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const { | |||
3159 | assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3159, __extension__ __PRETTY_FUNCTION__)); | |||
3160 | // If this is an immediate, it's a label reference. | |||
3161 | if (isImm()) { | |||
3162 | addExpr(Inst, getImm()); | |||
3163 | Inst.addOperand(MCOperand::createImm(0)); | |||
3164 | return; | |||
3165 | } | |||
3166 | ||||
3167 | // Otherwise, it's a normal memory reg+offset. | |||
3168 | Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum)); | |||
3169 | addExpr(Inst, Memory.OffsetImm); | |||
3170 | } | |||
3171 | ||||
3172 | void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const { | |||
3173 | assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3173, __extension__ __PRETTY_FUNCTION__)); | |||
3174 | // If this is an immediate, it's a label reference. | |||
3175 | if (isImm()) { | |||
3176 | addExpr(Inst, getImm()); | |||
3177 | Inst.addOperand(MCOperand::createImm(0)); | |||
3178 | return; | |||
3179 | } | |||
3180 | ||||
3181 | // Otherwise, it's a normal memory reg+offset. | |||
3182 | Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum)); | |||
3183 | addExpr(Inst, Memory.OffsetImm); | |||
3184 | } | |||
3185 | ||||
3186 | void addConstPoolAsmImmOperands(MCInst &Inst, unsigned N) const { | |||
3187 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3187, __extension__ __PRETTY_FUNCTION__)); | |||
3188 | // This is container for the immediate that we will create the constant | |||
3189 | // pool from | |||
3190 | addExpr(Inst, getConstantPoolImm()); | |||
3191 | } | |||
3192 | ||||
3193 | void addMemTBBOperands(MCInst &Inst, unsigned N) const { | |||
3194 | assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3194, __extension__ __PRETTY_FUNCTION__)); | |||
3195 | Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum)); | |||
3196 | Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum)); | |||
3197 | } | |||
3198 | ||||
3199 | void addMemTBHOperands(MCInst &Inst, unsigned N) const { | |||
3200 | assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3200, __extension__ __PRETTY_FUNCTION__)); | |||
3201 | Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum)); | |||
3202 | Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum)); | |||
3203 | } | |||
3204 | ||||
3205 | void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const { | |||
3206 | assert(N == 3 && "Invalid number of operands!")(static_cast <bool> (N == 3 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 3 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3206, __extension__ __PRETTY_FUNCTION__)); | |||
3207 | unsigned Val = | |||
3208 | ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, | |||
3209 | Memory.ShiftImm, Memory.ShiftType); | |||
3210 | Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum)); | |||
3211 | Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum)); | |||
3212 | Inst.addOperand(MCOperand::createImm(Val)); | |||
3213 | } | |||
3214 | ||||
3215 | void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const { | |||
3216 | assert(N == 3 && "Invalid number of operands!")(static_cast <bool> (N == 3 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 3 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3216, __extension__ __PRETTY_FUNCTION__)); | |||
3217 | Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum)); | |||
3218 | Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum)); | |||
3219 | Inst.addOperand(MCOperand::createImm(Memory.ShiftImm)); | |||
3220 | } | |||
3221 | ||||
3222 | void addMemThumbRROperands(MCInst &Inst, unsigned N) const { | |||
3223 | assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3223, __extension__ __PRETTY_FUNCTION__)); | |||
3224 | Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum)); | |||
3225 | Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum)); | |||
3226 | } | |||
3227 | ||||
3228 | void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const { | |||
3229 | assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3229, __extension__ __PRETTY_FUNCTION__)); | |||
3230 | Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum)); | |||
3231 | if (!Memory.OffsetImm) | |||
3232 | Inst.addOperand(MCOperand::createImm(0)); | |||
3233 | else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) | |||
3234 | // The lower two bits are always zero and as such are not encoded. | |||
3235 | Inst.addOperand(MCOperand::createImm(CE->getValue() / 4)); | |||
3236 | else | |||
3237 | Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm)); | |||
3238 | } | |||
3239 | ||||
3240 | void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const { | |||
3241 | assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3241, __extension__ __PRETTY_FUNCTION__)); | |||
3242 | Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum)); | |||
3243 | if (!Memory.OffsetImm) | |||
3244 | Inst.addOperand(MCOperand::createImm(0)); | |||
3245 | else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) | |||
3246 | Inst.addOperand(MCOperand::createImm(CE->getValue() / 2)); | |||
3247 | else | |||
3248 | Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm)); | |||
3249 | } | |||
3250 | ||||
3251 | void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const { | |||
3252 | assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3252, __extension__ __PRETTY_FUNCTION__)); | |||
3253 | Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum)); | |||
3254 | addExpr(Inst, Memory.OffsetImm); | |||
3255 | } | |||
3256 | ||||
3257 | void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const { | |||
3258 | assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3258, __extension__ __PRETTY_FUNCTION__)); | |||
3259 | Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum)); | |||
3260 | if (!Memory.OffsetImm) | |||
3261 | Inst.addOperand(MCOperand::createImm(0)); | |||
3262 | else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) | |||
3263 | // The lower two bits are always zero and as such are not encoded. | |||
3264 | Inst.addOperand(MCOperand::createImm(CE->getValue() / 4)); | |||
3265 | else | |||
3266 | Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm)); | |||
3267 | } | |||
3268 | ||||
3269 | void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const { | |||
3270 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3270, __extension__ __PRETTY_FUNCTION__)); | |||
3271 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); | |||
3272 | assert(CE && "non-constant post-idx-imm8 operand!")(static_cast <bool> (CE && "non-constant post-idx-imm8 operand!" ) ? void (0) : __assert_fail ("CE && \"non-constant post-idx-imm8 operand!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3272, __extension__ __PRETTY_FUNCTION__)); | |||
3273 | int Imm = CE->getValue(); | |||
3274 | bool isAdd = Imm >= 0; | |||
3275 | if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0; | |||
3276 | Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8; | |||
3277 | Inst.addOperand(MCOperand::createImm(Imm)); | |||
3278 | } | |||
3279 | ||||
3280 | void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const { | |||
3281 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3281, __extension__ __PRETTY_FUNCTION__)); | |||
3282 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); | |||
3283 | assert(CE && "non-constant post-idx-imm8s4 operand!")(static_cast <bool> (CE && "non-constant post-idx-imm8s4 operand!" ) ? void (0) : __assert_fail ("CE && \"non-constant post-idx-imm8s4 operand!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3283, __extension__ __PRETTY_FUNCTION__)); | |||
3284 | int Imm = CE->getValue(); | |||
3285 | bool isAdd = Imm >= 0; | |||
3286 | if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0; | |||
3287 | // Immediate is scaled by 4. | |||
3288 | Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8; | |||
3289 | Inst.addOperand(MCOperand::createImm(Imm)); | |||
3290 | } | |||
3291 | ||||
3292 | void addPostIdxRegOperands(MCInst &Inst, unsigned N) const { | |||
3293 | assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3293, __extension__ __PRETTY_FUNCTION__)); | |||
3294 | Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum)); | |||
3295 | Inst.addOperand(MCOperand::createImm(PostIdxReg.isAdd)); | |||
3296 | } | |||
3297 | ||||
3298 | void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const { | |||
3299 | assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3299, __extension__ __PRETTY_FUNCTION__)); | |||
3300 | Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum)); | |||
3301 | // The sign, shift type, and shift amount are encoded in a single operand | |||
3302 | // using the AM2 encoding helpers. | |||
3303 | ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub; | |||
3304 | unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm, | |||
3305 | PostIdxReg.ShiftTy); | |||
3306 | Inst.addOperand(MCOperand::createImm(Imm)); | |||
3307 | } | |||
3308 | ||||
3309 | void addPowerTwoOperands(MCInst &Inst, unsigned N) const { | |||
3310 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3310, __extension__ __PRETTY_FUNCTION__)); | |||
3311 | const MCConstantExpr *CE = cast<MCConstantExpr>(getImm()); | |||
3312 | Inst.addOperand(MCOperand::createImm(CE->getValue())); | |||
3313 | } | |||
3314 | ||||
3315 | void addMSRMaskOperands(MCInst &Inst, unsigned N) const { | |||
3316 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3316, __extension__ __PRETTY_FUNCTION__)); | |||
3317 | Inst.addOperand(MCOperand::createImm(unsigned(getMSRMask()))); | |||
3318 | } | |||
3319 | ||||
3320 | void addBankedRegOperands(MCInst &Inst, unsigned N) const { | |||
3321 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3321, __extension__ __PRETTY_FUNCTION__)); | |||
3322 | Inst.addOperand(MCOperand::createImm(unsigned(getBankedReg()))); | |||
3323 | } | |||
3324 | ||||
3325 | void addProcIFlagsOperands(MCInst &Inst, unsigned N) const { | |||
3326 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3326, __extension__ __PRETTY_FUNCTION__)); | |||
3327 | Inst.addOperand(MCOperand::createImm(unsigned(getProcIFlags()))); | |||
3328 | } | |||
3329 | ||||
3330 | void addVecListOperands(MCInst &Inst, unsigned N) const { | |||
3331 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3331, __extension__ __PRETTY_FUNCTION__)); | |||
3332 | Inst.addOperand(MCOperand::createReg(VectorList.RegNum)); | |||
3333 | } | |||
3334 | ||||
3335 | void addMVEVecListOperands(MCInst &Inst, unsigned N) const { | |||
3336 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3336, __extension__ __PRETTY_FUNCTION__)); | |||
3337 | ||||
3338 | // When we come here, the VectorList field will identify a range | |||
3339 | // of q-registers by its base register and length, and it will | |||
3340 | // have already been error-checked to be the expected length of | |||
3341 | // range and contain only q-regs in the range q0-q7. So we can | |||
3342 | // count on the base register being in the range q0-q6 (for 2 | |||
3343 | // regs) or q0-q4 (for 4) | |||
3344 | // | |||
3345 | // The MVE instructions taking a register range of this kind will | |||
3346 | // need an operand in the MQQPR or MQQQQPR class, representing the | |||
3347 | // entire range as a unit. So we must translate into that class, | |||
3348 | // by finding the index of the base register in the MQPR reg | |||
3349 | // class, and returning the super-register at the corresponding | |||
3350 | // index in the target class. | |||
3351 | ||||
3352 | const MCRegisterClass *RC_in = &ARMMCRegisterClasses[ARM::MQPRRegClassID]; | |||
3353 | const MCRegisterClass *RC_out = | |||
3354 | (VectorList.Count == 2) ? &ARMMCRegisterClasses[ARM::MQQPRRegClassID] | |||
3355 | : &ARMMCRegisterClasses[ARM::MQQQQPRRegClassID]; | |||
3356 | ||||
3357 | unsigned I, E = RC_out->getNumRegs(); | |||
3358 | for (I = 0; I < E; I++) | |||
3359 | if (RC_in->getRegister(I) == VectorList.RegNum) | |||
3360 | break; | |||
3361 | assert(I < E && "Invalid vector list start register!")(static_cast <bool> (I < E && "Invalid vector list start register!" ) ? void (0) : __assert_fail ("I < E && \"Invalid vector list start register!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3361, __extension__ __PRETTY_FUNCTION__)); | |||
3362 | ||||
3363 | Inst.addOperand(MCOperand::createReg(RC_out->getRegister(I))); | |||
3364 | } | |||
3365 | ||||
3366 | void addVecListIndexedOperands(MCInst &Inst, unsigned N) const { | |||
3367 | assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3367, __extension__ __PRETTY_FUNCTION__)); | |||
3368 | Inst.addOperand(MCOperand::createReg(VectorList.RegNum)); | |||
3369 | Inst.addOperand(MCOperand::createImm(VectorList.LaneIndex)); | |||
3370 | } | |||
3371 | ||||
3372 | void addVectorIndex8Operands(MCInst &Inst, unsigned N) const { | |||
3373 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3373, __extension__ __PRETTY_FUNCTION__)); | |||
3374 | Inst.addOperand(MCOperand::createImm(getVectorIndex())); | |||
3375 | } | |||
3376 | ||||
3377 | void addVectorIndex16Operands(MCInst &Inst, unsigned N) const { | |||
3378 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3378, __extension__ __PRETTY_FUNCTION__)); | |||
3379 | Inst.addOperand(MCOperand::createImm(getVectorIndex())); | |||
3380 | } | |||
3381 | ||||
3382 | void addVectorIndex32Operands(MCInst &Inst, unsigned N) const { | |||
3383 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3383, __extension__ __PRETTY_FUNCTION__)); | |||
3384 | Inst.addOperand(MCOperand::createImm(getVectorIndex())); | |||
3385 | } | |||
3386 | ||||
3387 | void addVectorIndex64Operands(MCInst &Inst, unsigned N) const { | |||
3388 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3388, __extension__ __PRETTY_FUNCTION__)); | |||
3389 | Inst.addOperand(MCOperand::createImm(getVectorIndex())); | |||
3390 | } | |||
3391 | ||||
3392 | void addMVEVectorIndexOperands(MCInst &Inst, unsigned N) const { | |||
3393 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3393, __extension__ __PRETTY_FUNCTION__)); | |||
3394 | Inst.addOperand(MCOperand::createImm(getVectorIndex())); | |||
3395 | } | |||
3396 | ||||
3397 | void addMVEPairVectorIndexOperands(MCInst &Inst, unsigned N) const { | |||
3398 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3398, __extension__ __PRETTY_FUNCTION__)); | |||
3399 | Inst.addOperand(MCOperand::createImm(getVectorIndex())); | |||
3400 | } | |||
3401 | ||||
3402 | void addNEONi8splatOperands(MCInst &Inst, unsigned N) const { | |||
3403 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3403, __extension__ __PRETTY_FUNCTION__)); | |||
3404 | // The immediate encodes the type of constant as well as the value. | |||
3405 | // Mask in that this is an i8 splat. | |||
3406 | const MCConstantExpr *CE = cast<MCConstantExpr>(getImm()); | |||
3407 | Inst.addOperand(MCOperand::createImm(CE->getValue() | 0xe00)); | |||
3408 | } | |||
3409 | ||||
3410 | void addNEONi16splatOperands(MCInst &Inst, unsigned N) const { | |||
3411 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3411, __extension__ __PRETTY_FUNCTION__)); | |||
3412 | // The immediate encodes the type of constant as well as the value. | |||
3413 | const MCConstantExpr *CE = cast<MCConstantExpr>(getImm()); | |||
3414 | unsigned Value = CE->getValue(); | |||
3415 | Value = ARM_AM::encodeNEONi16splat(Value); | |||
3416 | Inst.addOperand(MCOperand::createImm(Value)); | |||
3417 | } | |||
3418 | ||||
3419 | void addNEONi16splatNotOperands(MCInst &Inst, unsigned N) const { | |||
3420 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3420, __extension__ __PRETTY_FUNCTION__)); | |||
3421 | // The immediate encodes the type of constant as well as the value. | |||
3422 | const MCConstantExpr *CE = cast<MCConstantExpr>(getImm()); | |||
3423 | unsigned Value = CE->getValue(); | |||
3424 | Value = ARM_AM::encodeNEONi16splat(~Value & 0xffff); | |||
3425 | Inst.addOperand(MCOperand::createImm(Value)); | |||
3426 | } | |||
3427 | ||||
3428 | void addNEONi32splatOperands(MCInst &Inst, unsigned N) const { | |||
3429 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3429, __extension__ __PRETTY_FUNCTION__)); | |||
3430 | // The immediate encodes the type of constant as well as the value. | |||
3431 | const MCConstantExpr *CE = cast<MCConstantExpr>(getImm()); | |||
3432 | unsigned Value = CE->getValue(); | |||
3433 | Value = ARM_AM::encodeNEONi32splat(Value); | |||
3434 | Inst.addOperand(MCOperand::createImm(Value)); | |||
3435 | } | |||
3436 | ||||
3437 | void addNEONi32splatNotOperands(MCInst &Inst, unsigned N) const { | |||
3438 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3438, __extension__ __PRETTY_FUNCTION__)); | |||
3439 | // The immediate encodes the type of constant as well as the value. | |||
3440 | const MCConstantExpr *CE = cast<MCConstantExpr>(getImm()); | |||
3441 | unsigned Value = CE->getValue(); | |||
3442 | Value = ARM_AM::encodeNEONi32splat(~Value); | |||
3443 | Inst.addOperand(MCOperand::createImm(Value)); | |||
3444 | } | |||
3445 | ||||
3446 | void addNEONi8ReplicateOperands(MCInst &Inst, bool Inv) const { | |||
3447 | // The immediate encodes the type of constant as well as the value. | |||
3448 | const MCConstantExpr *CE = cast<MCConstantExpr>(getImm()); | |||
3449 | assert((Inst.getOpcode() == ARM::VMOVv8i8 ||(static_cast <bool> ((Inst.getOpcode() == ARM::VMOVv8i8 || Inst.getOpcode() == ARM::VMOVv16i8) && "All instructions that wants to replicate non-zero byte " "always must be replaced with VMOVv8i8 or VMOVv16i8.") ? void (0) : __assert_fail ("(Inst.getOpcode() == ARM::VMOVv8i8 || Inst.getOpcode() == ARM::VMOVv16i8) && \"All instructions that wants to replicate non-zero byte \" \"always must be replaced with VMOVv8i8 or VMOVv16i8.\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3452, __extension__ __PRETTY_FUNCTION__)) | |||
3450 | Inst.getOpcode() == ARM::VMOVv16i8) &&(static_cast <bool> ((Inst.getOpcode() == ARM::VMOVv8i8 || Inst.getOpcode() == ARM::VMOVv16i8) && "All instructions that wants to replicate non-zero byte " "always must be replaced with VMOVv8i8 or VMOVv16i8.") ? void (0) : __assert_fail ("(Inst.getOpcode() == ARM::VMOVv8i8 || Inst.getOpcode() == ARM::VMOVv16i8) && \"All instructions that wants to replicate non-zero byte \" \"always must be replaced with VMOVv8i8 or VMOVv16i8.\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3452, __extension__ __PRETTY_FUNCTION__)) | |||
3451 | "All instructions that wants to replicate non-zero byte "(static_cast <bool> ((Inst.getOpcode() == ARM::VMOVv8i8 || Inst.getOpcode() == ARM::VMOVv16i8) && "All instructions that wants to replicate non-zero byte " "always must be replaced with VMOVv8i8 or VMOVv16i8.") ? void (0) : __assert_fail ("(Inst.getOpcode() == ARM::VMOVv8i8 || Inst.getOpcode() == ARM::VMOVv16i8) && \"All instructions that wants to replicate non-zero byte \" \"always must be replaced with VMOVv8i8 or VMOVv16i8.\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3452, __extension__ __PRETTY_FUNCTION__)) | |||
3452 | "always must be replaced with VMOVv8i8 or VMOVv16i8.")(static_cast <bool> ((Inst.getOpcode() == ARM::VMOVv8i8 || Inst.getOpcode() == ARM::VMOVv16i8) && "All instructions that wants to replicate non-zero byte " "always must be replaced with VMOVv8i8 or VMOVv16i8.") ? void (0) : __assert_fail ("(Inst.getOpcode() == ARM::VMOVv8i8 || Inst.getOpcode() == ARM::VMOVv16i8) && \"All instructions that wants to replicate non-zero byte \" \"always must be replaced with VMOVv8i8 or VMOVv16i8.\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3452, __extension__ __PRETTY_FUNCTION__)); | |||
3453 | unsigned Value = CE->getValue(); | |||
3454 | if (Inv) | |||
3455 | Value = ~Value; | |||
3456 | unsigned B = Value & 0xff; | |||
3457 | B |= 0xe00; // cmode = 0b1110 | |||
3458 | Inst.addOperand(MCOperand::createImm(B)); | |||
3459 | } | |||
3460 | ||||
3461 | void addNEONinvi8ReplicateOperands(MCInst &Inst, unsigned N) const { | |||
3462 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3462, __extension__ __PRETTY_FUNCTION__)); | |||
3463 | addNEONi8ReplicateOperands(Inst, true); | |||
3464 | } | |||
3465 | ||||
// Pack a 32-bit constant into the NEON VMOV immediate format: the 8
// surviving value bits are combined with cmode/op bits that record
// which byte of the word carried them (and whether the bits below were
// ones, for the "shifted-ones" forms).  Values below 256 pass through.
static unsigned encodeNeonVMOVImmediate(unsigned Value) {
  if (Value > 0xffffff)
    return (Value >> 24) | 0x600;
  if (Value > 0xffff)
    return (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
  if (Value >= 256)
    return (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
  return Value;
}
3475 | ||||
3476 | void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const { | |||
3477 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3477, __extension__ __PRETTY_FUNCTION__)); | |||
3478 | // The immediate encodes the type of constant as well as the value. | |||
3479 | const MCConstantExpr *CE = cast<MCConstantExpr>(getImm()); | |||
3480 | unsigned Value = encodeNeonVMOVImmediate(CE->getValue()); | |||
3481 | Inst.addOperand(MCOperand::createImm(Value)); | |||
3482 | } | |||
3483 | ||||
3484 | void addNEONvmovi8ReplicateOperands(MCInst &Inst, unsigned N) const { | |||
3485 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3485, __extension__ __PRETTY_FUNCTION__)); | |||
3486 | addNEONi8ReplicateOperands(Inst, false); | |||
3487 | } | |||
3488 | ||||
3489 | void addNEONvmovi16ReplicateOperands(MCInst &Inst, unsigned N) const { | |||
3490 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3490, __extension__ __PRETTY_FUNCTION__)); | |||
3491 | const MCConstantExpr *CE = cast<MCConstantExpr>(getImm()); | |||
3492 | assert((Inst.getOpcode() == ARM::VMOVv4i16 ||(static_cast <bool> ((Inst.getOpcode() == ARM::VMOVv4i16 || Inst.getOpcode() == ARM::VMOVv8i16 || Inst.getOpcode() == ARM::VMVNv4i16 || Inst.getOpcode() == ARM::VMVNv8i16) && "All instructions that want to replicate non-zero half-word " "always must be replaced with V{MOV,MVN}v{4,8}i16.") ? void ( 0) : __assert_fail ("(Inst.getOpcode() == ARM::VMOVv4i16 || Inst.getOpcode() == ARM::VMOVv8i16 || Inst.getOpcode() == ARM::VMVNv4i16 || Inst.getOpcode() == ARM::VMVNv8i16) && \"All instructions that want to replicate non-zero half-word \" \"always must be replaced with V{MOV,MVN}v{4,8}i16.\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3497, __extension__ __PRETTY_FUNCTION__)) | |||
3493 | Inst.getOpcode() == ARM::VMOVv8i16 ||(static_cast <bool> ((Inst.getOpcode() == ARM::VMOVv4i16 || Inst.getOpcode() == ARM::VMOVv8i16 || Inst.getOpcode() == ARM::VMVNv4i16 || Inst.getOpcode() == ARM::VMVNv8i16) && "All instructions that want to replicate non-zero half-word " "always must be replaced with V{MOV,MVN}v{4,8}i16.") ? void ( 0) : __assert_fail ("(Inst.getOpcode() == ARM::VMOVv4i16 || Inst.getOpcode() == ARM::VMOVv8i16 || Inst.getOpcode() == ARM::VMVNv4i16 || Inst.getOpcode() == ARM::VMVNv8i16) && \"All instructions that want to replicate non-zero half-word \" \"always must be replaced with V{MOV,MVN}v{4,8}i16.\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3497, __extension__ __PRETTY_FUNCTION__)) | |||
3494 | Inst.getOpcode() == ARM::VMVNv4i16 ||(static_cast <bool> ((Inst.getOpcode() == ARM::VMOVv4i16 || Inst.getOpcode() == ARM::VMOVv8i16 || Inst.getOpcode() == ARM::VMVNv4i16 || Inst.getOpcode() == ARM::VMVNv8i16) && "All instructions that want to replicate non-zero half-word " "always must be replaced with V{MOV,MVN}v{4,8}i16.") ? void ( 0) : __assert_fail ("(Inst.getOpcode() == ARM::VMOVv4i16 || Inst.getOpcode() == ARM::VMOVv8i16 || Inst.getOpcode() == ARM::VMVNv4i16 || Inst.getOpcode() == ARM::VMVNv8i16) && \"All instructions that want to replicate non-zero half-word \" \"always must be replaced with V{MOV,MVN}v{4,8}i16.\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3497, __extension__ __PRETTY_FUNCTION__)) | |||
3495 | Inst.getOpcode() == ARM::VMVNv8i16) &&(static_cast <bool> ((Inst.getOpcode() == ARM::VMOVv4i16 || Inst.getOpcode() == ARM::VMOVv8i16 || Inst.getOpcode() == ARM::VMVNv4i16 || Inst.getOpcode() == ARM::VMVNv8i16) && "All instructions that want to replicate non-zero half-word " "always must be replaced with V{MOV,MVN}v{4,8}i16.") ? void ( 0) : __assert_fail ("(Inst.getOpcode() == ARM::VMOVv4i16 || Inst.getOpcode() == ARM::VMOVv8i16 || Inst.getOpcode() == ARM::VMVNv4i16 || Inst.getOpcode() == ARM::VMVNv8i16) && \"All instructions that want to replicate non-zero half-word \" \"always must be replaced with V{MOV,MVN}v{4,8}i16.\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3497, __extension__ __PRETTY_FUNCTION__)) | |||
3496 | "All instructions that want to replicate non-zero half-word "(static_cast <bool> ((Inst.getOpcode() == ARM::VMOVv4i16 || Inst.getOpcode() == ARM::VMOVv8i16 || Inst.getOpcode() == ARM::VMVNv4i16 || Inst.getOpcode() == ARM::VMVNv8i16) && "All instructions that want to replicate non-zero half-word " "always must be replaced with V{MOV,MVN}v{4,8}i16.") ? void ( 0) : __assert_fail ("(Inst.getOpcode() == ARM::VMOVv4i16 || Inst.getOpcode() == ARM::VMOVv8i16 || Inst.getOpcode() == ARM::VMVNv4i16 || Inst.getOpcode() == ARM::VMVNv8i16) && \"All instructions that want to replicate non-zero half-word \" \"always must be replaced with V{MOV,MVN}v{4,8}i16.\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3497, __extension__ __PRETTY_FUNCTION__)) | |||
3497 | "always must be replaced with V{MOV,MVN}v{4,8}i16.")(static_cast <bool> ((Inst.getOpcode() == ARM::VMOVv4i16 || Inst.getOpcode() == ARM::VMOVv8i16 || Inst.getOpcode() == ARM::VMVNv4i16 || Inst.getOpcode() == ARM::VMVNv8i16) && "All instructions that want to replicate non-zero half-word " "always must be replaced with V{MOV,MVN}v{4,8}i16.") ? void ( 0) : __assert_fail ("(Inst.getOpcode() == ARM::VMOVv4i16 || Inst.getOpcode() == ARM::VMOVv8i16 || Inst.getOpcode() == ARM::VMVNv4i16 || Inst.getOpcode() == ARM::VMVNv8i16) && \"All instructions that want to replicate non-zero half-word \" \"always must be replaced with V{MOV,MVN}v{4,8}i16.\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3497, __extension__ __PRETTY_FUNCTION__)); | |||
3498 | uint64_t Value = CE->getValue(); | |||
3499 | unsigned Elem = Value & 0xffff; | |||
3500 | if (Elem >= 256) | |||
3501 | Elem = (Elem >> 8) | 0x200; | |||
3502 | Inst.addOperand(MCOperand::createImm(Elem)); | |||
3503 | } | |||
3504 | ||||
3505 | void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const { | |||
3506 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3506, __extension__ __PRETTY_FUNCTION__)); | |||
3507 | // The immediate encodes the type of constant as well as the value. | |||
3508 | const MCConstantExpr *CE = cast<MCConstantExpr>(getImm()); | |||
3509 | unsigned Value = encodeNeonVMOVImmediate(~CE->getValue()); | |||
3510 | Inst.addOperand(MCOperand::createImm(Value)); | |||
3511 | } | |||
3512 | ||||
3513 | void addNEONvmovi32ReplicateOperands(MCInst &Inst, unsigned N) const { | |||
3514 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3514, __extension__ __PRETTY_FUNCTION__)); | |||
3515 | const MCConstantExpr *CE = cast<MCConstantExpr>(getImm()); | |||
3516 | assert((Inst.getOpcode() == ARM::VMOVv2i32 ||(static_cast <bool> ((Inst.getOpcode() == ARM::VMOVv2i32 || Inst.getOpcode() == ARM::VMOVv4i32 || Inst.getOpcode() == ARM::VMVNv2i32 || Inst.getOpcode() == ARM::VMVNv4i32) && "All instructions that want to replicate non-zero word " "always must be replaced with V{MOV,MVN}v{2,4}i32." ) ? void (0) : __assert_fail ("(Inst.getOpcode() == ARM::VMOVv2i32 || Inst.getOpcode() == ARM::VMOVv4i32 || Inst.getOpcode() == ARM::VMVNv2i32 || Inst.getOpcode() == ARM::VMVNv4i32) && \"All instructions that want to replicate non-zero word \" \"always must be replaced with V{MOV,MVN}v{2,4}i32.\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3521, __extension__ __PRETTY_FUNCTION__)) | |||
3517 | Inst.getOpcode() == ARM::VMOVv4i32 ||(static_cast <bool> ((Inst.getOpcode() == ARM::VMOVv2i32 || Inst.getOpcode() == ARM::VMOVv4i32 || Inst.getOpcode() == ARM::VMVNv2i32 || Inst.getOpcode() == ARM::VMVNv4i32) && "All instructions that want to replicate non-zero word " "always must be replaced with V{MOV,MVN}v{2,4}i32." ) ? void (0) : __assert_fail ("(Inst.getOpcode() == ARM::VMOVv2i32 || Inst.getOpcode() == ARM::VMOVv4i32 || Inst.getOpcode() == ARM::VMVNv2i32 || Inst.getOpcode() == ARM::VMVNv4i32) && \"All instructions that want to replicate non-zero word \" \"always must be replaced with V{MOV,MVN}v{2,4}i32.\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3521, __extension__ __PRETTY_FUNCTION__)) | |||
3518 | Inst.getOpcode() == ARM::VMVNv2i32 ||(static_cast <bool> ((Inst.getOpcode() == ARM::VMOVv2i32 || Inst.getOpcode() == ARM::VMOVv4i32 || Inst.getOpcode() == ARM::VMVNv2i32 || Inst.getOpcode() == ARM::VMVNv4i32) && "All instructions that want to replicate non-zero word " "always must be replaced with V{MOV,MVN}v{2,4}i32." ) ? void (0) : __assert_fail ("(Inst.getOpcode() == ARM::VMOVv2i32 || Inst.getOpcode() == ARM::VMOVv4i32 || Inst.getOpcode() == ARM::VMVNv2i32 || Inst.getOpcode() == ARM::VMVNv4i32) && \"All instructions that want to replicate non-zero word \" \"always must be replaced with V{MOV,MVN}v{2,4}i32.\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3521, __extension__ __PRETTY_FUNCTION__)) | |||
3519 | Inst.getOpcode() == ARM::VMVNv4i32) &&(static_cast <bool> ((Inst.getOpcode() == ARM::VMOVv2i32 || Inst.getOpcode() == ARM::VMOVv4i32 || Inst.getOpcode() == ARM::VMVNv2i32 || Inst.getOpcode() == ARM::VMVNv4i32) && "All instructions that want to replicate non-zero word " "always must be replaced with V{MOV,MVN}v{2,4}i32." ) ? void (0) : __assert_fail ("(Inst.getOpcode() == ARM::VMOVv2i32 || Inst.getOpcode() == ARM::VMOVv4i32 || Inst.getOpcode() == ARM::VMVNv2i32 || Inst.getOpcode() == ARM::VMVNv4i32) && \"All instructions that want to replicate non-zero word \" \"always must be replaced with V{MOV,MVN}v{2,4}i32.\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3521, __extension__ __PRETTY_FUNCTION__)) | |||
3520 | "All instructions that want to replicate non-zero word "(static_cast <bool> ((Inst.getOpcode() == ARM::VMOVv2i32 || Inst.getOpcode() == ARM::VMOVv4i32 || Inst.getOpcode() == ARM::VMVNv2i32 || Inst.getOpcode() == ARM::VMVNv4i32) && "All instructions that want to replicate non-zero word " "always must be replaced with V{MOV,MVN}v{2,4}i32." ) ? void (0) : __assert_fail ("(Inst.getOpcode() == ARM::VMOVv2i32 || Inst.getOpcode() == ARM::VMOVv4i32 || Inst.getOpcode() == ARM::VMVNv2i32 || Inst.getOpcode() == ARM::VMVNv4i32) && \"All instructions that want to replicate non-zero word \" \"always must be replaced with V{MOV,MVN}v{2,4}i32.\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3521, __extension__ __PRETTY_FUNCTION__)) | |||
3521 | "always must be replaced with V{MOV,MVN}v{2,4}i32.")(static_cast <bool> ((Inst.getOpcode() == ARM::VMOVv2i32 || Inst.getOpcode() == ARM::VMOVv4i32 || Inst.getOpcode() == ARM::VMVNv2i32 || Inst.getOpcode() == ARM::VMVNv4i32) && "All instructions that want to replicate non-zero word " "always must be replaced with V{MOV,MVN}v{2,4}i32." ) ? void (0) : __assert_fail ("(Inst.getOpcode() == ARM::VMOVv2i32 || Inst.getOpcode() == ARM::VMOVv4i32 || Inst.getOpcode() == ARM::VMVNv2i32 || Inst.getOpcode() == ARM::VMVNv4i32) && \"All instructions that want to replicate non-zero word \" \"always must be replaced with V{MOV,MVN}v{2,4}i32.\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3521, __extension__ __PRETTY_FUNCTION__)); | |||
3522 | uint64_t Value = CE->getValue(); | |||
3523 | unsigned Elem = encodeNeonVMOVImmediate(Value & 0xffffffff); | |||
3524 | Inst.addOperand(MCOperand::createImm(Elem)); | |||
3525 | } | |||
3526 | ||||
3527 | void addNEONi64splatOperands(MCInst &Inst, unsigned N) const { | |||
3528 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3528, __extension__ __PRETTY_FUNCTION__)); | |||
3529 | // The immediate encodes the type of constant as well as the value. | |||
3530 | const MCConstantExpr *CE = cast<MCConstantExpr>(getImm()); | |||
3531 | uint64_t Value = CE->getValue(); | |||
3532 | unsigned Imm = 0; | |||
3533 | for (unsigned i = 0; i < 8; ++i, Value >>= 8) { | |||
3534 | Imm |= (Value & 1) << i; | |||
3535 | } | |||
3536 | Inst.addOperand(MCOperand::createImm(Imm | 0x1e00)); | |||
3537 | } | |||
3538 | ||||
3539 | void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const { | |||
3540 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3540, __extension__ __PRETTY_FUNCTION__)); | |||
3541 | const MCConstantExpr *CE = cast<MCConstantExpr>(getImm()); | |||
3542 | Inst.addOperand(MCOperand::createImm(CE->getValue() / 90)); | |||
3543 | } | |||
3544 | ||||
3545 | void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const { | |||
3546 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3546, __extension__ __PRETTY_FUNCTION__)); | |||
3547 | const MCConstantExpr *CE = cast<MCConstantExpr>(getImm()); | |||
3548 | Inst.addOperand(MCOperand::createImm((CE->getValue() - 90) / 180)); | |||
3549 | } | |||
3550 | ||||
3551 | void addMveSaturateOperands(MCInst &Inst, unsigned N) const { | |||
3552 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3552, __extension__ __PRETTY_FUNCTION__)); | |||
3553 | const MCConstantExpr *CE = cast<MCConstantExpr>(getImm()); | |||
3554 | unsigned Imm = CE->getValue(); | |||
3555 | assert((Imm == 48 || Imm == 64) && "Invalid saturate operand")(static_cast <bool> ((Imm == 48 || Imm == 64) && "Invalid saturate operand") ? void (0) : __assert_fail ("(Imm == 48 || Imm == 64) && \"Invalid saturate operand\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3555, __extension__ __PRETTY_FUNCTION__)); | |||
3556 | Inst.addOperand(MCOperand::createImm(Imm == 48 ? 1 : 0)); | |||
3557 | } | |||
3558 | ||||
3559 | void print(raw_ostream &OS) const override; | |||
3560 | ||||
3561 | static std::unique_ptr<ARMOperand> CreateITMask(unsigned Mask, SMLoc S) { | |||
3562 | auto Op = std::make_unique<ARMOperand>(k_ITCondMask); | |||
3563 | Op->ITMask.Mask = Mask; | |||
3564 | Op->StartLoc = S; | |||
3565 | Op->EndLoc = S; | |||
3566 | return Op; | |||
3567 | } | |||
3568 | ||||
3569 | static std::unique_ptr<ARMOperand> CreateCondCode(ARMCC::CondCodes CC, | |||
3570 | SMLoc S) { | |||
3571 | auto Op = std::make_unique<ARMOperand>(k_CondCode); | |||
3572 | Op->CC.Val = CC; | |||
3573 | Op->StartLoc = S; | |||
3574 | Op->EndLoc = S; | |||
3575 | return Op; | |||
3576 | } | |||
3577 | ||||
3578 | static std::unique_ptr<ARMOperand> CreateVPTPred(ARMVCC::VPTCodes CC, | |||
3579 | SMLoc S) { | |||
3580 | auto Op = std::make_unique<ARMOperand>(k_VPTPred); | |||
3581 | Op->VCC.Val = CC; | |||
3582 | Op->StartLoc = S; | |||
3583 | Op->EndLoc = S; | |||
3584 | return Op; | |||
3585 | } | |||
3586 | ||||
3587 | static std::unique_ptr<ARMOperand> CreateCoprocNum(unsigned CopVal, SMLoc S) { | |||
3588 | auto Op = std::make_unique<ARMOperand>(k_CoprocNum); | |||
3589 | Op->Cop.Val = CopVal; | |||
3590 | Op->StartLoc = S; | |||
3591 | Op->EndLoc = S; | |||
3592 | return Op; | |||
3593 | } | |||
3594 | ||||
3595 | static std::unique_ptr<ARMOperand> CreateCoprocReg(unsigned CopVal, SMLoc S) { | |||
3596 | auto Op = std::make_unique<ARMOperand>(k_CoprocReg); | |||
3597 | Op->Cop.Val = CopVal; | |||
3598 | Op->StartLoc = S; | |||
3599 | Op->EndLoc = S; | |||
3600 | return Op; | |||
3601 | } | |||
3602 | ||||
3603 | static std::unique_ptr<ARMOperand> CreateCoprocOption(unsigned Val, SMLoc S, | |||
3604 | SMLoc E) { | |||
3605 | auto Op = std::make_unique<ARMOperand>(k_CoprocOption); | |||
3606 | Op->Cop.Val = Val; | |||
3607 | Op->StartLoc = S; | |||
3608 | Op->EndLoc = E; | |||
3609 | return Op; | |||
3610 | } | |||
3611 | ||||
3612 | static std::unique_ptr<ARMOperand> CreateCCOut(unsigned RegNum, SMLoc S) { | |||
3613 | auto Op = std::make_unique<ARMOperand>(k_CCOut); | |||
3614 | Op->Reg.RegNum = RegNum; | |||
3615 | Op->StartLoc = S; | |||
3616 | Op->EndLoc = S; | |||
3617 | return Op; | |||
3618 | } | |||
3619 | ||||
3620 | static std::unique_ptr<ARMOperand> CreateToken(StringRef Str, SMLoc S) { | |||
3621 | auto Op = std::make_unique<ARMOperand>(k_Token); | |||
3622 | Op->Tok.Data = Str.data(); | |||
3623 | Op->Tok.Length = Str.size(); | |||
3624 | Op->StartLoc = S; | |||
3625 | Op->EndLoc = S; | |||
3626 | return Op; | |||
3627 | } | |||
3628 | ||||
3629 | static std::unique_ptr<ARMOperand> CreateReg(unsigned RegNum, SMLoc S, | |||
3630 | SMLoc E) { | |||
3631 | auto Op = std::make_unique<ARMOperand>(k_Register); | |||
3632 | Op->Reg.RegNum = RegNum; | |||
3633 | Op->StartLoc = S; | |||
3634 | Op->EndLoc = E; | |||
3635 | return Op; | |||
3636 | } | |||
3637 | ||||
3638 | static std::unique_ptr<ARMOperand> | |||
3639 | CreateShiftedRegister(ARM_AM::ShiftOpc ShTy, unsigned SrcReg, | |||
3640 | unsigned ShiftReg, unsigned ShiftImm, SMLoc S, | |||
3641 | SMLoc E) { | |||
3642 | auto Op = std::make_unique<ARMOperand>(k_ShiftedRegister); | |||
3643 | Op->RegShiftedReg.ShiftTy = ShTy; | |||
3644 | Op->RegShiftedReg.SrcReg = SrcReg; | |||
3645 | Op->RegShiftedReg.ShiftReg = ShiftReg; | |||
3646 | Op->RegShiftedReg.ShiftImm = ShiftImm; | |||
3647 | Op->StartLoc = S; | |||
3648 | Op->EndLoc = E; | |||
3649 | return Op; | |||
3650 | } | |||
3651 | ||||
3652 | static std::unique_ptr<ARMOperand> | |||
3653 | CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy, unsigned SrcReg, | |||
3654 | unsigned ShiftImm, SMLoc S, SMLoc E) { | |||
3655 | auto Op = std::make_unique<ARMOperand>(k_ShiftedImmediate); | |||
3656 | Op->RegShiftedImm.ShiftTy = ShTy; | |||
3657 | Op->RegShiftedImm.SrcReg = SrcReg; | |||
3658 | Op->RegShiftedImm.ShiftImm = ShiftImm; | |||
3659 | Op->StartLoc = S; | |||
3660 | Op->EndLoc = E; | |||
3661 | return Op; | |||
3662 | } | |||
3663 | ||||
3664 | static std::unique_ptr<ARMOperand> CreateShifterImm(bool isASR, unsigned Imm, | |||
3665 | SMLoc S, SMLoc E) { | |||
3666 | auto Op = std::make_unique<ARMOperand>(k_ShifterImmediate); | |||
3667 | Op->ShifterImm.isASR = isASR; | |||
3668 | Op->ShifterImm.Imm = Imm; | |||
3669 | Op->StartLoc = S; | |||
3670 | Op->EndLoc = E; | |||
3671 | return Op; | |||
3672 | } | |||
3673 | ||||
3674 | static std::unique_ptr<ARMOperand> CreateRotImm(unsigned Imm, SMLoc S, | |||
3675 | SMLoc E) { | |||
3676 | auto Op = std::make_unique<ARMOperand>(k_RotateImmediate); | |||
3677 | Op->RotImm.Imm = Imm; | |||
3678 | Op->StartLoc = S; | |||
3679 | Op->EndLoc = E; | |||
3680 | return Op; | |||
3681 | } | |||
3682 | ||||
3683 | static std::unique_ptr<ARMOperand> CreateModImm(unsigned Bits, unsigned Rot, | |||
3684 | SMLoc S, SMLoc E) { | |||
3685 | auto Op = std::make_unique<ARMOperand>(k_ModifiedImmediate); | |||
3686 | Op->ModImm.Bits = Bits; | |||
3687 | Op->ModImm.Rot = Rot; | |||
3688 | Op->StartLoc = S; | |||
3689 | Op->EndLoc = E; | |||
3690 | return Op; | |||
3691 | } | |||
3692 | ||||
3693 | static std::unique_ptr<ARMOperand> | |||
3694 | CreateConstantPoolImm(const MCExpr *Val, SMLoc S, SMLoc E) { | |||
3695 | auto Op = std::make_unique<ARMOperand>(k_ConstantPoolImmediate); | |||
3696 | Op->Imm.Val = Val; | |||
3697 | Op->StartLoc = S; | |||
3698 | Op->EndLoc = E; | |||
3699 | return Op; | |||
3700 | } | |||
3701 | ||||
3702 | static std::unique_ptr<ARMOperand> | |||
3703 | CreateBitfield(unsigned LSB, unsigned Width, SMLoc S, SMLoc E) { | |||
3704 | auto Op = std::make_unique<ARMOperand>(k_BitfieldDescriptor); | |||
3705 | Op->Bitfield.LSB = LSB; | |||
3706 | Op->Bitfield.Width = Width; | |||
3707 | Op->StartLoc = S; | |||
3708 | Op->EndLoc = E; | |||
3709 | return Op; | |||
3710 | } | |||
3711 | ||||
  // Build a register-list operand from parsed (encoding, register) pairs.
  // The concrete operand kind is chosen from the register class of the
  // first register, refined by a trailing VPR or APSR entry.
  static std::unique_ptr<ARMOperand>
  CreateRegList(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
                SMLoc StartLoc, SMLoc EndLoc) {
    assert(Regs.size() > 0 && "RegList contains no registers?")(static_cast <bool> (Regs.size() > 0 && "RegList contains no registers?" ) ? void (0) : __assert_fail ("Regs.size() > 0 && \"RegList contains no registers?\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3715, __extension__ __PRETTY_FUNCTION__));
    // Default to a core-register list; refined below for D/S lists.
    KindTy Kind = k_RegisterList;

    // D-register list; gets a distinct kind when terminated by VPR.
    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
            Regs.front().second)) {
      if (Regs.back().second == ARM::VPR)
        Kind = k_FPDRegisterListWithVPR;
      else
        Kind = k_DPRRegisterList;
    } else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(
                   Regs.front().second)) {
      // S-register list; likewise distinguished by a trailing VPR.
      if (Regs.back().second == ARM::VPR)
        Kind = k_FPSRegisterListWithVPR;
      else
        Kind = k_SPRRegisterList;
    }

    // Core-register list terminated by APSR gets its own kind.
    if (Kind == k_RegisterList && Regs.back().second == ARM::APSR)
      Kind = k_RegisterListWithAPSR;

    assert(llvm::is_sorted(Regs) && "Register list must be sorted by encoding")(static_cast <bool> (llvm::is_sorted(Regs) && "Register list must be sorted by encoding" ) ? void (0) : __assert_fail ("llvm::is_sorted(Regs) && \"Register list must be sorted by encoding\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3735, __extension__ __PRETTY_FUNCTION__));

    auto Op = std::make_unique<ARMOperand>(Kind);
    // Only the register numbers (pair::second) are retained; the encoding
    // (pair::first) was needed just for the sortedness check above.
    for (const auto &P : Regs)
      Op->Registers.push_back(P.second);

    Op->StartLoc = StartLoc;
    Op->EndLoc = EndLoc;
    return Op;
  }
3745 | ||||
3746 | static std::unique_ptr<ARMOperand> CreateVectorList(unsigned RegNum, | |||
3747 | unsigned Count, | |||
3748 | bool isDoubleSpaced, | |||
3749 | SMLoc S, SMLoc E) { | |||
3750 | auto Op = std::make_unique<ARMOperand>(k_VectorList); | |||
3751 | Op->VectorList.RegNum = RegNum; | |||
3752 | Op->VectorList.Count = Count; | |||
3753 | Op->VectorList.isDoubleSpaced = isDoubleSpaced; | |||
3754 | Op->StartLoc = S; | |||
3755 | Op->EndLoc = E; | |||
3756 | return Op; | |||
3757 | } | |||
3758 | ||||
3759 | static std::unique_ptr<ARMOperand> | |||
3760 | CreateVectorListAllLanes(unsigned RegNum, unsigned Count, bool isDoubleSpaced, | |||
3761 | SMLoc S, SMLoc E) { | |||
3762 | auto Op = std::make_unique<ARMOperand>(k_VectorListAllLanes); | |||
3763 | Op->VectorList.RegNum = RegNum; | |||
3764 | Op->VectorList.Count = Count; | |||
3765 | Op->VectorList.isDoubleSpaced = isDoubleSpaced; | |||
3766 | Op->StartLoc = S; | |||
3767 | Op->EndLoc = E; | |||
3768 | return Op; | |||
3769 | } | |||
3770 | ||||
3771 | static std::unique_ptr<ARMOperand> | |||
3772 | CreateVectorListIndexed(unsigned RegNum, unsigned Count, unsigned Index, | |||
3773 | bool isDoubleSpaced, SMLoc S, SMLoc E) { | |||
3774 | auto Op = std::make_unique<ARMOperand>(k_VectorListIndexed); | |||
3775 | Op->VectorList.RegNum = RegNum; | |||
3776 | Op->VectorList.Count = Count; | |||
3777 | Op->VectorList.LaneIndex = Index; | |||
3778 | Op->VectorList.isDoubleSpaced = isDoubleSpaced; | |||
3779 | Op->StartLoc = S; | |||
3780 | Op->EndLoc = E; | |||
3781 | return Op; | |||
3782 | } | |||
3783 | ||||
3784 | static std::unique_ptr<ARMOperand> | |||
3785 | CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) { | |||
3786 | auto Op = std::make_unique<ARMOperand>(k_VectorIndex); | |||
3787 | Op->VectorIndex.Val = Idx; | |||
3788 | Op->StartLoc = S; | |||
3789 | Op->EndLoc = E; | |||
3790 | return Op; | |||
3791 | } | |||
3792 | ||||
3793 | static std::unique_ptr<ARMOperand> CreateImm(const MCExpr *Val, SMLoc S, | |||
3794 | SMLoc E) { | |||
3795 | auto Op = std::make_unique<ARMOperand>(k_Immediate); | |||
3796 | Op->Imm.Val = Val; | |||
3797 | Op->StartLoc = S; | |||
3798 | Op->EndLoc = E; | |||
3799 | return Op; | |||
3800 | } | |||
3801 | ||||
3802 | static std::unique_ptr<ARMOperand> | |||
3803 | CreateMem(unsigned BaseRegNum, const MCExpr *OffsetImm, unsigned OffsetRegNum, | |||
3804 | ARM_AM::ShiftOpc ShiftType, unsigned ShiftImm, unsigned Alignment, | |||
3805 | bool isNegative, SMLoc S, SMLoc E, SMLoc AlignmentLoc = SMLoc()) { | |||
3806 | auto Op = std::make_unique<ARMOperand>(k_Memory); | |||
3807 | Op->Memory.BaseRegNum = BaseRegNum; | |||
3808 | Op->Memory.OffsetImm = OffsetImm; | |||
3809 | Op->Memory.OffsetRegNum = OffsetRegNum; | |||
3810 | Op->Memory.ShiftType = ShiftType; | |||
3811 | Op->Memory.ShiftImm = ShiftImm; | |||
3812 | Op->Memory.Alignment = Alignment; | |||
3813 | Op->Memory.isNegative = isNegative; | |||
3814 | Op->StartLoc = S; | |||
3815 | Op->EndLoc = E; | |||
3816 | Op->AlignmentLoc = AlignmentLoc; | |||
3817 | return Op; | |||
3818 | } | |||
3819 | ||||
3820 | static std::unique_ptr<ARMOperand> | |||
3821 | CreatePostIdxReg(unsigned RegNum, bool isAdd, ARM_AM::ShiftOpc ShiftTy, | |||
3822 | unsigned ShiftImm, SMLoc S, SMLoc E) { | |||
3823 | auto Op = std::make_unique<ARMOperand>(k_PostIndexRegister); | |||
3824 | Op->PostIdxReg.RegNum = RegNum; | |||
3825 | Op->PostIdxReg.isAdd = isAdd; | |||
3826 | Op->PostIdxReg.ShiftTy = ShiftTy; | |||
3827 | Op->PostIdxReg.ShiftImm = ShiftImm; | |||
3828 | Op->StartLoc = S; | |||
3829 | Op->EndLoc = E; | |||
3830 | return Op; | |||
3831 | } | |||
3832 | ||||
3833 | static std::unique_ptr<ARMOperand> CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, | |||
3834 | SMLoc S) { | |||
3835 | auto Op = std::make_unique<ARMOperand>(k_MemBarrierOpt); | |||
3836 | Op->MBOpt.Val = Opt; | |||
3837 | Op->StartLoc = S; | |||
3838 | Op->EndLoc = S; | |||
3839 | return Op; | |||
3840 | } | |||
3841 | ||||
3842 | static std::unique_ptr<ARMOperand> | |||
3843 | CreateInstSyncBarrierOpt(ARM_ISB::InstSyncBOpt Opt, SMLoc S) { | |||
3844 | auto Op = std::make_unique<ARMOperand>(k_InstSyncBarrierOpt); | |||
3845 | Op->ISBOpt.Val = Opt; | |||
3846 | Op->StartLoc = S; | |||
3847 | Op->EndLoc = S; | |||
3848 | return Op; | |||
3849 | } | |||
3850 | ||||
3851 | static std::unique_ptr<ARMOperand> | |||
3852 | CreateTraceSyncBarrierOpt(ARM_TSB::TraceSyncBOpt Opt, SMLoc S) { | |||
3853 | auto Op = std::make_unique<ARMOperand>(k_TraceSyncBarrierOpt); | |||
3854 | Op->TSBOpt.Val = Opt; | |||
3855 | Op->StartLoc = S; | |||
3856 | Op->EndLoc = S; | |||
3857 | return Op; | |||
3858 | } | |||
3859 | ||||
3860 | static std::unique_ptr<ARMOperand> CreateProcIFlags(ARM_PROC::IFlags IFlags, | |||
3861 | SMLoc S) { | |||
3862 | auto Op = std::make_unique<ARMOperand>(k_ProcIFlags); | |||
3863 | Op->IFlags.Val = IFlags; | |||
3864 | Op->StartLoc = S; | |||
3865 | Op->EndLoc = S; | |||
3866 | return Op; | |||
3867 | } | |||
3868 | ||||
3869 | static std::unique_ptr<ARMOperand> CreateMSRMask(unsigned MMask, SMLoc S) { | |||
3870 | auto Op = std::make_unique<ARMOperand>(k_MSRMask); | |||
3871 | Op->MMask.Val = MMask; | |||
3872 | Op->StartLoc = S; | |||
3873 | Op->EndLoc = S; | |||
3874 | return Op; | |||
3875 | } | |||
3876 | ||||
3877 | static std::unique_ptr<ARMOperand> CreateBankedReg(unsigned Reg, SMLoc S) { | |||
3878 | auto Op = std::make_unique<ARMOperand>(k_BankedReg); | |||
3879 | Op->BankedReg.Val = Reg; | |||
3880 | Op->StartLoc = S; | |||
3881 | Op->EndLoc = S; | |||
3882 | return Op; | |||
3883 | } | |||
3884 | }; | |||
3885 | ||||
3886 | } // end anonymous namespace. | |||
3887 | ||||
// Debug-print this operand to OS in a kind-specific, human-readable form.
// The output is consumed only by humans (operand dumps/diagnostics), not by
// the assembler itself.
void ARMOperand::print(raw_ostream &OS) const {
  // Helper: printable name for a register, with 0 mapped to "noreg".
  auto RegName = [](unsigned Reg) {
    if (Reg)
      return ARMInstPrinter::getRegisterName(Reg);
    else
      return "noreg";
  };

  switch (Kind) {
  case k_CondCode:
    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
    break;
  case k_VPTPred:
    OS << "<ARMVCC::" << ARMVPTPredToString(getVPTPred()) << ">";
    break;
  case k_CCOut:
    OS << "<ccout " << RegName(getReg()) << ">";
    break;
  case k_ITCondMask: {
    // Printable forms of the sixteen possible 4-bit IT masks; index 0 is
    // not a valid mask encoding.
    static const char *const MaskStr[] = {
      "(invalid)", "(tttt)", "(ttt)", "(ttte)",
      "(tt)", "(ttet)", "(tte)", "(ttee)",
      "(t)", "(tett)", "(tet)", "(tete)",
      "(te)", "(teet)", "(tee)", "(teee)",
    };
    assert((ITMask.Mask & 0xf) == ITMask.Mask)(static_cast <bool> ((ITMask.Mask & 0xf) == ITMask.Mask) ? void (0) : __assert_fail ("(ITMask.Mask & 0xf) == ITMask.Mask" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 3913, __extension__ __PRETTY_FUNCTION__));
    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
    break;
  }
  case k_CoprocNum:
    OS << "<coprocessor number: " << getCoproc() << ">";
    break;
  case k_CoprocReg:
    OS << "<coprocessor register: " << getCoproc() << ">";
    break;
  case k_CoprocOption:
    OS << "<coprocessor option: " << CoprocOption.Val << ">";
    break;
  case k_MSRMask:
    OS << "<mask: " << getMSRMask() << ">";
    break;
  case k_BankedReg:
    OS << "<banked reg: " << getBankedReg() << ">";
    break;
  case k_Immediate:
    OS << *getImm();
    break;
  case k_MemBarrierOpt:
    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt(), false) << ">";
    break;
  case k_InstSyncBarrierOpt:
    OS << "<ARM_ISB::" << InstSyncBOptToString(getInstSyncBarrierOpt()) << ">";
    break;
  case k_TraceSyncBarrierOpt:
    OS << "<ARM_TSB::" << TraceSyncBOptToString(getTraceSyncBarrierOpt()) << ">";
    break;
  case k_Memory:
    // Only the sub-fields that are actually present are printed.
    OS << "<memory";
    if (Memory.BaseRegNum)
      OS << " base:" << RegName(Memory.BaseRegNum);
    if (Memory.OffsetImm)
      OS << " offset-imm:" << *Memory.OffsetImm;
    if (Memory.OffsetRegNum)
      OS << " offset-reg:" << (Memory.isNegative ? "-" : "")
         << RegName(Memory.OffsetRegNum);
    if (Memory.ShiftType != ARM_AM::no_shift) {
      OS << " shift-type:" << ARM_AM::getShiftOpcStr(Memory.ShiftType);
      OS << " shift-imm:" << Memory.ShiftImm;
    }
    if (Memory.Alignment)
      OS << " alignment:" << Memory.Alignment;
    OS << ">";
    break;
  case k_PostIndexRegister:
    // NOTE(review): this case emits a closing ">" with no matching opening
    // "<" -- looks like a long-standing quirk in the debug format.
    OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
       << RegName(PostIdxReg.RegNum);
    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
         << PostIdxReg.ShiftImm;
    OS << ">";
    break;
  case k_ProcIFlags: {
    OS << "<ARM_PROC::";
    unsigned IFlags = getProcIFlags();
    // Print the set flags from the highest bit (bit 2) down.
    for (int i=2; i >= 0; --i)
      if (IFlags & (1 << i))
        OS << ARM_PROC::IFlagsToString(1 << i);
    OS << ">";
    break;
  }
  case k_Register:
    OS << "<register " << RegName(getReg()) << ">";
    break;
  case k_ShifterImmediate:
    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
       << " #" << ShifterImm.Imm << ">";
    break;
  case k_ShiftedRegister:
    OS << "<so_reg_reg " << RegName(RegShiftedReg.SrcReg) << " "
       << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy) << " "
       << RegName(RegShiftedReg.ShiftReg) << ">";
    break;
  case k_ShiftedImmediate:
    OS << "<so_reg_imm " << RegName(RegShiftedImm.SrcReg) << " "
       << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy) << " #"
       << RegShiftedImm.ShiftImm << ">";
    break;
  case k_RotateImmediate:
    // RotImm.Imm is stored in units of 8-bit rotations, hence the * 8.
    OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
    break;
  case k_ModifiedImmediate:
    // NOTE(review): closes with ")>" although it opened with "<" only --
    // apparent typo in the debug string.
    OS << "<mod_imm #" << ModImm.Bits << ", #"
       << ModImm.Rot << ")>";
    break;
  case k_ConstantPoolImmediate:
    // NOTE(review): no closing ">" is printed for this kind.
    OS << "<constant_pool_imm #" << *getConstantPoolImm();
    break;
  case k_BitfieldDescriptor:
    OS << "<bitfield " << "lsb: " << Bitfield.LSB
       << ", width: " << Bitfield.Width << ">";
    break;
  case k_RegisterList:
  case k_RegisterListWithAPSR:
  case k_DPRRegisterList:
  case k_SPRRegisterList:
  case k_FPSRegisterListWithVPR:
  case k_FPDRegisterListWithVPR: {
    OS << "<register_list ";

    const SmallVectorImpl<unsigned> &RegList = getRegList();
    // Comma-separate all but the last register.
    for (SmallVectorImpl<unsigned>::const_iterator
           I = RegList.begin(), E = RegList.end(); I != E; ) {
      OS << RegName(*I);
      if (++I < E) OS << ", ";
    }

    OS << ">";
    break;
  }
  case k_VectorList:
    OS << "<vector_list " << VectorList.Count << " * "
       << RegName(VectorList.RegNum) << ">";
    break;
  case k_VectorListAllLanes:
    OS << "<vector_list(all lanes) " << VectorList.Count << " * "
       << RegName(VectorList.RegNum) << ">";
    break;
  case k_VectorListIndexed:
    OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
       << VectorList.Count << " * " << RegName(VectorList.RegNum) << ">";
    break;
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
  }
}
4047 | ||||
4048 | /// @name Auto-generated Match Functions | |||
4049 | /// { | |||
4050 | ||||
4051 | static unsigned MatchRegisterName(StringRef Name); | |||
4052 | ||||
4053 | /// } | |||
4054 | ||||
4055 | bool ARMAsmParser::ParseRegister(unsigned &RegNo, | |||
4056 | SMLoc &StartLoc, SMLoc &EndLoc) { | |||
4057 | const AsmToken &Tok = getParser().getTok(); | |||
4058 | StartLoc = Tok.getLoc(); | |||
4059 | EndLoc = Tok.getEndLoc(); | |||
4060 | RegNo = tryParseRegister(); | |||
4061 | ||||
4062 | return (RegNo == (unsigned)-1); | |||
4063 | } | |||
4064 | ||||
4065 | OperandMatchResultTy ARMAsmParser::tryParseRegister(unsigned &RegNo, | |||
4066 | SMLoc &StartLoc, | |||
4067 | SMLoc &EndLoc) { | |||
4068 | if (ParseRegister(RegNo, StartLoc, EndLoc)) | |||
4069 | return MatchOperand_NoMatch; | |||
4070 | return MatchOperand_Success; | |||
4071 | } | |||
4072 | ||||
4073 | /// Try to parse a register name. The token must be an Identifier when called, | |||
4074 | /// and if it is a register name the token is eaten and the register number is | |||
4075 | /// returned. Otherwise return -1. | |||
4076 | int ARMAsmParser::tryParseRegister() { | |||
4077 | MCAsmParser &Parser = getParser(); | |||
4078 | const AsmToken &Tok = Parser.getTok(); | |||
4079 | if (Tok.isNot(AsmToken::Identifier)) return -1; | |||
4080 | ||||
4081 | std::string lowerCase = Tok.getString().lower(); | |||
4082 | unsigned RegNum = MatchRegisterName(lowerCase); | |||
4083 | if (!RegNum) { | |||
4084 | RegNum = StringSwitch<unsigned>(lowerCase) | |||
4085 | .Case("r13", ARM::SP) | |||
4086 | .Case("r14", ARM::LR) | |||
4087 | .Case("r15", ARM::PC) | |||
4088 | .Case("ip", ARM::R12) | |||
4089 | // Additional register name aliases for 'gas' compatibility. | |||
4090 | .Case("a1", ARM::R0) | |||
4091 | .Case("a2", ARM::R1) | |||
4092 | .Case("a3", ARM::R2) | |||
4093 | .Case("a4", ARM::R3) | |||
4094 | .Case("v1", ARM::R4) | |||
4095 | .Case("v2", ARM::R5) | |||
4096 | .Case("v3", ARM::R6) | |||
4097 | .Case("v4", ARM::R7) | |||
4098 | .Case("v5", ARM::R8) | |||
4099 | .Case("v6", ARM::R9) | |||
4100 | .Case("v7", ARM::R10) | |||
4101 | .Case("v8", ARM::R11) | |||
4102 | .Case("sb", ARM::R9) | |||
4103 | .Case("sl", ARM::R10) | |||
4104 | .Case("fp", ARM::R11) | |||
4105 | .Default(0); | |||
4106 | } | |||
4107 | if (!RegNum) { | |||
4108 | // Check for aliases registered via .req. Canonicalize to lower case. | |||
4109 | // That's more consistent since register names are case insensitive, and | |||
4110 | // it's how the original entry was passed in from MC/MCParser/AsmParser. | |||
4111 | StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase); | |||
4112 | // If no match, return failure. | |||
4113 | if (Entry == RegisterReqs.end()) | |||
4114 | return -1; | |||
4115 | Parser.Lex(); // Eat identifier token. | |||
4116 | return Entry->getValue(); | |||
4117 | } | |||
4118 | ||||
4119 | // Some FPUs only have 16 D registers, so D16-D31 are invalid | |||
4120 | if (!hasD32() && RegNum >= ARM::D16 && RegNum <= ARM::D31) | |||
4121 | return -1; | |||
4122 | ||||
4123 | Parser.Lex(); // Eat identifier token. | |||
4124 | ||||
4125 | return RegNum; | |||
4126 | } | |||
4127 | ||||
// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
// If a recoverable error occurs, return 1. If an irrecoverable error
// occurs, return -1. An irrecoverable error is one where tokens have been
// consumed in the process of trying to parse the shifter (i.e., when it is
// indeed a shifter operand, but malformed).
int ARMAsmParser::tryParseShiftRegister(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier))
    return -1;

  // Shift mnemonics are matched case-insensitively; "asl" is accepted as a
  // synonym for "lsl".
  std::string lowerCase = Tok.getString().lower();
  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
      .Case("asl", ARM_AM::lsl)
      .Case("lsl", ARM_AM::lsl)
      .Case("lsr", ARM_AM::lsr)
      .Case("asr", ARM_AM::asr)
      .Case("ror", ARM_AM::ror)
      .Case("rrx", ARM_AM::rrx)
      .Default(ARM_AM::no_shift);

  // Not a shift mnemonic at all: recoverable, nothing was consumed.
  if (ShiftTy == ARM_AM::no_shift)
    return 1;

  Parser.Lex(); // Eat the operator.

  // The source register for the shift has already been added to the
  // operand list, so we need to pop it off and combine it into the shifted
  // register operand instead.
  std::unique_ptr<ARMOperand> PrevOp(
      (ARMOperand *)Operands.pop_back_val().release());
  if (!PrevOp->isReg())
    // Error() returns true, which converts to the "recoverable error"
    // return value 1 here.
    return Error(PrevOp->getStartLoc(), "shift must be of a register");
  int SrcReg = PrevOp->getReg();

  SMLoc EndLoc;
  int64_t Imm = 0;
  int ShiftReg = 0;
  if (ShiftTy == ARM_AM::rrx) {
    // RRX Doesn't have an explicit shift amount. The encoder expects
    // the shift register to be the same as the source register. Seems odd,
    // but OK.
    ShiftReg = SrcReg;
  } else {
    // Figure out if this is shifted by a constant or a register (for non-RRX).
    if (Parser.getTok().is(AsmToken::Hash) ||
        Parser.getTok().is(AsmToken::Dollar)) {
      Parser.Lex(); // Eat hash.
      SMLoc ImmLoc = Parser.getTok().getLoc();
      const MCExpr *ShiftExpr = nullptr;
      if (getParser().parseExpression(ShiftExpr, EndLoc)) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // The expression must be evaluatable as an immediate.
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
      if (!CE) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // Range check the immediate.
      // lsl, ror: 0 <= imm <= 31
      // lsr, asr: 0 <= imm <= 32
      Imm = CE->getValue();
      if (Imm < 0 ||
          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
        Error(ImmLoc, "immediate shift value out of range");
        return -1;
      }
      // shift by zero is a nop. Always send it through as lsl.
      // ('as' compatibility)
      if (Imm == 0)
        ShiftTy = ARM_AM::lsl;
    } else if (Parser.getTok().is(AsmToken::Identifier)) {
      SMLoc L = Parser.getTok().getLoc();
      EndLoc = Parser.getTok().getEndLoc();
      ShiftReg = tryParseRegister();
      if (ShiftReg == -1) {
        Error(L, "expected immediate or register in shift operand");
        return -1;
      }
    } else {
      Error(Parser.getTok().getLoc(),
            "expected immediate or register in shift operand");
      return -1;
    }
  }

  // Fold source register and shift into a single operand: a register-shifted
  // register when a shift register was parsed (except rrx), otherwise a
  // register shifted by an immediate.
  if (ShiftReg && ShiftTy != ARM_AM::rrx)
    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
                                                         ShiftReg, Imm,
                                                         S, EndLoc));
  else
    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
                                                          S, EndLoc));

  return 0;
}
4228 | ||||
/// Try to parse a register name. The token must be an Identifier when called.
/// If it's a register, an AsmOperand is created. Another AsmOperand is created
/// if there is a "writeback". 'true' if it's not a register.
///
/// TODO this is likely to change to allow different register types and or to
/// parse for a specific register type.
bool ARMAsmParser::tryParseRegisterWithWriteBack(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  // Capture the token's extent before tryParseRegister() consumes it.
  SMLoc RegStartLoc = Parser.getTok().getLoc();
  SMLoc RegEndLoc = Parser.getTok().getEndLoc();
  int RegNo = tryParseRegister();
  if (RegNo == -1)
    return true;

  Operands.push_back(ARMOperand::CreateReg(RegNo, RegStartLoc, RegEndLoc));

  // A trailing '!' marks writeback; it is pushed as a separate token operand
  // so the matcher can key off it.
  const AsmToken &ExclaimTok = Parser.getTok();
  if (ExclaimTok.is(AsmToken::Exclaim)) {
    Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
                                               ExclaimTok.getLoc()));
    Parser.Lex(); // Eat exclaim token
    return false;
  }

  // Also check for an index operand. This is only legal for vector registers,
  // but that'll get caught OK in operand matching, so we don't need to
  // explicitly filter everything else out here.
  if (Parser.getTok().is(AsmToken::LBrac)) {
    SMLoc SIdx = Parser.getTok().getLoc();
    Parser.Lex(); // Eat left bracket token.

    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return true;
    // The lane index must fold to a compile-time constant.
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE)
      return TokError("immediate value expected for vector index");

    if (Parser.getTok().isNot(AsmToken::RBrac))
      return Error(Parser.getTok().getLoc(), "']' expected");

    SMLoc E = Parser.getTok().getEndLoc();
    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
                                                     SIdx, E,
                                                     getContext()));
  }

  return false;
}
4280 | ||||
4281 | /// MatchCoprocessorOperandName - Try to parse an coprocessor related | |||
4282 | /// instruction with a symbolic operand name. | |||
4283 | /// We accept "crN" syntax for GAS compatibility. | |||
4284 | /// <operand-name> ::= <prefix><number> | |||
4285 | /// If CoprocOp is 'c', then: | |||
4286 | /// <prefix> ::= c | cr | |||
4287 | /// If CoprocOp is 'p', then : | |||
4288 | /// <prefix> ::= p | |||
4289 | /// <number> ::= integer in range [0, 15] | |||
4290 | static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) { | |||
4291 | // Use the same layout as the tablegen'erated register name matcher. Ugly, | |||
4292 | // but efficient. | |||
4293 | if (Name.size() < 2 || Name[0] != CoprocOp) | |||
4294 | return -1; | |||
4295 | Name = (Name[1] == 'r') ? Name.drop_front(2) : Name.drop_front(); | |||
4296 | ||||
4297 | switch (Name.size()) { | |||
4298 | default: return -1; | |||
4299 | case 1: | |||
4300 | switch (Name[0]) { | |||
4301 | default: return -1; | |||
4302 | case '0': return 0; | |||
4303 | case '1': return 1; | |||
4304 | case '2': return 2; | |||
4305 | case '3': return 3; | |||
4306 | case '4': return 4; | |||
4307 | case '5': return 5; | |||
4308 | case '6': return 6; | |||
4309 | case '7': return 7; | |||
4310 | case '8': return 8; | |||
4311 | case '9': return 9; | |||
4312 | } | |||
4313 | case 2: | |||
4314 | if (Name[0] != '1') | |||
4315 | return -1; | |||
4316 | switch (Name[1]) { | |||
4317 | default: return -1; | |||
4318 | // CP10 and CP11 are VFP/NEON and so vector instructions should be used. | |||
4319 | // However, old cores (v5/v6) did use them in that way. | |||
4320 | case '0': return 10; | |||
4321 | case '1': return 11; | |||
4322 | case '2': return 12; | |||
4323 | case '3': return 13; | |||
4324 | case '4': return 14; | |||
4325 | case '5': return 15; | |||
4326 | } | |||
4327 | } | |||
4328 | } | |||
4329 | ||||
4330 | /// parseITCondCode - Try to parse a condition code for an IT instruction. | |||
4331 | OperandMatchResultTy | |||
4332 | ARMAsmParser::parseITCondCode(OperandVector &Operands) { | |||
4333 | MCAsmParser &Parser = getParser(); | |||
4334 | SMLoc S = Parser.getTok().getLoc(); | |||
4335 | const AsmToken &Tok = Parser.getTok(); | |||
4336 | if (!Tok.is(AsmToken::Identifier)) | |||
4337 | return MatchOperand_NoMatch; | |||
4338 | unsigned CC = ARMCondCodeFromString(Tok.getString()); | |||
4339 | if (CC == ~0U) | |||
4340 | return MatchOperand_NoMatch; | |||
4341 | Parser.Lex(); // Eat the token. | |||
4342 | ||||
4343 | Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S)); | |||
4344 | ||||
4345 | return MatchOperand_Success; | |||
4346 | } | |||
4347 | ||||
4348 | /// parseCoprocNumOperand - Try to parse an coprocessor number operand. The | |||
4349 | /// token must be an Identifier when called, and if it is a coprocessor | |||
4350 | /// number, the token is eaten and the operand is added to the operand list. | |||
4351 | OperandMatchResultTy | |||
4352 | ARMAsmParser::parseCoprocNumOperand(OperandVector &Operands) { | |||
4353 | MCAsmParser &Parser = getParser(); | |||
4354 | SMLoc S = Parser.getTok().getLoc(); | |||
4355 | const AsmToken &Tok = Parser.getTok(); | |||
4356 | if (Tok.isNot(AsmToken::Identifier)) | |||
4357 | return MatchOperand_NoMatch; | |||
4358 | ||||
4359 | int Num = MatchCoprocessorOperandName(Tok.getString().lower(), 'p'); | |||
4360 | if (Num == -1) | |||
4361 | return MatchOperand_NoMatch; | |||
4362 | if (!isValidCoprocessorNumber(Num, getSTI().getFeatureBits())) | |||
4363 | return MatchOperand_NoMatch; | |||
4364 | ||||
4365 | Parser.Lex(); // Eat identifier token. | |||
4366 | Operands.push_back(ARMOperand::CreateCoprocNum(Num, S)); | |||
4367 | return MatchOperand_Success; | |||
4368 | } | |||
4369 | ||||
4370 | /// parseCoprocRegOperand - Try to parse an coprocessor register operand. The | |||
4371 | /// token must be an Identifier when called, and if it is a coprocessor | |||
4372 | /// number, the token is eaten and the operand is added to the operand list. | |||
4373 | OperandMatchResultTy | |||
4374 | ARMAsmParser::parseCoprocRegOperand(OperandVector &Operands) { | |||
4375 | MCAsmParser &Parser = getParser(); | |||
4376 | SMLoc S = Parser.getTok().getLoc(); | |||
4377 | const AsmToken &Tok = Parser.getTok(); | |||
4378 | if (Tok.isNot(AsmToken::Identifier)) | |||
4379 | return MatchOperand_NoMatch; | |||
4380 | ||||
4381 | int Reg = MatchCoprocessorOperandName(Tok.getString().lower(), 'c'); | |||
4382 | if (Reg == -1) | |||
4383 | return MatchOperand_NoMatch; | |||
4384 | ||||
4385 | Parser.Lex(); // Eat identifier token. | |||
4386 | Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S)); | |||
4387 | return MatchOperand_Success; | |||
4388 | } | |||
4389 | ||||
/// parseCoprocOptionOperand - Try to parse an coprocessor option operand.
/// coproc_option : '{' imm0_255 '}'
OperandMatchResultTy
ARMAsmParser::parseCoprocOptionOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = Parser.getTok().getLoc();

  // If this isn't a '{', this isn't a coprocessor immediate operand.
  if (Parser.getTok().isNot(AsmToken::LCurly))
    return MatchOperand_NoMatch;
  Parser.Lex(); // Eat the '{'

  const MCExpr *Expr;
  SMLoc Loc = Parser.getTok().getLoc();
  if (getParser().parseExpression(Expr)) {
    Error(Loc, "illegal expression");
    return MatchOperand_ParseFail;
  }
  // The option must fold to a constant in [0, 255].
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
  if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
    Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
    return MatchOperand_ParseFail;
  }
  int Val = CE->getValue();

  // Check for and consume the closing '}'
  // NOTE(review): this failure path returns ParseFail without emitting a
  // diagnostic -- confirm whether a "'}' expected" error should be reported.
  if (Parser.getTok().isNot(AsmToken::RCurly))
    return MatchOperand_ParseFail;
  SMLoc E = Parser.getTok().getEndLoc();
  Parser.Lex(); // Eat the '}'

  Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
  return MatchOperand_Success;
}
4424 | ||||
4425 | // For register list parsing, we need to map from raw GPR register numbering | |||
4426 | // to the enumeration values. The enumeration values aren't sorted by | |||
4427 | // register number due to our using "sp", "lr" and "pc" as canonical names. | |||
4428 | static unsigned getNextRegister(unsigned Reg) { | |||
4429 | // If this is a GPR, we need to do it manually, otherwise we can rely | |||
4430 | // on the sort ordering of the enumeration since the other reg-classes | |||
4431 | // are sane. | |||
4432 | if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg)) | |||
4433 | return Reg + 1; | |||
4434 | switch(Reg) { | |||
4435 | default: llvm_unreachable("Invalid GPR number!")::llvm::llvm_unreachable_internal("Invalid GPR number!", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 4435); | |||
4436 | case ARM::R0: return ARM::R1; case ARM::R1: return ARM::R2; | |||
4437 | case ARM::R2: return ARM::R3; case ARM::R3: return ARM::R4; | |||
4438 | case ARM::R4: return ARM::R5; case ARM::R5: return ARM::R6; | |||
4439 | case ARM::R6: return ARM::R7; case ARM::R7: return ARM::R8; | |||
4440 | case ARM::R8: return ARM::R9; case ARM::R9: return ARM::R10; | |||
4441 | case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12; | |||
4442 | case ARM::R12: return ARM::SP; case ARM::SP: return ARM::LR; | |||
4443 | case ARM::LR: return ARM::PC; case ARM::PC: return ARM::R0; | |||
4444 | } | |||
4445 | } | |||
4446 | ||||
4447 | // Insert an <Encoding, Register> pair in an ordered vector. Return true on | |||
4448 | // success, or false, if duplicate encoding found. | |||
4449 | static bool | |||
4450 | insertNoDuplicates(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs, | |||
4451 | unsigned Enc, unsigned Reg) { | |||
4452 | Regs.emplace_back(Enc, Reg); | |||
4453 | for (auto I = Regs.rbegin(), J = I + 1, E = Regs.rend(); J != E; ++I, ++J) { | |||
4454 | if (J->first == Enc) { | |||
4455 | Regs.erase(J.base()); | |||
4456 | return false; | |||
4457 | } | |||
4458 | if (J->first < Enc) | |||
4459 | break; | |||
4460 | std::swap(*I, *J); | |||
4461 | } | |||
4462 | return true; | |||
4463 | } | |||
4464 | ||||
/// Parse a register list.
///
/// Consumes a brace-enclosed list such as "{r0, r2-r4}" and appends a reglist
/// operand (plus an optional trailing "^" token operand) to \p Operands. Q
/// registers are accepted and expanded into their two D sub-registers. The
/// register class of the first element determines the class the rest of the
/// list must belong to. If \p EnforceOrder is true, registers whose encodings
/// are not in ascending order are diagnosed (warning for GPRs, error for
/// other classes); CLRM passes false since its list is order-independent.
/// Returns true on error, per the MC-parser convention.
bool ARMAsmParser::parseRegisterList(OperandVector &Operands,
                                     bool EnforceOrder) {
  MCAsmParser &Parser = getParser();
  if (Parser.getTok().isNot(AsmToken::LCurly))
    return TokError("Token is not a Left Curly Brace");
  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat '{' token.
  SMLoc RegLoc = Parser.getTok().getLoc();

  // Check the first register in the list to see what register class
  // this is a list of.
  int Reg = tryParseRegister();
  if (Reg == -1)
    return Error(RegLoc, "register expected");

  // The reglist instructions have at most 16 registers, so reserve
  // space for that many.
  int EReg = 0;
  SmallVector<std::pair<unsigned, unsigned>, 16> Registers;

  // Allow Q regs and just interpret them as the two D sub-registers.
  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
    Reg = getDRegFromQReg(Reg);
    EReg = MRI->getEncodingValue(Reg);
    Registers.emplace_back(EReg, Reg);
    ++Reg;
  }
  const MCRegisterClass *RC;
  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
  else
    return Error(RegLoc, "invalid register in register list");

  // Store the register.
  EReg = MRI->getEncodingValue(Reg);
  Registers.emplace_back(EReg, Reg);

  // This starts immediately after the first register token in the list,
  // so we can see either a comma or a minus (range separator) as a legal
  // next token.
  while (Parser.getTok().is(AsmToken::Comma) ||
         Parser.getTok().is(AsmToken::Minus)) {
    if (Parser.getTok().is(AsmToken::Minus)) {
      // Range form: "<reg>-<reg>". Expand it one register at a time via
      // getNextRegister, warning on (but tolerating) duplicates.
      Parser.Lex(); // Eat the minus.
      SMLoc AfterMinusLoc = Parser.getTok().getLoc();
      int EndReg = tryParseRegister();
      if (EndReg == -1)
        return Error(AfterMinusLoc, "register expected");
      // Allow Q regs and just interpret them as the two D sub-registers.
      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
        EndReg = getDRegFromQReg(EndReg) + 1;
      // If the register is the same as the start reg, there's nothing
      // more to do.
      if (Reg == EndReg)
        continue;
      // The register must be in the same register class as the first.
      if (!RC->contains(EndReg))
        return Error(AfterMinusLoc, "invalid register in register list");
      // Ranges must go from low to high.
      if (MRI->getEncodingValue(Reg) > MRI->getEncodingValue(EndReg))
        return Error(AfterMinusLoc, "bad range in register list");

      // Add all the registers in the range to the register list.
      while (Reg != EndReg) {
        Reg = getNextRegister(Reg);
        EReg = MRI->getEncodingValue(Reg);
        if (!insertNoDuplicates(Registers, EReg, Reg)) {
          Warning(AfterMinusLoc, StringRef("duplicated register (") +
                                     ARMInstPrinter::getRegisterName(Reg) +
                                     ") in register list");
        }
      }
      continue;
    }
    Parser.Lex(); // Eat the comma.
    RegLoc = Parser.getTok().getLoc();
    int OldReg = Reg;
    const AsmToken RegTok = Parser.getTok();
    Reg = tryParseRegister();
    if (Reg == -1)
      return Error(RegLoc, "register expected");
    // Allow Q regs and just interpret them as the two D sub-registers.
    bool isQReg = false;
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      Reg = getDRegFromQReg(Reg);
      isQReg = true;
    }
    if (!RC->contains(Reg) &&
        RC->getID() == ARMMCRegisterClasses[ARM::GPRRegClassID].getID() &&
        ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg)) {
      // switch the register classes, as GPRwithAPSRnospRegClassID is a partial
      // subset of GPRRegClassId except it contains APSR as well.
      RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
    }
    if (Reg == ARM::VPR &&
        (RC == &ARMMCRegisterClasses[ARM::SPRRegClassID] ||
         RC == &ARMMCRegisterClasses[ARM::DPRRegClassID] ||
         RC == &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID])) {
      // VPR may be appended to an FP register list; widen the class to the
      // VPR-inclusive one and record the register immediately.
      RC = &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID];
      EReg = MRI->getEncodingValue(Reg);
      if (!insertNoDuplicates(Registers, EReg, Reg)) {
        Warning(RegLoc, "duplicated register (" + RegTok.getString() +
                            ") in register list");
      }
      continue;
    }
    // The register must be in the same register class as the first.
    if (!RC->contains(Reg))
      return Error(RegLoc, "invalid register in register list");
    // In most cases, the list must be monotonically increasing. An
    // exception is CLRM, which is order-independent anyway, so
    // there's no potential for confusion if you write clrm {r2,r1}
    // instead of clrm {r1,r2}.
    if (EnforceOrder &&
        MRI->getEncodingValue(Reg) < MRI->getEncodingValue(OldReg)) {
      if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
        Warning(RegLoc, "register list not in ascending order");
      else if (!ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg))
        return Error(RegLoc, "register list not in ascending order");
    }
    // VFP register lists must also be contiguous.
    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
        RC != &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID] &&
        Reg != OldReg + 1)
      return Error(RegLoc, "non-contiguous register range");
    EReg = MRI->getEncodingValue(Reg);
    if (!insertNoDuplicates(Registers, EReg, Reg)) {
      Warning(RegLoc, "duplicated register (" + RegTok.getString() +
                          ") in register list");
    }
    if (isQReg) {
      // A Q register contributes its second D sub-register as well.
      EReg = MRI->getEncodingValue(++Reg);
      Registers.emplace_back(EReg, Reg);
    }
  }

  if (Parser.getTok().isNot(AsmToken::RCurly))
    return Error(Parser.getTok().getLoc(), "'}' expected");
  SMLoc E = Parser.getTok().getEndLoc();
  Parser.Lex(); // Eat '}' token.

  // Push the register list operand.
  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));

  // The ARM system instruction variants for LDM/STM have a '^' token here.
  if (Parser.getTok().is(AsmToken::Caret)) {
    Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
    Parser.Lex(); // Eat '^' token.
  }

  return false;
}
4624 | ||||
// Helper function to parse the lane index for vector lists.
//
// Recognizes three forms following a vector register:
//   (no bracket)  -> NoLanes
//   "[]"          -> AllLanes ("all lanes" syntax)
//   "[<imm>]"     -> IndexedLane, with Index set to the constant (0-7)
// For the bracketed forms, EndLoc is updated to just past the ']'. Emits a
// diagnostic and returns MatchOperand_ParseFail for malformed specifiers.
OperandMatchResultTy ARMAsmParser::
parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index, SMLoc &EndLoc) {
  MCAsmParser &Parser = getParser();
  Index = 0; // Always return a defined index value.
  if (Parser.getTok().is(AsmToken::LBrac)) {
    Parser.Lex(); // Eat the '['.
    if (Parser.getTok().is(AsmToken::RBrac)) {
      // "Dn[]" is the 'all lanes' syntax.
      LaneKind = AllLanes;
      EndLoc = Parser.getTok().getEndLoc();
      Parser.Lex(); // Eat the ']'.
      return MatchOperand_Success;
    }

    // There's an optional '#' token here. Normally there wouldn't be, but
    // inline assemble puts one in, and it's friendly to accept that.
    // NOTE(review): only a Hash token is actually consumed here even though
    // the comment below mentions '$'; a '$' prefix falls through to
    // parseExpression — confirm whether Dollar should also be eaten, as it
    // is in parseMemBarrierOptOperand.
    if (Parser.getTok().is(AsmToken::Hash))
      Parser.Lex(); // Eat '#' or '$'.

    const MCExpr *LaneIndex;
    SMLoc Loc = Parser.getTok().getLoc();
    if (getParser().parseExpression(LaneIndex)) {
      Error(Loc, "illegal expression");
      return MatchOperand_ParseFail;
    }
    // The lane index must fold to a compile-time constant.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
    if (!CE) {
      Error(Loc, "lane index must be empty or an integer");
      return MatchOperand_ParseFail;
    }
    if (Parser.getTok().isNot(AsmToken::RBrac)) {
      Error(Parser.getTok().getLoc(), "']' expected");
      return MatchOperand_ParseFail;
    }
    EndLoc = Parser.getTok().getEndLoc();
    Parser.Lex(); // Eat the ']'.
    int64_t Val = CE->getValue();

    // FIXME: Make this range check context sensitive for .8, .16, .32.
    if (Val < 0 || Val > 7) {
      Error(Parser.getTok().getLoc(), "lane index out of range");
      return MatchOperand_ParseFail;
    }
    Index = Val;
    LaneKind = IndexedLane;
    return MatchOperand_Success;
  }
  // No '[' at all: the register carries no lane specifier.
  LaneKind = NoLanes;
  return MatchOperand_Success;
}
4676 | ||||
// parse a vector register list
//
// Handles three surface syntaxes:
//   * a bare D or Q register without braces (gas extension, non-MVE only),
//     treated as a one- or two-entry list;
//   * a brace-enclosed list/range of D registers, where Q registers are
//     expanded to their two D sub-registers and the list may be single- or
//     double-spaced;
//   * for MVE, a brace-enclosed list of Q registers (Q0-Q7).
// Each element may carry a lane specifier ("[]" or "[n]"), which must be
// identical across the whole list. On success a VectorList operand of the
// appropriate flavor is pushed onto Operands.
OperandMatchResultTy
ARMAsmParser::parseVectorList(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  VectorLaneTy LaneKind;
  unsigned LaneIndex;
  SMLoc S = Parser.getTok().getLoc();
  // As an extension (to match gas), support a plain D register or Q register
  // (without encosing curly braces) as a single or double entry list,
  // respectively.
  if (!hasMVE() && Parser.getTok().is(AsmToken::Identifier)) {
    SMLoc E = Parser.getTok().getEndLoc();
    int Reg = tryParseRegister();
    if (Reg == -1)
      return MatchOperand_NoMatch;
    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
      if (Res != MatchOperand_Success)
        return Res;
      switch (LaneKind) {
      case NoLanes:
        Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
        break;
      case AllLanes:
        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
                                                                S, E));
        break;
      case IndexedLane:
        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
                                                               LaneIndex,
                                                               false, S, E));
        break;
      }
      return MatchOperand_Success;
    }
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      // A bare Q register is a two-entry D list; for the non-indexed forms
      // it is re-encoded as the matching DPair super-register.
      Reg = getDRegFromQReg(Reg);
      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
      if (Res != MatchOperand_Success)
        return Res;
      switch (LaneKind) {
      case NoLanes:
        Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
                                   &ARMMCRegisterClasses[ARM::DPairRegClassID]);
        Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
        break;
      case AllLanes:
        Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
                                   &ARMMCRegisterClasses[ARM::DPairRegClassID]);
        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
                                                                S, E));
        break;
      case IndexedLane:
        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
                                                               LaneIndex,
                                                               false, S, E));
        break;
      }
      return MatchOperand_Success;
    }
    Error(S, "vector register expected");
    return MatchOperand_ParseFail;
  }

  if (Parser.getTok().isNot(AsmToken::LCurly))
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat '{' token.
  SMLoc RegLoc = Parser.getTok().getLoc();

  int Reg = tryParseRegister();
  if (Reg == -1) {
    Error(RegLoc, "register expected");
    return MatchOperand_ParseFail;
  }
  // Count tracks D registers (or Q registers for MVE); Spacing is 0 until
  // determined, then 1 (single-spaced) or 2 (double-spaced).
  unsigned Count = 1;
  int Spacing = 0;
  unsigned FirstReg = Reg;

  if (hasMVE() && !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Reg)) {
    Error(Parser.getTok().getLoc(), "vector register in range Q0-Q7 expected");
    return MatchOperand_ParseFail;
  }
  // The list is of D registers, but we also allow Q regs and just interpret
  // them as the two D sub-registers.
  else if (!hasMVE() && ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
    FirstReg = Reg = getDRegFromQReg(Reg);
    Spacing = 1; // double-spacing requires explicit D registers, otherwise
                 // it's ambiguous with four-register single spaced.
    ++Reg;
    ++Count;
  }

  SMLoc E;
  if (parseVectorLane(LaneKind, LaneIndex, E) != MatchOperand_Success)
    return MatchOperand_ParseFail;

  while (Parser.getTok().is(AsmToken::Comma) ||
         Parser.getTok().is(AsmToken::Minus)) {
    if (Parser.getTok().is(AsmToken::Minus)) {
      if (!Spacing)
        Spacing = 1; // Register range implies a single spaced list.
      else if (Spacing == 2) {
        Error(Parser.getTok().getLoc(),
              "sequential registers in double spaced list");
        return MatchOperand_ParseFail;
      }
      Parser.Lex(); // Eat the minus.
      SMLoc AfterMinusLoc = Parser.getTok().getLoc();
      int EndReg = tryParseRegister();
      if (EndReg == -1) {
        Error(AfterMinusLoc, "register expected");
        return MatchOperand_ParseFail;
      }
      // Allow Q regs and just interpret them as the two D sub-registers.
      if (!hasMVE() && ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
        EndReg = getDRegFromQReg(EndReg) + 1;
      // If the register is the same as the start reg, there's nothing
      // more to do.
      if (Reg == EndReg)
        continue;
      // The register must be in the same register class as the first.
      if ((hasMVE() &&
           !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(EndReg)) ||
          (!hasMVE() &&
           !ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg))) {
        Error(AfterMinusLoc, "invalid register in register list");
        return MatchOperand_ParseFail;
      }
      // Ranges must go from low to high.
      if (Reg > EndReg) {
        Error(AfterMinusLoc, "bad range in register list");
        return MatchOperand_ParseFail;
      }
      // Parse the lane specifier if present.
      VectorLaneTy NextLaneKind;
      unsigned NextLaneIndex;
      if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
          MatchOperand_Success)
        return MatchOperand_ParseFail;
      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
        Error(AfterMinusLoc, "mismatched lane index in register list");
        return MatchOperand_ParseFail;
      }

      // Add all the registers in the range to the register list.
      Count += EndReg - Reg;
      Reg = EndReg;
      continue;
    }
    Parser.Lex(); // Eat the comma.
    RegLoc = Parser.getTok().getLoc();
    int OldReg = Reg;
    Reg = tryParseRegister();
    if (Reg == -1) {
      Error(RegLoc, "register expected");
      return MatchOperand_ParseFail;
    }

    if (hasMVE()) {
      if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Reg)) {
        Error(RegLoc, "vector register in range Q0-Q7 expected");
        return MatchOperand_ParseFail;
      }
      Spacing = 1;
    }
    // vector register lists must be contiguous.
    // It's OK to use the enumeration values directly here rather, as the
    // VFP register classes have the enum sorted properly.
    //
    // The list is of D registers, but we also allow Q regs and just interpret
    // them as the two D sub-registers.
    else if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      if (!Spacing)
        Spacing = 1; // Register range implies a single spaced list.
      else if (Spacing == 2) {
        Error(RegLoc,
              "invalid register in double-spaced list (must be 'D' register')");
        return MatchOperand_ParseFail;
      }
      Reg = getDRegFromQReg(Reg);
      if (Reg != OldReg + 1) {
        Error(RegLoc, "non-contiguous register range");
        return MatchOperand_ParseFail;
      }
      ++Reg;
      Count += 2;
      // Parse the lane specifier if present.
      VectorLaneTy NextLaneKind;
      unsigned NextLaneIndex;
      SMLoc LaneLoc = Parser.getTok().getLoc();
      if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
          MatchOperand_Success)
        return MatchOperand_ParseFail;
      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
        Error(LaneLoc, "mismatched lane index in register list");
        return MatchOperand_ParseFail;
      }
      continue;
    }
    // Normal D register.
    // Figure out the register spacing (single or double) of the list if
    // we don't know it already.
    if (!Spacing)
      Spacing = 1 + (Reg == OldReg + 2);

    // Just check that it's contiguous and keep going.
    if (Reg != OldReg + Spacing) {
      Error(RegLoc, "non-contiguous register range");
      return MatchOperand_ParseFail;
    }
    ++Count;
    // Parse the lane specifier if present.
    VectorLaneTy NextLaneKind;
    unsigned NextLaneIndex;
    SMLoc EndLoc = Parser.getTok().getLoc();
    if (parseVectorLane(NextLaneKind, NextLaneIndex, E) != MatchOperand_Success)
      return MatchOperand_ParseFail;
    if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
      Error(EndLoc, "mismatched lane index in register list");
      return MatchOperand_ParseFail;
    }
  }

  if (Parser.getTok().isNot(AsmToken::RCurly)) {
    Error(Parser.getTok().getLoc(), "'}' expected");
    return MatchOperand_ParseFail;
  }
  E = Parser.getTok().getEndLoc();
  Parser.Lex(); // Eat '}' token.

  switch (LaneKind) {
  case NoLanes:
  case AllLanes: {
    // Two-register operands have been converted to the
    // composite register classes.
    if (Count == 2 && !hasMVE()) {
      const MCRegisterClass *RC = (Spacing == 1) ?
        &ARMMCRegisterClasses[ARM::DPairRegClassID] :
        &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
      FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
    }
    auto Create = (LaneKind == NoLanes ? ARMOperand::CreateVectorList :
                   ARMOperand::CreateVectorListAllLanes);
    Operands.push_back(Create(FirstReg, Count, (Spacing == 2), S, E));
    break;
  }
  case IndexedLane:
    Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
                                                           LaneIndex,
                                                           (Spacing == 2),
                                                           S, E));
    break;
  }
  return MatchOperand_Success;
}
4933 | ||||
4934 | /// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options. | |||
4935 | OperandMatchResultTy | |||
4936 | ARMAsmParser::parseMemBarrierOptOperand(OperandVector &Operands) { | |||
4937 | MCAsmParser &Parser = getParser(); | |||
4938 | SMLoc S = Parser.getTok().getLoc(); | |||
4939 | const AsmToken &Tok = Parser.getTok(); | |||
4940 | unsigned Opt; | |||
4941 | ||||
4942 | if (Tok.is(AsmToken::Identifier)) { | |||
4943 | StringRef OptStr = Tok.getString(); | |||
4944 | ||||
4945 | Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()).lower()) | |||
4946 | .Case("sy", ARM_MB::SY) | |||
4947 | .Case("st", ARM_MB::ST) | |||
4948 | .Case("ld", ARM_MB::LD) | |||
4949 | .Case("sh", ARM_MB::ISH) | |||
4950 | .Case("ish", ARM_MB::ISH) | |||
4951 | .Case("shst", ARM_MB::ISHST) | |||
4952 | .Case("ishst", ARM_MB::ISHST) | |||
4953 | .Case("ishld", ARM_MB::ISHLD) | |||
4954 | .Case("nsh", ARM_MB::NSH) | |||
4955 | .Case("un", ARM_MB::NSH) | |||
4956 | .Case("nshst", ARM_MB::NSHST) | |||
4957 | .Case("nshld", ARM_MB::NSHLD) | |||
4958 | .Case("unst", ARM_MB::NSHST) | |||
4959 | .Case("osh", ARM_MB::OSH) | |||
4960 | .Case("oshst", ARM_MB::OSHST) | |||
4961 | .Case("oshld", ARM_MB::OSHLD) | |||
4962 | .Default(~0U); | |||
4963 | ||||
4964 | // ishld, oshld, nshld and ld are only available from ARMv8. | |||
4965 | if (!hasV8Ops() && (Opt == ARM_MB::ISHLD || Opt == ARM_MB::OSHLD || | |||
4966 | Opt == ARM_MB::NSHLD || Opt == ARM_MB::LD)) | |||
4967 | Opt = ~0U; | |||
4968 | ||||
4969 | if (Opt == ~0U) | |||
4970 | return MatchOperand_NoMatch; | |||
4971 | ||||
4972 | Parser.Lex(); // Eat identifier token. | |||
4973 | } else if (Tok.is(AsmToken::Hash) || | |||
4974 | Tok.is(AsmToken::Dollar) || | |||
4975 | Tok.is(AsmToken::Integer)) { | |||
4976 | if (Parser.getTok().isNot(AsmToken::Integer)) | |||
4977 | Parser.Lex(); // Eat '#' or '$'. | |||
4978 | SMLoc Loc = Parser.getTok().getLoc(); | |||
4979 | ||||
4980 | const MCExpr *MemBarrierID; | |||
4981 | if (getParser().parseExpression(MemBarrierID)) { | |||
4982 | Error(Loc, "illegal expression"); | |||
4983 | return MatchOperand_ParseFail; | |||
4984 | } | |||
4985 | ||||
4986 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(MemBarrierID); | |||
4987 | if (!CE) { | |||
4988 | Error(Loc, "constant expression expected"); | |||
4989 | return MatchOperand_ParseFail; | |||
4990 | } | |||
4991 | ||||
4992 | int Val = CE->getValue(); | |||
4993 | if (Val & ~0xf) { | |||
4994 | Error(Loc, "immediate value out of range"); | |||
4995 | return MatchOperand_ParseFail; | |||
4996 | } | |||
4997 | ||||
4998 | Opt = ARM_MB::RESERVED_0 + Val; | |||
4999 | } else | |||
5000 | return MatchOperand_ParseFail; | |||
5001 | ||||
5002 | Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S)); | |||
5003 | return MatchOperand_Success; | |||
5004 | } | |||
5005 | ||||
5006 | OperandMatchResultTy | |||
5007 | ARMAsmParser::parseTraceSyncBarrierOptOperand(OperandVector &Operands) { | |||
5008 | MCAsmParser &Parser = getParser(); | |||
5009 | SMLoc S = Parser.getTok().getLoc(); | |||
5010 | const AsmToken &Tok = Parser.getTok(); | |||
5011 | ||||
5012 | if (Tok.isNot(AsmToken::Identifier)) | |||
5013 | return MatchOperand_NoMatch; | |||
5014 | ||||
5015 | if (!Tok.getString().equals_insensitive("csync")) | |||
5016 | return MatchOperand_NoMatch; | |||
5017 | ||||
5018 | Parser.Lex(); // Eat identifier token. | |||
5019 | ||||
5020 | Operands.push_back(ARMOperand::CreateTraceSyncBarrierOpt(ARM_TSB::CSYNC, S)); | |||
5021 | return MatchOperand_Success; | |||
5022 | } | |||
5023 | ||||
5024 | /// parseInstSyncBarrierOptOperand - Try to parse ISB inst sync barrier options. | |||
5025 | OperandMatchResultTy | |||
5026 | ARMAsmParser::parseInstSyncBarrierOptOperand(OperandVector &Operands) { | |||
5027 | MCAsmParser &Parser = getParser(); | |||
5028 | SMLoc S = Parser.getTok().getLoc(); | |||
5029 | const AsmToken &Tok = Parser.getTok(); | |||
5030 | unsigned Opt; | |||
5031 | ||||
5032 | if (Tok.is(AsmToken::Identifier)) { | |||
5033 | StringRef OptStr = Tok.getString(); | |||
5034 | ||||
5035 | if (OptStr.equals_insensitive("sy")) | |||
5036 | Opt = ARM_ISB::SY; | |||
5037 | else | |||
5038 | return MatchOperand_NoMatch; | |||
5039 | ||||
5040 | Parser.Lex(); // Eat identifier token. | |||
5041 | } else if (Tok.is(AsmToken::Hash) || | |||
5042 | Tok.is(AsmToken::Dollar) || | |||
5043 | Tok.is(AsmToken::Integer)) { | |||
5044 | if (Parser.getTok().isNot(AsmToken::Integer)) | |||
5045 | Parser.Lex(); // Eat '#' or '$'. | |||
5046 | SMLoc Loc = Parser.getTok().getLoc(); | |||
5047 | ||||
5048 | const MCExpr *ISBarrierID; | |||
5049 | if (getParser().parseExpression(ISBarrierID)) { | |||
5050 | Error(Loc, "illegal expression"); | |||
5051 | return MatchOperand_ParseFail; | |||
5052 | } | |||
5053 | ||||
5054 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ISBarrierID); | |||
5055 | if (!CE) { | |||
5056 | Error(Loc, "constant expression expected"); | |||
5057 | return MatchOperand_ParseFail; | |||
5058 | } | |||
5059 | ||||
5060 | int Val = CE->getValue(); | |||
5061 | if (Val & ~0xf) { | |||
5062 | Error(Loc, "immediate value out of range"); | |||
5063 | return MatchOperand_ParseFail; | |||
5064 | } | |||
5065 | ||||
5066 | Opt = ARM_ISB::RESERVED_0 + Val; | |||
5067 | } else | |||
5068 | return MatchOperand_ParseFail; | |||
5069 | ||||
5070 | Operands.push_back(ARMOperand::CreateInstSyncBarrierOpt( | |||
5071 | (ARM_ISB::InstSyncBOpt)Opt, S)); | |||
5072 | return MatchOperand_Success; | |||
5073 | } | |||
5074 | ||||
5075 | ||||
5076 | /// parseProcIFlagsOperand - Try to parse iflags from CPS instruction. | |||
5077 | OperandMatchResultTy | |||
5078 | ARMAsmParser::parseProcIFlagsOperand(OperandVector &Operands) { | |||
5079 | MCAsmParser &Parser = getParser(); | |||
5080 | SMLoc S = Parser.getTok().getLoc(); | |||
5081 | const AsmToken &Tok = Parser.getTok(); | |||
5082 | if (!Tok.is(AsmToken::Identifier)) | |||
5083 | return MatchOperand_NoMatch; | |||
5084 | StringRef IFlagsStr = Tok.getString(); | |||
5085 | ||||
5086 | // An iflags string of "none" is interpreted to mean that none of the AIF | |||
5087 | // bits are set. Not a terribly useful instruction, but a valid encoding. | |||
5088 | unsigned IFlags = 0; | |||
5089 | if (IFlagsStr != "none") { | |||
5090 | for (int i = 0, e = IFlagsStr.size(); i != e; ++i) { | |||
5091 | unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1).lower()) | |||
5092 | .Case("a", ARM_PROC::A) | |||
5093 | .Case("i", ARM_PROC::I) | |||
5094 | .Case("f", ARM_PROC::F) | |||
5095 | .Default(~0U); | |||
5096 | ||||
5097 | // If some specific iflag is already set, it means that some letter is | |||
5098 | // present more than once, this is not acceptable. | |||
5099 | if (Flag == ~0U || (IFlags & Flag)) | |||
5100 | return MatchOperand_NoMatch; | |||
5101 | ||||
5102 | IFlags |= Flag; | |||
5103 | } | |||
5104 | } | |||
5105 | ||||
5106 | Parser.Lex(); // Eat identifier token. | |||
5107 | Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S)); | |||
5108 | return MatchOperand_Success; | |||
5109 | } | |||
5110 | ||||
/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
/// Accepts a raw 8-bit integer (treated as a SYSm value), an M-class named
/// system register, or an A/R-class spec_reg with optional "_flags" suffix
/// (e.g. CPSR_fc, APSR_nzcvq, SPSR).
OperandMatchResultTy
ARMAsmParser::parseMSRMaskOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();

  // A bare integer is accepted as a SYSm value; it must fit in 8 bits.
  if (Tok.is(AsmToken::Integer)) {
    int64_t Val = Tok.getIntVal();
    if (Val > 255 || Val < 0) {
      return MatchOperand_NoMatch;
    }
    unsigned SYSmvalue = Val & 0xFF;
    Parser.Lex();
    Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S));
    return MatchOperand_Success;
  }

  if (!Tok.is(AsmToken::Identifier))
    return MatchOperand_NoMatch;
  StringRef Mask = Tok.getString();

  // M-class: look the (case-insensitive) name up in the tablegen'erated
  // system-register table, gated on the required subtarget features.
  if (isMClass()) {
    auto TheReg = ARMSysReg::lookupMClassSysRegByName(Mask.lower());
    if (!TheReg || !TheReg->hasRequiredFeatures(getSTI().getFeatureBits()))
      return MatchOperand_NoMatch;

    unsigned SYSmvalue = TheReg->Encoding & 0xFFF;

    Parser.Lex(); // Eat identifier token.
    Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S));
    return MatchOperand_Success;
  }

  // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
  size_t Start = 0, Next = Mask.find('_');
  StringRef Flags = "";
  std::string SpecReg = Mask.slice(Start, Next).lower();
  if (Next != StringRef::npos)
    Flags = Mask.slice(Next+1, Mask.size());

  // FlagsVal contains the complete mask:
  // 3-0: Mask
  // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
  unsigned FlagsVal = 0;

  if (SpecReg == "apsr") {
    // APSR only admits these fixed flag combinations; each is an alias for
    // the corresponding CPSR mask (noted per case below).
    FlagsVal = StringSwitch<unsigned>(Flags)
    .Case("nzcvq", 0x8) // same as CPSR_f
    .Case("g", 0x4) // same as CPSR_s
    .Case("nzcvqg", 0xc) // same as CPSR_fs
    .Default(~0U);

    if (FlagsVal == ~0U) {
      // Plain "apsr" (no suffix) is accepted and means "nzcvq"; any other
      // suffix is rejected.
      if (!Flags.empty())
        return MatchOperand_NoMatch;
      else
        FlagsVal = 8; // No flag
    }
  } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
    // cpsr_all is an alias for cpsr_fc, as is plain cpsr.
    if (Flags == "all" || Flags == "")
      Flags = "fc";
    // Each letter sets one mask bit: c=1, x=2, s=4, f=8.
    for (int i = 0, e = Flags.size(); i != e; ++i) {
      unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
      .Case("c", 1)
      .Case("x", 2)
      .Case("s", 4)
      .Case("f", 8)
      .Default(~0U);

      // If some specific flag is already set, it means that some letter is
      // present more than once, this is not acceptable.
      if (Flag == ~0U || (FlagsVal & Flag))
        return MatchOperand_NoMatch;
      FlagsVal |= Flag;
    }
  } else // No match for special register.
    return MatchOperand_NoMatch;

  // Special register without flags is NOT equivalent to "fc" flags.
  // NOTE: This is a divergence from gas' behavior. Uncommenting the following
  // two lines would enable gas compatibility at the expense of breaking
  // round-tripping.
  //
  // if (!FlagsVal)
  //  FlagsVal = 0x9;

  // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
  if (SpecReg == "spsr")
    FlagsVal |= 16;

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
  return MatchOperand_Success;
}
5207 | ||||
5208 | /// parseBankedRegOperand - Try to parse a banked register (e.g. "lr_irq") for | |||
5209 | /// use in the MRS/MSR instructions added to support virtualization. | |||
5210 | OperandMatchResultTy | |||
5211 | ARMAsmParser::parseBankedRegOperand(OperandVector &Operands) { | |||
5212 | MCAsmParser &Parser = getParser(); | |||
5213 | SMLoc S = Parser.getTok().getLoc(); | |||
5214 | const AsmToken &Tok = Parser.getTok(); | |||
5215 | if (!Tok.is(AsmToken::Identifier)) | |||
5216 | return MatchOperand_NoMatch; | |||
5217 | StringRef RegName = Tok.getString(); | |||
5218 | ||||
5219 | auto TheReg = ARMBankedReg::lookupBankedRegByName(RegName.lower()); | |||
5220 | if (!TheReg) | |||
5221 | return MatchOperand_NoMatch; | |||
5222 | unsigned Encoding = TheReg->Encoding; | |||
5223 | ||||
5224 | Parser.Lex(); // Eat identifier token. | |||
5225 | Operands.push_back(ARMOperand::CreateBankedReg(Encoding, S)); | |||
5226 | return MatchOperand_Success; | |||
5227 | } | |||
5228 | ||||
5229 | OperandMatchResultTy | |||
5230 | ARMAsmParser::parsePKHImm(OperandVector &Operands, StringRef Op, int Low, | |||
5231 | int High) { | |||
5232 | MCAsmParser &Parser = getParser(); | |||
5233 | const AsmToken &Tok = Parser.getTok(); | |||
5234 | if (Tok.isNot(AsmToken::Identifier)) { | |||
5235 | Error(Parser.getTok().getLoc(), Op + " operand expected."); | |||
5236 | return MatchOperand_ParseFail; | |||
5237 | } | |||
5238 | StringRef ShiftName = Tok.getString(); | |||
5239 | std::string LowerOp = Op.lower(); | |||
5240 | std::string UpperOp = Op.upper(); | |||
5241 | if (ShiftName != LowerOp && ShiftName != UpperOp) { | |||
5242 | Error(Parser.getTok().getLoc(), Op + " operand expected."); | |||
5243 | return MatchOperand_ParseFail; | |||
5244 | } | |||
5245 | Parser.Lex(); // Eat shift type token. | |||
5246 | ||||
5247 | // There must be a '#' and a shift amount. | |||
5248 | if (Parser.getTok().isNot(AsmToken::Hash) && | |||
5249 | Parser.getTok().isNot(AsmToken::Dollar)) { | |||
5250 | Error(Parser.getTok().getLoc(), "'#' expected"); | |||
5251 | return MatchOperand_ParseFail; | |||
5252 | } | |||
5253 | Parser.Lex(); // Eat hash token. | |||
5254 | ||||
5255 | const MCExpr *ShiftAmount; | |||
5256 | SMLoc Loc = Parser.getTok().getLoc(); | |||
5257 | SMLoc EndLoc; | |||
5258 | if (getParser().parseExpression(ShiftAmount, EndLoc)) { | |||
5259 | Error(Loc, "illegal expression"); | |||
5260 | return MatchOperand_ParseFail; | |||
5261 | } | |||
5262 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); | |||
5263 | if (!CE) { | |||
5264 | Error(Loc, "constant expression expected"); | |||
5265 | return MatchOperand_ParseFail; | |||
5266 | } | |||
5267 | int Val = CE->getValue(); | |||
5268 | if (Val < Low || Val > High) { | |||
5269 | Error(Loc, "immediate value out of range"); | |||
5270 | return MatchOperand_ParseFail; | |||
5271 | } | |||
5272 | ||||
5273 | Operands.push_back(ARMOperand::CreateImm(CE, Loc, EndLoc)); | |||
5274 | ||||
5275 | return MatchOperand_Success; | |||
5276 | } | |||
5277 | ||||
5278 | OperandMatchResultTy | |||
5279 | ARMAsmParser::parseSetEndImm(OperandVector &Operands) { | |||
5280 | MCAsmParser &Parser = getParser(); | |||
5281 | const AsmToken &Tok = Parser.getTok(); | |||
5282 | SMLoc S = Tok.getLoc(); | |||
5283 | if (Tok.isNot(AsmToken::Identifier)) { | |||
5284 | Error(S, "'be' or 'le' operand expected"); | |||
5285 | return MatchOperand_ParseFail; | |||
5286 | } | |||
5287 | int Val = StringSwitch<int>(Tok.getString().lower()) | |||
5288 | .Case("be", 1) | |||
5289 | .Case("le", 0) | |||
5290 | .Default(-1); | |||
5291 | Parser.Lex(); // Eat the token. | |||
5292 | ||||
5293 | if (Val == -1) { | |||
5294 | Error(S, "'be' or 'le' operand expected"); | |||
5295 | return MatchOperand_ParseFail; | |||
5296 | } | |||
5297 | Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::create(Val, | |||
5298 | getContext()), | |||
5299 | S, Tok.getEndLoc())); | |||
5300 | return MatchOperand_Success; | |||
5301 | } | |||
5302 | ||||
5303 | /// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT | |||
5304 | /// instructions. Legal values are: | |||
5305 | /// lsl #n 'n' in [0,31] | |||
5306 | /// asr #n 'n' in [1,32] | |||
5307 | /// n == 32 encoded as n == 0. | |||
5308 | OperandMatchResultTy | |||
5309 | ARMAsmParser::parseShifterImm(OperandVector &Operands) { | |||
5310 | MCAsmParser &Parser = getParser(); | |||
5311 | const AsmToken &Tok = Parser.getTok(); | |||
5312 | SMLoc S = Tok.getLoc(); | |||
5313 | if (Tok.isNot(AsmToken::Identifier)) { | |||
5314 | Error(S, "shift operator 'asr' or 'lsl' expected"); | |||
5315 | return MatchOperand_ParseFail; | |||
5316 | } | |||
5317 | StringRef ShiftName = Tok.getString(); | |||
5318 | bool isASR; | |||
5319 | if (ShiftName == "lsl" || ShiftName == "LSL") | |||
5320 | isASR = false; | |||
5321 | else if (ShiftName == "asr" || ShiftName == "ASR") | |||
5322 | isASR = true; | |||
5323 | else { | |||
5324 | Error(S, "shift operator 'asr' or 'lsl' expected"); | |||
5325 | return MatchOperand_ParseFail; | |||
5326 | } | |||
5327 | Parser.Lex(); // Eat the operator. | |||
5328 | ||||
5329 | // A '#' and a shift amount. | |||
5330 | if (Parser.getTok().isNot(AsmToken::Hash) && | |||
5331 | Parser.getTok().isNot(AsmToken::Dollar)) { | |||
5332 | Error(Parser.getTok().getLoc(), "'#' expected"); | |||
5333 | return MatchOperand_ParseFail; | |||
5334 | } | |||
5335 | Parser.Lex(); // Eat hash token. | |||
5336 | SMLoc ExLoc = Parser.getTok().getLoc(); | |||
5337 | ||||
5338 | const MCExpr *ShiftAmount; | |||
5339 | SMLoc EndLoc; | |||
5340 | if (getParser().parseExpression(ShiftAmount, EndLoc)) { | |||
5341 | Error(ExLoc, "malformed shift expression"); | |||
5342 | return MatchOperand_ParseFail; | |||
5343 | } | |||
5344 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); | |||
5345 | if (!CE) { | |||
5346 | Error(ExLoc, "shift amount must be an immediate"); | |||
5347 | return MatchOperand_ParseFail; | |||
5348 | } | |||
5349 | ||||
5350 | int64_t Val = CE->getValue(); | |||
5351 | if (isASR) { | |||
5352 | // Shift amount must be in [1,32] | |||
5353 | if (Val < 1 || Val > 32) { | |||
5354 | Error(ExLoc, "'asr' shift amount must be in range [1,32]"); | |||
5355 | return MatchOperand_ParseFail; | |||
5356 | } | |||
5357 | // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode. | |||
5358 | if (isThumb() && Val == 32) { | |||
5359 | Error(ExLoc, "'asr #32' shift amount not allowed in Thumb mode"); | |||
5360 | return MatchOperand_ParseFail; | |||
5361 | } | |||
5362 | if (Val == 32) Val = 0; | |||
5363 | } else { | |||
5364 | // Shift amount must be in [1,32] | |||
5365 | if (Val < 0 || Val > 31) { | |||
5366 | Error(ExLoc, "'lsr' shift amount must be in range [0,31]"); | |||
5367 | return MatchOperand_ParseFail; | |||
5368 | } | |||
5369 | } | |||
5370 | ||||
5371 | Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, EndLoc)); | |||
5372 | ||||
5373 | return MatchOperand_Success; | |||
5374 | } | |||
5375 | ||||
5376 | /// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family | |||
5377 | /// of instructions. Legal values are: | |||
5378 | /// ror #n 'n' in {0, 8, 16, 24} | |||
5379 | OperandMatchResultTy | |||
5380 | ARMAsmParser::parseRotImm(OperandVector &Operands) { | |||
5381 | MCAsmParser &Parser = getParser(); | |||
5382 | const AsmToken &Tok = Parser.getTok(); | |||
5383 | SMLoc S = Tok.getLoc(); | |||
5384 | if (Tok.isNot(AsmToken::Identifier)) | |||
5385 | return MatchOperand_NoMatch; | |||
5386 | StringRef ShiftName = Tok.getString(); | |||
5387 | if (ShiftName != "ror" && ShiftName != "ROR") | |||
5388 | return MatchOperand_NoMatch; | |||
5389 | Parser.Lex(); // Eat the operator. | |||
5390 | ||||
5391 | // A '#' and a rotate amount. | |||
5392 | if (Parser.getTok().isNot(AsmToken::Hash) && | |||
5393 | Parser.getTok().isNot(AsmToken::Dollar)) { | |||
5394 | Error(Parser.getTok().getLoc(), "'#' expected"); | |||
5395 | return MatchOperand_ParseFail; | |||
5396 | } | |||
5397 | Parser.Lex(); // Eat hash token. | |||
5398 | SMLoc ExLoc = Parser.getTok().getLoc(); | |||
5399 | ||||
5400 | const MCExpr *ShiftAmount; | |||
5401 | SMLoc EndLoc; | |||
5402 | if (getParser().parseExpression(ShiftAmount, EndLoc)) { | |||
5403 | Error(ExLoc, "malformed rotate expression"); | |||
5404 | return MatchOperand_ParseFail; | |||
5405 | } | |||
5406 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); | |||
5407 | if (!CE) { | |||
5408 | Error(ExLoc, "rotate amount must be an immediate"); | |||
5409 | return MatchOperand_ParseFail; | |||
5410 | } | |||
5411 | ||||
5412 | int64_t Val = CE->getValue(); | |||
5413 | // Shift amount must be in {0, 8, 16, 24} (0 is undocumented extension) | |||
5414 | // normally, zero is represented in asm by omitting the rotate operand | |||
5415 | // entirely. | |||
5416 | if (Val != 8 && Val != 16 && Val != 24 && Val != 0) { | |||
5417 | Error(ExLoc, "'ror' rotate amount must be 8, 16, or 24"); | |||
5418 | return MatchOperand_ParseFail; | |||
5419 | } | |||
5420 | ||||
5421 | Operands.push_back(ARMOperand::CreateRotImm(Val, S, EndLoc)); | |||
5422 | ||||
5423 | return MatchOperand_Success; | |||
5424 | } | |||
5425 | ||||
5426 | OperandMatchResultTy | |||
5427 | ARMAsmParser::parseModImm(OperandVector &Operands) { | |||
5428 | MCAsmParser &Parser = getParser(); | |||
5429 | MCAsmLexer &Lexer = getLexer(); | |||
5430 | int64_t Imm1, Imm2; | |||
5431 | ||||
5432 | SMLoc S = Parser.getTok().getLoc(); | |||
5433 | ||||
5434 | // 1) A mod_imm operand can appear in the place of a register name: | |||
5435 | // add r0, #mod_imm | |||
5436 | // add r0, r0, #mod_imm | |||
5437 | // to correctly handle the latter, we bail out as soon as we see an | |||
5438 | // identifier. | |||
5439 | // | |||
5440 | // 2) Similarly, we do not want to parse into complex operands: | |||
5441 | // mov r0, #mod_imm | |||
5442 | // mov r0, :lower16:(_foo) | |||
5443 | if (Parser.getTok().is(AsmToken::Identifier) || | |||
5444 | Parser.getTok().is(AsmToken::Colon)) | |||
5445 | return MatchOperand_NoMatch; | |||
5446 | ||||
5447 | // Hash (dollar) is optional as per the ARMARM | |||
5448 | if (Parser.getTok().is(AsmToken::Hash) || | |||
5449 | Parser.getTok().is(AsmToken::Dollar)) { | |||
5450 | // Avoid parsing into complex operands (#:) | |||
5451 | if (Lexer.peekTok().is(AsmToken::Colon)) | |||
5452 | return MatchOperand_NoMatch; | |||
5453 | ||||
5454 | // Eat the hash (dollar) | |||
5455 | Parser.Lex(); | |||
5456 | } | |||
5457 | ||||
5458 | SMLoc Sx1, Ex1; | |||
5459 | Sx1 = Parser.getTok().getLoc(); | |||
5460 | const MCExpr *Imm1Exp; | |||
5461 | if (getParser().parseExpression(Imm1Exp, Ex1)) { | |||
5462 | Error(Sx1, "malformed expression"); | |||
5463 | return MatchOperand_ParseFail; | |||
5464 | } | |||
5465 | ||||
5466 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm1Exp); | |||
5467 | ||||
5468 | if (CE) { | |||
5469 | // Immediate must fit within 32-bits | |||
5470 | Imm1 = CE->getValue(); | |||
5471 | int Enc = ARM_AM::getSOImmVal(Imm1); | |||
5472 | if (Enc != -1 && Parser.getTok().is(AsmToken::EndOfStatement)) { | |||
5473 | // We have a match! | |||
5474 | Operands.push_back(ARMOperand::CreateModImm((Enc & 0xFF), | |||
5475 | (Enc & 0xF00) >> 7, | |||
5476 | Sx1, Ex1)); | |||
5477 | return MatchOperand_Success; | |||
5478 | } | |||
5479 | ||||
5480 | // We have parsed an immediate which is not for us, fallback to a plain | |||
5481 | // immediate. This can happen for instruction aliases. For an example, | |||
5482 | // ARMInstrInfo.td defines the alias [mov <-> mvn] which can transform | |||
5483 | // a mov (mvn) with a mod_imm_neg/mod_imm_not operand into the opposite | |||
5484 | // instruction with a mod_imm operand. The alias is defined such that the | |||
5485 | // parser method is shared, that's why we have to do this here. | |||
5486 | if (Parser.getTok().is(AsmToken::EndOfStatement)) { | |||
5487 | Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1)); | |||
5488 | return MatchOperand_Success; | |||
5489 | } | |||
5490 | } else { | |||
5491 | // Operands like #(l1 - l2) can only be evaluated at a later stage (via an | |||
5492 | // MCFixup). Fallback to a plain immediate. | |||
5493 | Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1)); | |||
5494 | return MatchOperand_Success; | |||
5495 | } | |||
5496 | ||||
5497 | // From this point onward, we expect the input to be a (#bits, #rot) pair | |||
5498 | if (Parser.getTok().isNot(AsmToken::Comma)) { | |||
5499 | Error(Sx1, "expected modified immediate operand: #[0, 255], #even[0-30]"); | |||
5500 | return MatchOperand_ParseFail; | |||
5501 | } | |||
5502 | ||||
5503 | if (Imm1 & ~0xFF) { | |||
5504 | Error(Sx1, "immediate operand must a number in the range [0, 255]"); | |||
5505 | return MatchOperand_ParseFail; | |||
5506 | } | |||
5507 | ||||
5508 | // Eat the comma | |||
5509 | Parser.Lex(); | |||
5510 | ||||
5511 | // Repeat for #rot | |||
5512 | SMLoc Sx2, Ex2; | |||
5513 | Sx2 = Parser.getTok().getLoc(); | |||
5514 | ||||
5515 | // Eat the optional hash (dollar) | |||
5516 | if (Parser.getTok().is(AsmToken::Hash) || | |||
5517 | Parser.getTok().is(AsmToken::Dollar)) | |||
5518 | Parser.Lex(); | |||
5519 | ||||
5520 | const MCExpr *Imm2Exp; | |||
5521 | if (getParser().parseExpression(Imm2Exp, Ex2)) { | |||
5522 | Error(Sx2, "malformed expression"); | |||
5523 | return MatchOperand_ParseFail; | |||
5524 | } | |||
5525 | ||||
5526 | CE = dyn_cast<MCConstantExpr>(Imm2Exp); | |||
5527 | ||||
5528 | if (CE) { | |||
5529 | Imm2 = CE->getValue(); | |||
5530 | if (!(Imm2 & ~0x1E)) { | |||
5531 | // We have a match! | |||
5532 | Operands.push_back(ARMOperand::CreateModImm(Imm1, Imm2, S, Ex2)); | |||
5533 | return MatchOperand_Success; | |||
5534 | } | |||
5535 | Error(Sx2, "immediate operand must an even number in the range [0, 30]"); | |||
5536 | return MatchOperand_ParseFail; | |||
5537 | } else { | |||
5538 | Error(Sx2, "constant expression expected"); | |||
5539 | return MatchOperand_ParseFail; | |||
5540 | } | |||
5541 | } | |||
5542 | ||||
/// parseBitfield - Parse the "#<lsb>, #<width>" operand pair used by the
/// BFC/BFI/SBFX/UBFX family of bitfield instructions. The LSB must be in
/// [0,31] and the width in [1, 32-lsb].
OperandMatchResultTy
ARMAsmParser::parseBitfield(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = Parser.getTok().getLoc();
  // The bitfield descriptor is really two operands, the LSB and the width.
  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar)) {
    Error(Parser.getTok().getLoc(), "'#' expected");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat hash token.

  const MCExpr *LSBExpr;
  SMLoc E = Parser.getTok().getLoc();
  if (getParser().parseExpression(LSBExpr)) {
    Error(E, "malformed immediate expression");
    return MatchOperand_ParseFail;
  }
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
  if (!CE) {
    Error(E, "'lsb' operand must be an immediate");
    return MatchOperand_ParseFail;
  }

  int64_t LSB = CE->getValue();
  // The LSB must be in the range [0,31]
  if (LSB < 0 || LSB > 31) {
    Error(E, "'lsb' operand must be in the range [0,31]");
    return MatchOperand_ParseFail;
  }
  E = Parser.getTok().getLoc();

  // Expect another immediate operand.
  if (Parser.getTok().isNot(AsmToken::Comma)) {
    Error(Parser.getTok().getLoc(), "too few operands");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat comma token.
  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar)) {
    Error(Parser.getTok().getLoc(), "'#' expected");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat hash token.

  const MCExpr *WidthExpr;
  SMLoc EndLoc;
  if (getParser().parseExpression(WidthExpr, EndLoc)) {
    Error(E, "malformed immediate expression");
    return MatchOperand_ParseFail;
  }
  CE = dyn_cast<MCConstantExpr>(WidthExpr);
  if (!CE) {
    Error(E, "'width' operand must be an immediate");
    return MatchOperand_ParseFail;
  }

  int64_t Width = CE->getValue();
  // The width must be in the range [1,32-lsb]
  if (Width < 1 || Width > 32 - LSB) {
    Error(E, "'width' operand must be in the range [1,32-lsb]");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, EndLoc));

  return MatchOperand_Success;
}
5611 | ||||
5612 | OperandMatchResultTy | |||
5613 | ARMAsmParser::parsePostIdxReg(OperandVector &Operands) { | |||
5614 | // Check for a post-index addressing register operand. Specifically: | |||
5615 | // postidx_reg := '+' register {, shift} | |||
5616 | // | '-' register {, shift} | |||
5617 | // | register {, shift} | |||
5618 | ||||
5619 | // This method must return MatchOperand_NoMatch without consuming any tokens | |||
5620 | // in the case where there is no match, as other alternatives take other | |||
5621 | // parse methods. | |||
5622 | MCAsmParser &Parser = getParser(); | |||
5623 | AsmToken Tok = Parser.getTok(); | |||
5624 | SMLoc S = Tok.getLoc(); | |||
5625 | bool haveEaten = false; | |||
5626 | bool isAdd = true; | |||
5627 | if (Tok.is(AsmToken::Plus)) { | |||
5628 | Parser.Lex(); // Eat the '+' token. | |||
5629 | haveEaten = true; | |||
5630 | } else if (Tok.is(AsmToken::Minus)) { | |||
5631 | Parser.Lex(); // Eat the '-' token. | |||
5632 | isAdd = false; | |||
5633 | haveEaten = true; | |||
5634 | } | |||
5635 | ||||
5636 | SMLoc E = Parser.getTok().getEndLoc(); | |||
5637 | int Reg = tryParseRegister(); | |||
5638 | if (Reg == -1) { | |||
5639 | if (!haveEaten) | |||
5640 | return MatchOperand_NoMatch; | |||
5641 | Error(Parser.getTok().getLoc(), "register expected"); | |||
5642 | return MatchOperand_ParseFail; | |||
5643 | } | |||
5644 | ||||
5645 | ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift; | |||
5646 | unsigned ShiftImm = 0; | |||
5647 | if (Parser.getTok().is(AsmToken::Comma)) { | |||
5648 | Parser.Lex(); // Eat the ','. | |||
5649 | if (parseMemRegOffsetShift(ShiftTy, ShiftImm)) | |||
5650 | return MatchOperand_ParseFail; | |||
5651 | ||||
5652 | // FIXME: Only approximates end...may include intervening whitespace. | |||
5653 | E = Parser.getTok().getLoc(); | |||
5654 | } | |||
5655 | ||||
5656 | Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy, | |||
5657 | ShiftImm, S, E)); | |||
5658 | ||||
5659 | return MatchOperand_Success; | |||
5660 | } | |||
5661 | ||||
OperandMatchResultTy
ARMAsmParser::parseAM3Offset(OperandVector &Operands) {
  // Check for a post-index addressing register operand. Specifically:
  // am3offset := '+' register
  //              | '-' register
  //              | register
  //              | # imm
  //              | # + imm
  //              | # - imm

  // This method must return MatchOperand_NoMatch without consuming any tokens
  // in the case where there is no match, as other alternatives take other
  // parse methods.
  MCAsmParser &Parser = getParser();
  AsmToken Tok = Parser.getTok();
  SMLoc S = Tok.getLoc();

  // Do immediates first, as we always parse those if we have a '#'.
  if (Parser.getTok().is(AsmToken::Hash) ||
      Parser.getTok().is(AsmToken::Dollar)) {
    Parser.Lex(); // Eat '#' or '$'.
    // Explicitly look for a '-', as we need to encode negative zero
    // differently.
    bool isNegative = Parser.getTok().is(AsmToken::Minus);
    const MCExpr *Offset;
    SMLoc E;
    if (getParser().parseExpression(Offset, E))
      return MatchOperand_ParseFail;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
    if (!CE) {
      Error(S, "constant expression expected");
      return MatchOperand_ParseFail;
    }
    // Negative zero is encoded as the flag value
    // std::numeric_limits<int32_t>::min().
    // NOTE(review): the 64-bit constant is narrowed to int32_t here, so
    // values outside 32-bit range wrap silently — presumably later operand
    // predicates reject out-of-range offsets; confirm.
    int32_t Val = CE->getValue();
    if (isNegative && Val == 0)
      Val = std::numeric_limits<int32_t>::min();

    Operands.push_back(
      ARMOperand::CreateImm(MCConstantExpr::create(Val, getContext()), S, E));

    return MatchOperand_Success;
  }

  // Otherwise, parse an optionally signed register form.
  bool haveEaten = false;
  bool isAdd = true;
  if (Tok.is(AsmToken::Plus)) {
    Parser.Lex(); // Eat the '+' token.
    haveEaten = true;
  } else if (Tok.is(AsmToken::Minus)) {
    Parser.Lex(); // Eat the '-' token.
    isAdd = false;
    haveEaten = true;
  }

  Tok = Parser.getTok();
  int Reg = tryParseRegister();
  if (Reg == -1) {
    // Without a consumed sign this may be some other operand form — defer.
    if (!haveEaten)
      return MatchOperand_NoMatch;
    Error(Tok.getLoc(), "register expected");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
                                                  0, S, Tok.getEndLoc()));

  return MatchOperand_Success;
}
5732 | ||||
/// Convert parsed operands to MCInst. Needed here because this instruction
/// only has two register operands, but multiplication is commutative so
/// assemblers should accept both "mul rD, rN, rD" and "mul rD, rD, rN".
void ARMAsmParser::cvtThumbMultiply(MCInst &Inst,
                                    const OperandVector &Operands) {
  // Operands[3] supplies Rd, Operands[1] the cc_out operand, and
  // Operands[2] the condition code (see the add* calls below).
  ((ARMOperand &)*Operands[3]).addRegOperands(Inst, 1);
  ((ARMOperand &)*Operands[1]).addCCOutOperands(Inst, 1);
  // If we have a three-operand form, make sure to set Rn to be the operand
  // that isn't the same as Rd.
  unsigned RegOp = 4;
  if (Operands.size() == 6 &&
      ((ARMOperand &)*Operands[4]).getReg() ==
      ((ARMOperand &)*Operands[3]).getReg())
    RegOp = 5;
  ((ARMOperand &)*Operands[RegOp]).addRegOperands(Inst, 1);
  // Repeat Inst's operand 0 (Rd) — presumably the tied destination/source
  // slot of the MUL encoding; confirm against the instruction definition.
  Inst.addOperand(Inst.getOperand(0));
  ((ARMOperand &)*Operands[2]).addCondCodeOperands(Inst, 2);
}
5751 | ||||
5752 | void ARMAsmParser::cvtThumbBranches(MCInst &Inst, | |||
5753 | const OperandVector &Operands) { | |||
5754 | int CondOp = -1, ImmOp = -1; | |||
5755 | switch(Inst.getOpcode()) { | |||
5756 | case ARM::tB: | |||
5757 | case ARM::tBcc: CondOp = 1; ImmOp = 2; break; | |||
5758 | ||||
5759 | case ARM::t2B: | |||
5760 | case ARM::t2Bcc: CondOp = 1; ImmOp = 3; break; | |||
5761 | ||||
5762 | default: llvm_unreachable("Unexpected instruction in cvtThumbBranches")::llvm::llvm_unreachable_internal("Unexpected instruction in cvtThumbBranches" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 5762); | |||
5763 | } | |||
5764 | // first decide whether or not the branch should be conditional | |||
5765 | // by looking at it's location relative to an IT block | |||
5766 | if(inITBlock()) { | |||
5767 | // inside an IT block we cannot have any conditional branches. any | |||
5768 | // such instructions needs to be converted to unconditional form | |||
5769 | switch(Inst.getOpcode()) { | |||
5770 | case ARM::tBcc: Inst.setOpcode(ARM::tB); break; | |||
5771 | case ARM::t2Bcc: Inst.setOpcode(ARM::t2B); break; | |||
5772 | } | |||
5773 | } else { | |||
5774 | // outside IT blocks we can only have unconditional branches with AL | |||
5775 | // condition code or conditional branches with non-AL condition code | |||
5776 | unsigned Cond = static_cast<ARMOperand &>(*Operands[CondOp]).getCondCode(); | |||
5777 | switch(Inst.getOpcode()) { | |||
5778 | case ARM::tB: | |||
5779 | case ARM::tBcc: | |||
5780 | Inst.setOpcode(Cond == ARMCC::AL ? ARM::tB : ARM::tBcc); | |||
5781 | break; | |||
5782 | case ARM::t2B: | |||
5783 | case ARM::t2Bcc: | |||
5784 | Inst.setOpcode(Cond == ARMCC::AL ? ARM::t2B : ARM::t2Bcc); | |||
5785 | break; | |||
5786 | } | |||
5787 | } | |||
5788 | ||||
5789 | // now decide on encoding size based on branch target range | |||
5790 | switch(Inst.getOpcode()) { | |||
5791 | // classify tB as either t2B or t1B based on range of immediate operand | |||
5792 | case ARM::tB: { | |||
5793 | ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]); | |||
5794 | if (!op.isSignedOffset<11, 1>() && isThumb() && hasV8MBaseline()) | |||
5795 | Inst.setOpcode(ARM::t2B); | |||
5796 | break; | |||
5797 | } | |||
5798 | // classify tBcc as either t2Bcc or t1Bcc based on range of immediate operand | |||
5799 | case ARM::tBcc: { | |||
5800 | ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]); | |||
5801 | if (!op.isSignedOffset<8, 1>() && isThumb() && hasV8MBaseline()) | |||
5802 | Inst.setOpcode(ARM::t2Bcc); | |||
5803 | break; | |||
5804 | } | |||
5805 | } | |||
5806 | ((ARMOperand &)*Operands[ImmOp]).addImmOperands(Inst, 1); | |||
5807 | ((ARMOperand &)*Operands[CondOp]).addCondCodeOperands(Inst, 2); | |||
5808 | } | |||
5809 | ||||
5810 | void ARMAsmParser::cvtMVEVMOVQtoDReg( | |||
5811 | MCInst &Inst, const OperandVector &Operands) { | |||
5812 | ||||
5813 | // mnemonic, condition code, Rt, Rt2, Qd, idx, Qd again, idx2 | |||
5814 | assert(Operands.size() == 8)(static_cast <bool> (Operands.size() == 8) ? void (0) : __assert_fail ("Operands.size() == 8", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 5814, __extension__ __PRETTY_FUNCTION__)); | |||
5815 | ||||
5816 | ((ARMOperand &)*Operands[2]).addRegOperands(Inst, 1); // Rt | |||
5817 | ((ARMOperand &)*Operands[3]).addRegOperands(Inst, 1); // Rt2 | |||
5818 | ((ARMOperand &)*Operands[4]).addRegOperands(Inst, 1); // Qd | |||
5819 | ((ARMOperand &)*Operands[5]).addMVEPairVectorIndexOperands(Inst, 1); // idx | |||
5820 | // skip second copy of Qd in Operands[6] | |||
5821 | ((ARMOperand &)*Operands[7]).addMVEPairVectorIndexOperands(Inst, 1); // idx2 | |||
5822 | ((ARMOperand &)*Operands[1]).addCondCodeOperands(Inst, 2); // condition code | |||
5823 | } | |||
5824 | ||||
/// Parse an ARM memory expression, return false if successful else return true
/// or an error. The first token must be a '[' when called.
///
/// Accepted forms (each optionally followed by a pre-index writeback '!'):
///   [Rn]                         base register only
///   [Rn, :align] / [Rn:align]    alignment specifier (16/32/64/128/256 bits)
///   [Rn, #imm]                   immediate offset ('#'/'$' optional, for gas
///                                compatibility)
///   [Rn, {+/-}Rm {, shift #amt}] register offset with optional shift
bool ARMAsmParser::parseMemory(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S, E; // Source range of the whole memory operand.
  if (Parser.getTok().isNot(AsmToken::LBrac))
    return TokError("Token is not a Left Bracket");
  S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat left bracket token.

  // A base register is always required.
  const AsmToken &BaseRegTok = Parser.getTok();
  int BaseRegNum = tryParseRegister();
  if (BaseRegNum == -1)
    return Error(BaseRegTok.getLoc(), "register expected");

  // The next token must either be a comma, a colon or a closing bracket.
  const AsmToken &Tok = Parser.getTok();
  if (!Tok.is(AsmToken::Colon) && !Tok.is(AsmToken::Comma) &&
      !Tok.is(AsmToken::RBrac))
    return Error(Tok.getLoc(), "malformed memory operand");

  // "[Rn]": base register only, no offset and no alignment.
  if (Tok.is(AsmToken::RBrac)) {
    E = Tok.getEndLoc();
    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
                                             ARM_AM::no_shift, 0, 0, false,
                                             S, E));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand. It's rather odd, but syntactically valid.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  assert((Tok.is(AsmToken::Colon) || Tok.is(AsmToken::Comma)) &&(static_cast <bool> ((Tok.is(AsmToken::Colon) || Tok.is (AsmToken::Comma)) && "Lost colon or comma in memory operand?!" ) ? void (0) : __assert_fail ("(Tok.is(AsmToken::Colon) || Tok.is(AsmToken::Comma)) && \"Lost colon or comma in memory operand?!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 5865, __extension__ __PRETTY_FUNCTION__))
         "Lost colon or comma in memory operand?!")(static_cast <bool> ((Tok.is(AsmToken::Colon) || Tok.is (AsmToken::Comma)) && "Lost colon or comma in memory operand?!" ) ? void (0) : __assert_fail ("(Tok.is(AsmToken::Colon) || Tok.is(AsmToken::Comma)) && \"Lost colon or comma in memory operand?!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 5865, __extension__ __PRETTY_FUNCTION__));
  // A comma before the offset (or before a ':' alignment) is optional; just
  // consume it if present.
  if (Tok.is(AsmToken::Comma)) {
    Parser.Lex(); // Eat the comma.
  }

  // If we have a ':', it's an alignment specifier.
  if (Parser.getTok().is(AsmToken::Colon)) {
    Parser.Lex(); // Eat the ':'.
    E = Parser.getTok().getLoc();
    // NOTE(review): Tok is a reference to the lexer's *current* token, so
    // after the Lex() above this reads the location of the token following
    // the ':' rather than of the original ','/':' — confirm that is the
    // intended alignment loc.
    SMLoc AlignmentLoc = Tok.getLoc();

    const MCExpr *Expr;
    if (getParser().parseExpression(Expr))
      return true;

    // The expression has to be a constant. Memory references with relocations
    // don't come through here, as they use the <label> forms of the relevant
    // instructions.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
    if (!CE)
      return Error (E, "constant expression expected");

    // Translate the alignment in bits into the byte count stored in the
    // operand; only the five architecturally supported widths are accepted.
    unsigned Align = 0;
    switch (CE->getValue()) {
    default:
      return Error(E,
                   "alignment specifier must be 16, 32, 64, 128, or 256 bits");
    case 16: Align = 2; break;
    case 32: Align = 4; break;
    case 64: Align = 8; break;
    case 128: Align = 16; break;
    case 256: Align = 32; break;
    }

    // Now we should have the closing ']'
    if (Parser.getTok().isNot(AsmToken::RBrac))
      return Error(Parser.getTok().getLoc(), "']' expected");
    E = Parser.getTok().getEndLoc();
    Parser.Lex(); // Eat right bracket token.

    // Don't worry about range checking the value here. That's handled by
    // the is*() predicates.
    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
                                             ARM_AM::no_shift, 0, Align,
                                             false, S, E, AlignmentLoc));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  // If we have a '#' or '$', it's an immediate offset, else assume it's a
  // register offset. Be friendly and also accept a plain integer or expression
  // (without a leading hash) for gas compatibility.
  if (Parser.getTok().is(AsmToken::Hash) ||
      Parser.getTok().is(AsmToken::Dollar) ||
      Parser.getTok().is(AsmToken::LParen) ||
      Parser.getTok().is(AsmToken::Integer)) {
    if (Parser.getTok().is(AsmToken::Hash) ||
        Parser.getTok().is(AsmToken::Dollar))
      Parser.Lex(); // Eat '#' or '$'
    E = Parser.getTok().getLoc();

    // Remember whether a leading '-' was seen so "#-0" can be distinguished
    // from "#0" below.
    bool isNegative = getParser().getTok().is(AsmToken::Minus);
    const MCExpr *Offset, *AdjustedOffset;
    if (getParser().parseExpression(Offset))
      return true;

    if (const auto *CE = dyn_cast<MCConstantExpr>(Offset)) {
      // If the constant was #-0, represent it as
      // std::numeric_limits<int32_t>::min().
      int32_t Val = CE->getValue();
      if (isNegative && Val == 0)
        CE = MCConstantExpr::create(std::numeric_limits<int32_t>::min(),
                                    getContext());
      // Don't worry about range checking the value here. That's handled by
      // the is*() predicates.
      AdjustedOffset = CE;
    } else
      AdjustedOffset = Offset;
    Operands.push_back(ARMOperand::CreateMem(
        BaseRegNum, AdjustedOffset, 0, ARM_AM::no_shift, 0, 0, false, S, E));

    // Now we should have the closing ']'
    if (Parser.getTok().isNot(AsmToken::RBrac))
      return Error(Parser.getTok().getLoc(), "']' expected");
    E = Parser.getTok().getEndLoc();
    Parser.Lex(); // Eat right bracket token.

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  // The register offset is optionally preceded by a '+' or '-'
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex(); // Eat the '-'.
  } else if (Parser.getTok().is(AsmToken::Plus)) {
    // Nothing to do.
    Parser.Lex(); // Eat the '+'.
  }

  E = Parser.getTok().getLoc();
  int OffsetRegNum = tryParseRegister();
  if (OffsetRegNum == -1)
    return Error(E, "register expected");

  // If there's a shift operator, handle it.
  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
  unsigned ShiftImm = 0;
  if (Parser.getTok().is(AsmToken::Comma)) {
    Parser.Lex(); // Eat the ','.
    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
      return true;
  }

  // Now we should have the closing ']'
  if (Parser.getTok().isNot(AsmToken::RBrac))
    return Error(Parser.getTok().getLoc(), "']' expected");
  E = Parser.getTok().getEndLoc();
  Parser.Lex(); // Eat right bracket token.

  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, OffsetRegNum,
                                           ShiftType, ShiftImm, 0, isNegative,
                                           S, E));

  // If there's a pre-indexing writeback marker, '!', just add it as a token
  // operand.
  if (Parser.getTok().is(AsmToken::Exclaim)) {
    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
    Parser.Lex(); // Eat the '!'.
  }

  return false;
}
6012 | ||||
6013 | /// parseMemRegOffsetShift - one of these two: | |||
6014 | /// ( lsl | lsr | asr | ror ) , # shift_amount | |||
6015 | /// rrx | |||
6016 | /// return true if it parses a shift otherwise it returns false. | |||
6017 | bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St, | |||
6018 | unsigned &Amount) { | |||
6019 | MCAsmParser &Parser = getParser(); | |||
6020 | SMLoc Loc = Parser.getTok().getLoc(); | |||
6021 | const AsmToken &Tok = Parser.getTok(); | |||
6022 | if (Tok.isNot(AsmToken::Identifier)) | |||
6023 | return Error(Loc, "illegal shift operator"); | |||
6024 | StringRef ShiftName = Tok.getString(); | |||
6025 | if (ShiftName == "lsl" || ShiftName == "LSL" || | |||
6026 | ShiftName == "asl" || ShiftName == "ASL") | |||
6027 | St = ARM_AM::lsl; | |||
6028 | else if (ShiftName == "lsr" || ShiftName == "LSR") | |||
6029 | St = ARM_AM::lsr; | |||
6030 | else if (ShiftName == "asr" || ShiftName == "ASR") | |||
6031 | St = ARM_AM::asr; | |||
6032 | else if (ShiftName == "ror" || ShiftName == "ROR") | |||
6033 | St = ARM_AM::ror; | |||
6034 | else if (ShiftName == "rrx" || ShiftName == "RRX") | |||
6035 | St = ARM_AM::rrx; | |||
6036 | else if (ShiftName == "uxtw" || ShiftName == "UXTW") | |||
6037 | St = ARM_AM::uxtw; | |||
6038 | else | |||
6039 | return Error(Loc, "illegal shift operator"); | |||
6040 | Parser.Lex(); // Eat shift type token. | |||
6041 | ||||
6042 | // rrx stands alone. | |||
6043 | Amount = 0; | |||
6044 | if (St != ARM_AM::rrx) { | |||
6045 | Loc = Parser.getTok().getLoc(); | |||
6046 | // A '#' and a shift amount. | |||
6047 | const AsmToken &HashTok = Parser.getTok(); | |||
6048 | if (HashTok.isNot(AsmToken::Hash) && | |||
6049 | HashTok.isNot(AsmToken::Dollar)) | |||
6050 | return Error(HashTok.getLoc(), "'#' expected"); | |||
6051 | Parser.Lex(); // Eat hash token. | |||
6052 | ||||
6053 | const MCExpr *Expr; | |||
6054 | if (getParser().parseExpression(Expr)) | |||
6055 | return true; | |||
6056 | // Range check the immediate. | |||
6057 | // lsl, ror: 0 <= imm <= 31 | |||
6058 | // lsr, asr: 0 <= imm <= 32 | |||
6059 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); | |||
6060 | if (!CE) | |||
6061 | return Error(Loc, "shift amount must be an immediate"); | |||
6062 | int64_t Imm = CE->getValue(); | |||
6063 | if (Imm < 0 || | |||
6064 | ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) || | |||
6065 | ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32)) | |||
6066 | return Error(Loc, "immediate shift value out of range"); | |||
6067 | // If <ShiftTy> #0, turn it into a no_shift. | |||
6068 | if (Imm == 0) | |||
6069 | St = ARM_AM::lsl; | |||
6070 | // For consistency, treat lsr #32 and asr #32 as having immediate value 0. | |||
6071 | if (Imm == 32) | |||
6072 | Imm = 0; | |||
6073 | Amount = Imm; | |||
6074 | } | |||
6075 | ||||
6076 | return false; | |||
6077 | } | |||
6078 | ||||
/// parseFPImm - A floating point immediate expression operand.
///
/// Returns MatchOperand_Success and pushes a generic Immediate operand
/// holding the bitcast encoding, MatchOperand_NoMatch when the token stream
/// cannot be an FP immediate for this instruction, or
/// MatchOperand_ParseFail (with a diagnostic) on a malformed immediate.
OperandMatchResultTy
ARMAsmParser::parseFPImm(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  // Anything that can accept a floating point constant as an operand
  // needs to go through here, as the regular parseExpression is
  // integer only.
  //
  // This routine still creates a generic Immediate operand, containing
  // a bitcast of the 64-bit floating point value. The various operands
  // that accept floats can check whether the value is valid for them
  // via the standard is*() predicates.

  SMLoc S = Parser.getTok().getLoc();

  // An FP immediate must be introduced by '#' or '$'.
  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar))
    return MatchOperand_NoMatch;

  // Disambiguate the VMOV forms that can accept an FP immediate.
  // vmov.f32 <sreg>, #imm
  // vmov.f64 <dreg>, #imm
  // vmov.f32 <dreg>, #imm  @ vector f32x2
  // vmov.f32 <qreg>, #imm  @ vector f32x4
  //
  // There are also the NEON VMOV instructions which expect an
  // integer constant. Make sure we don't try to parse an FPImm
  // for these:
  // vmov.i{8|16|32|64} <dreg|qreg>, #imm
  //
  // Operands[2] is the data-type suffix token (".f32" etc.) pushed by the
  // mnemonic splitter; Operands[0] is the mnemonic itself.
  ARMOperand &TyOp = static_cast<ARMOperand &>(*Operands[2]);
  bool isVmovf = TyOp.isToken() &&
                 (TyOp.getToken() == ".f32" || TyOp.getToken() == ".f64" ||
                  TyOp.getToken() == ".f16");
  ARMOperand &Mnemonic = static_cast<ARMOperand &>(*Operands[0]);
  bool isFconst = Mnemonic.isToken() && (Mnemonic.getToken() == "fconstd" ||
                                         Mnemonic.getToken() == "fconsts");
  if (!(isVmovf || isFconst))
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat '#' or '$'.

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex();
  }
  const AsmToken &Tok = Parser.getTok();
  SMLoc Loc = Tok.getLoc();
  if (Tok.is(AsmToken::Real) && isVmovf) {
    // NOTE(review): the literal is always read as IEEE single precision
    // here, even for .f64/.f16 forms — presumably the is*() predicates
    // re-interpret the bits later; confirm against the operand predicates.
    APFloat RealVal(APFloat::IEEEsingle(), Tok.getString());
    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
    // If we had a '-' in front, toggle the sign bit.
    IntVal ^= (uint64_t)isNegative << 31;
    Parser.Lex(); // Eat the token.
    Operands.push_back(ARMOperand::CreateImm(
          MCConstantExpr::create(IntVal, getContext()),
          S, Parser.getTok().getLoc()));
    return MatchOperand_Success;
  }
  // Also handle plain integers. Instructions which allow floating point
  // immediates also allow a raw encoded 8-bit value.
  if (Tok.is(AsmToken::Integer) && isFconst) {
    int64_t Val = Tok.getIntVal();
    Parser.Lex(); // Eat the token.
    // The raw encoding is a single byte (0..255).
    if (Val > 255 || Val < 0) {
      Error(Loc, "encoded floating point value out of range");
      return MatchOperand_ParseFail;
    }
    // Decode the 8-bit encoding to its float value, then store the
    // single-precision bit pattern in the immediate.
    float RealVal = ARM_AM::getFPImmFloat(Val);
    Val = APFloat(RealVal).bitcastToAPInt().getZExtValue();

    Operands.push_back(ARMOperand::CreateImm(
        MCConstantExpr::create(Val, getContext()), S,
        Parser.getTok().getLoc()));
    return MatchOperand_Success;
  }

  Error(Loc, "invalid floating point immediate");
  return MatchOperand_ParseFail;
}
6160 | ||||
/// Parse a arm instruction operand. For now this parses the operand regardless
/// of the mnemonic.
///
/// Tries the tblgen'd custom operand parsers first, then dispatches on the
/// leading token: register/label identifiers, expressions, '[' memory
/// operands, '{' register lists, '#'/'$' immediates, ':lower16:'-style
/// prefixes, and the 'ldr Rn, =expr' pseudo. Returns true on error.
bool ARMAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
  MCAsmParser &Parser = getParser();
  SMLoc S, E;

  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
  if (ResTy == MatchOperand_Success)
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy == MatchOperand_ParseFail)
    return true;

  switch (getLexer().getKind()) {
  default:
    Error(Parser.getTok().getLoc(), "unexpected token in operand");
    return true;
  case AsmToken::Identifier: {
    // If we've seen a branch mnemonic, the next operand must be a label. This
    // is true even if the label is a register name. So "br r1" means branch to
    // label "r1".
    bool ExpectLabel = Mnemonic == "b" || Mnemonic == "bl";
    if (!ExpectLabel) {
      // Try plain register (with optional writeback '!') first, then a
      // shifted-register operand like "r0, lsl #2".
      if (!tryParseRegisterWithWriteBack(Operands))
        return false;
      int Res = tryParseShiftRegister(Operands);
      if (Res == 0) // success
        return false;
      else if (Res == -1) // irrecoverable error
        return true;
      // If this is VMRS, check for the apsr_nzcv operand.
      if (Mnemonic == "vmrs" &&
          Parser.getTok().getString().equals_insensitive("apsr_nzcv")) {
        S = Parser.getTok().getLoc();
        Parser.Lex();
        Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S));
        return false;
      }
    }

    // Fall though for the Identifier case that is not a register or a
    // special name.
    LLVM_FALLTHROUGH[[gnu::fallthrough]];
  }
  case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
  case AsmToken::Integer: // things like 1f and 2b as a branch targets
  case AsmToken::String:  // quoted label names.
  case AsmToken::Dot: {   // . as a branch target
    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    S = Parser.getTok().getLoc();
    if (getParser().parseExpression(IdVal))
      return true;
    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
    return false;
  }
  case AsmToken::LBrac:
    return parseMemory(Operands);
  case AsmToken::LCurly:
    // Register lists; "clr*" mnemonics allow APSR etc. in the list, hence
    // the relaxed checking flag.
    return parseRegisterList(Operands, !Mnemonic.startswith("clr"));
  case AsmToken::Dollar:
  case AsmToken::Hash: {
    // #42 -> immediate
    // $ 42 -> immediate
    // $foo -> symbol name
    // $42 -> symbol name
    S = Parser.getTok().getLoc();

    // Favor the interpretation of $-prefixed operands as symbol names.
    // Cases where immediates are explicitly expected are handled by their
    // specific ParseMethod implementations.
    auto AdjacentToken = getLexer().peekTok(/*ShouldSkipSpace=*/false);
    bool ExpectIdentifier = Parser.getTok().is(AsmToken::Dollar) &&
                            (AdjacentToken.is(AsmToken::Identifier) ||
                             AdjacentToken.is(AsmToken::Integer));
    if (!ExpectIdentifier) {
      // Token is not part of identifier. Drop leading $ or # before parsing
      // expression.
      Parser.Lex();
    }

    if (Parser.getTok().isNot(AsmToken::Colon)) {
      bool IsNegative = Parser.getTok().is(AsmToken::Minus);
      const MCExpr *ImmVal;
      if (getParser().parseExpression(ImmVal))
        return true;
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
      if (CE) {
        // Represent "#-0" distinctly from "#0" (matches parseMemory).
        int32_t Val = CE->getValue();
        if (IsNegative && Val == 0)
          ImmVal = MCConstantExpr::create(std::numeric_limits<int32_t>::min(),
                                          getContext());
      }
      E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
      Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));

      // There can be a trailing '!' on operands that we want as a separate
      // '!' Token operand. Handle that here. For example, the compatibility
      // alias for 'srsdb sp!, #imm' is 'srsdb #imm!'.
      if (Parser.getTok().is(AsmToken::Exclaim)) {
        Operands.push_back(ARMOperand::CreateToken(Parser.getTok().getString(),
                                                   Parser.getTok().getLoc()));
        Parser.Lex(); // Eat exclaim token
      }
      return false;
    }
    // w/ a ':' after the '#', it's just like a plain ':'.
    LLVM_FALLTHROUGH[[gnu::fallthrough]];
  }
  case AsmToken::Colon: {
    S = Parser.getTok().getLoc();
    // ":lower16:" and ":upper16:" expression prefixes
    // FIXME: Check it's an expression prefix,
    // e.g. (FOO - :lower16:BAR) isn't legal.
    ARMMCExpr::VariantKind RefKind;
    if (parsePrefix(RefKind))
      return true;

    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return true;

    const MCExpr *ExprVal = ARMMCExpr::create(RefKind, SubExprVal,
                                              getContext());
    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
    return false;
  }
  case AsmToken::Equal: {
    S = Parser.getTok().getLoc();
    if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
      return Error(S, "unexpected token in operand");
    Parser.Lex(); // Eat '='
    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return true;
    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);

    // execute-only: we assume that assembly programmers know what they are
    // doing and allow literal pool creation here
    Operands.push_back(ARMOperand::CreateConstantPoolImm(SubExprVal, S, E));
    return false;
  }
  }
}
6312 | ||||
// parsePrefix - Parse ARM 16-bit relocations expression prefix, i.e.
// :lower16: and :upper16:.
//
// On entry the lexer sits on the (optional '#' before the) leading ':'.
// On success, RefKind is set to the matched variant, both ':' delimiters
// are consumed, and false is returned; true is returned (with a
// diagnostic emitted) on any malformed or unsupported prefix.
bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
  MCAsmParser &Parser = getParser();
  RefKind = ARMMCExpr::VK_ARM_None;

  // consume an optional '#' (GNU compatibility)
  if (getLexer().is(AsmToken::Hash))
    Parser.Lex();

  // :lower16: and :upper16: modifiers
  assert(getLexer().is(AsmToken::Colon) && "expected a :")(static_cast <bool> (getLexer().is(AsmToken::Colon) && "expected a :") ? void (0) : __assert_fail ("getLexer().is(AsmToken::Colon) && \"expected a :\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 6324, __extension__ __PRETTY_FUNCTION__));
  Parser.Lex(); // Eat ':'

  if (getLexer().isNot(AsmToken::Identifier)) {
    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
    return true;
  }

  // One bit per object-file format; used to record which formats support
  // each prefix.
  enum {
    COFF = (1 << MCContext::IsCOFF),
    ELF = (1 << MCContext::IsELF),
    MACHO = (1 << MCContext::IsMachO),
    WASM = (1 << MCContext::IsWasm),
  };
  // Table of recognized prefixes, their expression variant kinds, and the
  // object-file formats (bitmask of the enum above) that can encode them.
  static const struct PrefixEntry {
    const char *Spelling;
    ARMMCExpr::VariantKind VariantKind;
    uint8_t SupportedFormats;
  } PrefixEntries[] = {
    { "lower16", ARMMCExpr::VK_ARM_LO16, COFF | ELF | MACHO },
    { "upper16", ARMMCExpr::VK_ARM_HI16, COFF | ELF | MACHO },
  };

  StringRef IDVal = Parser.getTok().getIdentifier();

  // Look up the spelling in the table.
  const auto &Prefix =
      llvm::find_if(PrefixEntries, [&IDVal](const PrefixEntry &PE) {
        return PE.Spelling == IDVal;
      });
  if (Prefix == std::end(PrefixEntries)) {
    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
    return true;
  }

  // Map the current object-file format onto its bit so we can test it
  // against the prefix's SupportedFormats mask.
  uint8_t CurrentFormat;
  switch (getContext().getObjectFileType()) {
  case MCContext::IsMachO:
    CurrentFormat = MACHO;
    break;
  case MCContext::IsELF:
    CurrentFormat = ELF;
    break;
  case MCContext::IsCOFF:
    CurrentFormat = COFF;
    break;
  case MCContext::IsWasm:
    CurrentFormat = WASM;
    break;
  case MCContext::IsGOFF:
  case MCContext::IsXCOFF:
    llvm_unreachable("unexpected object format")::llvm::llvm_unreachable_internal("unexpected object format", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 6374);
    break;
  }

  if (~Prefix->SupportedFormats & CurrentFormat) {
    Error(Parser.getTok().getLoc(),
          "cannot represent relocation in the current file format");
    return true;
  }

  RefKind = Prefix->VariantKind;
  Parser.Lex();

  // The prefix must be terminated by a second ':'.
  if (getLexer().isNot(AsmToken::Colon)) {
    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
    return true;
  }
  Parser.Lex(); // Eat the last ':'

  return false;
}
6395 | ||||
6396 | /// Given a mnemonic, split out possible predication code and carry | |||
6397 | /// setting letters to form a canonical mnemonic and flags. | |||
6398 | // | |||
6399 | // FIXME: Would be nice to autogen this. | |||
6400 | // FIXME: This is a bit of a maze of special cases. | |||
6401 | StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic, | |||
6402 | StringRef ExtraToken, | |||
6403 | unsigned &PredicationCode, | |||
6404 | unsigned &VPTPredicationCode, | |||
6405 | bool &CarrySetting, | |||
6406 | unsigned &ProcessorIMod, | |||
6407 | StringRef &ITMask) { | |||
6408 | PredicationCode = ARMCC::AL; | |||
6409 | VPTPredicationCode = ARMVCC::None; | |||
6410 | CarrySetting = false; | |||
6411 | ProcessorIMod = 0; | |||
6412 | ||||
6413 | // Ignore some mnemonics we know aren't predicated forms. | |||
6414 | // | |||
6415 | // FIXME: Would be nice to autogen this. | |||
6416 | if ((Mnemonic == "movs" && isThumb()) || | |||
6417 | Mnemonic == "teq" || Mnemonic == "vceq" || Mnemonic == "svc" || | |||
6418 | Mnemonic == "mls" || Mnemonic == "smmls" || Mnemonic == "vcls" || | |||
6419 | Mnemonic == "vmls" || Mnemonic == "vnmls" || Mnemonic == "vacge" || | |||
6420 | Mnemonic == "vcge" || Mnemonic == "vclt" || Mnemonic == "vacgt" || | |||
6421 | Mnemonic == "vaclt" || Mnemonic == "vacle" || Mnemonic == "hlt" || | |||
6422 | Mnemonic == "vcgt" || Mnemonic == "vcle" || Mnemonic == "smlal" || | |||
6423 | Mnemonic == "umaal" || Mnemonic == "umlal" || Mnemonic == "vabal" || | |||
6424 | Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" || | |||
6425 | Mnemonic == "fmuls" || Mnemonic == "vmaxnm" || Mnemonic == "vminnm" || | |||
6426 | Mnemonic == "vcvta" || Mnemonic == "vcvtn" || Mnemonic == "vcvtp" || | |||
6427 | Mnemonic == "vcvtm" || Mnemonic == "vrinta" || Mnemonic == "vrintn" || | |||
6428 | Mnemonic == "vrintp" || Mnemonic == "vrintm" || Mnemonic == "hvc" || | |||
6429 | Mnemonic.startswith("vsel") || Mnemonic == "vins" || Mnemonic == "vmovx" || | |||
6430 | Mnemonic == "bxns" || Mnemonic == "blxns" || | |||
6431 | Mnemonic == "vdot" || Mnemonic == "vmmla" || | |||
6432 | Mnemonic == "vudot" || Mnemonic == "vsdot" || | |||
6433 | Mnemonic == "vcmla" || Mnemonic == "vcadd" || | |||
6434 | Mnemonic == "vfmal" || Mnemonic == "vfmsl" || | |||
6435 | Mnemonic == "wls" || Mnemonic == "le" || Mnemonic == "dls" || | |||
6436 | Mnemonic == "csel" || Mnemonic == "csinc" || | |||
6437 | Mnemonic == "csinv" || Mnemonic == "csneg" || Mnemonic == "cinc" || | |||
6438 | Mnemonic == "cinv" || Mnemonic == "cneg" || Mnemonic == "cset" || | |||
6439 | Mnemonic == "csetm") | |||
6440 | return Mnemonic; | |||
6441 | ||||
6442 | // First, split out any predication code. Ignore mnemonics we know aren't | |||
6443 | // predicated but do have a carry-set and so weren't caught above. | |||
6444 | if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" && | |||
6445 | Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" && | |||
6446 | Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" && | |||
6447 | Mnemonic != "sbcs" && Mnemonic != "rscs" && | |||
6448 | !(hasMVE() && | |||
6449 | (Mnemonic == "vmine" || | |||
6450 | Mnemonic == "vshle" || Mnemonic == "vshlt" || Mnemonic == "vshllt" || | |||
6451 | Mnemonic == "vrshle" || Mnemonic == "vrshlt" || | |||
6452 | Mnemonic == "vmvne" || Mnemonic == "vorne" || | |||
6453 | Mnemonic == "vnege" || Mnemonic == "vnegt" || | |||
6454 | Mnemonic == "vmule" || Mnemonic == "vmult" || | |||
6455 | Mnemonic == "vrintne" || | |||
6456 | Mnemonic == "vcmult" || Mnemonic == "vcmule" || | |||
6457 | Mnemonic == "vpsele" || Mnemonic == "vpselt" || | |||
6458 | Mnemonic.startswith("vq")))) { | |||
6459 | unsigned CC = ARMCondCodeFromString(Mnemonic.substr(Mnemonic.size()-2)); | |||
6460 | if (CC != ~0U) { | |||
6461 | Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2); | |||
6462 | PredicationCode = CC; | |||
6463 | } | |||
6464 | } | |||
6465 | ||||
6466 | // Next, determine if we have a carry setting bit. We explicitly ignore all | |||
6467 | // the instructions we know end in 's'. | |||
6468 | if (Mnemonic.endswith("s") && | |||
6469 | !(Mnemonic == "cps" || Mnemonic == "mls" || | |||
6470 | Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" || | |||
6471 | Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" || | |||
6472 | Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" || | |||
6473 | Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" || | |||
6474 | Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" || | |||
6475 | Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" || | |||
6476 | Mnemonic == "fmuls" || Mnemonic == "fcmps" || Mnemonic == "fcmpzs" || | |||
6477 | Mnemonic == "vfms" || Mnemonic == "vfnms" || Mnemonic == "fconsts" || | |||
6478 | Mnemonic == "bxns" || Mnemonic == "blxns" || Mnemonic == "vfmas" || | |||
6479 | Mnemonic == "vmlas" || | |||
6480 | (Mnemonic == "movs" && isThumb()))) { | |||
6481 | Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1); | |||
6482 | CarrySetting = true; | |||
6483 | } | |||
6484 | ||||
6485 | // The "cps" instruction can have a interrupt mode operand which is glued into | |||
6486 | // the mnemonic. Check if this is the case, split it and parse the imod op | |||
6487 | if (Mnemonic.startswith("cps")) { | |||
6488 | // Split out any imod code. | |||
6489 | unsigned IMod = | |||
6490 | StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2)) | |||
6491 | .Case("ie", ARM_PROC::IE) | |||
6492 | .Case("id", ARM_PROC::ID) | |||
6493 | .Default(~0U); | |||
6494 | if (IMod != ~0U) { | |||
6495 | Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2); | |||
6496 | ProcessorIMod = IMod; | |||
6497 | } | |||
6498 | } | |||
6499 | ||||
6500 | if (isMnemonicVPTPredicable(Mnemonic, ExtraToken) && Mnemonic != "vmovlt" && | |||
6501 | Mnemonic != "vshllt" && Mnemonic != "vrshrnt" && Mnemonic != "vshrnt" && | |||
6502 | Mnemonic != "vqrshrunt" && Mnemonic != "vqshrunt" && | |||
6503 | Mnemonic != "vqrshrnt" && Mnemonic != "vqshrnt" && Mnemonic != "vmullt" && | |||
6504 | Mnemonic != "vqmovnt" && Mnemonic != "vqmovunt" && | |||
6505 | Mnemonic != "vqmovnt" && Mnemonic != "vmovnt" && Mnemonic != "vqdmullt" && | |||
6506 | Mnemonic != "vpnot" && Mnemonic != "vcvtt" && Mnemonic != "vcvt") { | |||
6507 | unsigned CC = ARMVectorCondCodeFromString(Mnemonic.substr(Mnemonic.size()-1)); | |||
6508 | if (CC != ~0U) { | |||
6509 | Mnemonic = Mnemonic.slice(0, Mnemonic.size()-1); | |||
6510 | VPTPredicationCode = CC; | |||
6511 | } | |||
6512 | return Mnemonic; | |||
6513 | } | |||
6514 | ||||
6515 | // The "it" instruction has the condition mask on the end of the mnemonic. | |||
6516 | if (Mnemonic.startswith("it")) { | |||
6517 | ITMask = Mnemonic.slice(2, Mnemonic.size()); | |||
6518 | Mnemonic = Mnemonic.slice(0, 2); | |||
6519 | } | |||
6520 | ||||
6521 | if (Mnemonic.startswith("vpst")) { | |||
6522 | ITMask = Mnemonic.slice(4, Mnemonic.size()); | |||
6523 | Mnemonic = Mnemonic.slice(0, 4); | |||
6524 | } | |||
6525 | else if (Mnemonic.startswith("vpt")) { | |||
6526 | ITMask = Mnemonic.slice(3, Mnemonic.size()); | |||
6527 | Mnemonic = Mnemonic.slice(0, 3); | |||
6528 | } | |||
6529 | ||||
6530 | return Mnemonic; | |||
6531 | } | |||
6532 | ||||
/// Given a canonical mnemonic, determine if the instruction ever allows
/// inclusion of carry set or predication code operands.
///
/// \param Mnemonic   the mnemonic with any carry-set ('s') suffix and
///                   condition code already stripped by splitMnemonic().
/// \param ExtraToken the first '.'-separated suffix token, if any.
/// \param FullInst   the complete instruction text as written.
/// \param CanAcceptCarrySet [out] true if an 's' (flag-setting) suffix is
///                   ever legal for this mnemonic.
/// \param CanAcceptPredicationCode [out] true if a condition-code suffix is
///                   ever legal for this mnemonic in the current ISA mode.
/// \param CanAcceptVPTPredicationCode [out] true if an MVE VPT predication
///                   suffix (t/e) is ever legal for this mnemonic.
//
// FIXME: It would be nice to autogen this.
void ARMAsmParser::getMnemonicAcceptInfo(StringRef Mnemonic,
                                         StringRef ExtraToken,
                                         StringRef FullInst,
                                         bool &CanAcceptCarrySet,
                                         bool &CanAcceptPredicationCode,
                                         bool &CanAcceptVPTPredicationCode) {
  CanAcceptVPTPredicationCode = isMnemonicVPTPredicable(Mnemonic, ExtraToken);

  // Only the data-processing mnemonics below have flag-setting forms; the
  // last group (smull, mov, mla, smlal, umlal, umull) only in ARM mode.
  CanAcceptCarrySet =
      Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
      Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
      Mnemonic == "add" || Mnemonic == "adc" || Mnemonic == "mul" ||
      Mnemonic == "bic" || Mnemonic == "asr" || Mnemonic == "orr" ||
      Mnemonic == "mvn" || Mnemonic == "rsb" || Mnemonic == "rsc" ||
      Mnemonic == "orn" || Mnemonic == "sbc" || Mnemonic == "eor" ||
      Mnemonic == "neg" || Mnemonic == "vfm" || Mnemonic == "vfnm" ||
      (!isThumb() &&
       (Mnemonic == "smull" || Mnemonic == "mov" || Mnemonic == "mla" ||
        Mnemonic == "smlal" || Mnemonic == "umlal" || Mnemonic == "umull"));

  // First, the mnemonics that can never take a condition code, regardless
  // of mode.
  if (Mnemonic == "bkpt" || Mnemonic == "cbnz" || Mnemonic == "setend" ||
      Mnemonic == "cps" || Mnemonic == "it" || Mnemonic == "cbz" ||
      Mnemonic == "trap" || Mnemonic == "hlt" || Mnemonic == "udf" ||
      Mnemonic.startswith("crc32") || Mnemonic.startswith("cps") ||
      Mnemonic.startswith("vsel") || Mnemonic == "vmaxnm" ||
      Mnemonic == "vminnm" || Mnemonic == "vcvta" || Mnemonic == "vcvtn" ||
      Mnemonic == "vcvtp" || Mnemonic == "vcvtm" || Mnemonic == "vrinta" ||
      Mnemonic == "vrintn" || Mnemonic == "vrintp" || Mnemonic == "vrintm" ||
      Mnemonic.startswith("aes") || Mnemonic == "hvc" || Mnemonic == "setpan" ||
      Mnemonic.startswith("sha1") || Mnemonic.startswith("sha256") ||
      (FullInst.startswith("vmull") && FullInst.endswith(".p64")) ||
      Mnemonic == "vmovx" || Mnemonic == "vins" ||
      Mnemonic == "vudot" || Mnemonic == "vsdot" ||
      Mnemonic == "vcmla" || Mnemonic == "vcadd" ||
      Mnemonic == "vfmal" || Mnemonic == "vfmsl" ||
      Mnemonic == "vfmat" || Mnemonic == "vfmab" ||
      Mnemonic == "vdot" || Mnemonic == "vmmla" ||
      Mnemonic == "sb" || Mnemonic == "ssbb" ||
      Mnemonic == "pssbb" || Mnemonic == "vsmmla" ||
      Mnemonic == "vummla" || Mnemonic == "vusmmla" ||
      Mnemonic == "vusdot" || Mnemonic == "vsudot" ||
      Mnemonic == "bfcsel" || Mnemonic == "wls" ||
      Mnemonic == "dls" || Mnemonic == "le" || Mnemonic == "csel" ||
      Mnemonic == "csinc" || Mnemonic == "csinv" || Mnemonic == "csneg" ||
      Mnemonic == "cinc" || Mnemonic == "cinv" || Mnemonic == "cneg" ||
      Mnemonic == "cset" || Mnemonic == "csetm" ||
      Mnemonic.startswith("vpt") || Mnemonic.startswith("vpst") ||
      (hasCDE() && MS.isCDEInstr(Mnemonic) &&
       !MS.isITPredicableCDEInstr(Mnemonic)) ||
      (hasMVE() &&
       (Mnemonic.startswith("vst2") || Mnemonic.startswith("vld2") ||
        Mnemonic.startswith("vst4") || Mnemonic.startswith("vld4") ||
        Mnemonic.startswith("wlstp") || Mnemonic.startswith("dlstp") ||
        Mnemonic.startswith("letp")))) {
    // These mnemonics are never predicable
    CanAcceptPredicationCode = false;
  } else if (!isThumb()) {
    // Some instructions are only predicable in Thumb mode
    CanAcceptPredicationCode =
        Mnemonic != "cdp2" && Mnemonic != "clrex" && Mnemonic != "mcr2" &&
        Mnemonic != "mcrr2" && Mnemonic != "mrc2" && Mnemonic != "mrrc2" &&
        Mnemonic != "dmb" && Mnemonic != "dfb" && Mnemonic != "dsb" &&
        Mnemonic != "isb" && Mnemonic != "pld" && Mnemonic != "pli" &&
        Mnemonic != "pldw" && Mnemonic != "ldc2" && Mnemonic != "ldc2l" &&
        Mnemonic != "stc2" && Mnemonic != "stc2l" &&
        Mnemonic != "tsb" &&
        !Mnemonic.startswith("rfe") && !Mnemonic.startswith("srs");
  } else if (isThumbOne()) {
    // Thumb1: 'movs' (and, before v6-M, also 'nop') can never take a
    // condition code.
    if (hasV6MOps())
      CanAcceptPredicationCode = Mnemonic != "movs";
    else
      CanAcceptPredicationCode = Mnemonic != "nop" && Mnemonic != "movs";
  } else
    CanAcceptPredicationCode = true;
}
6612 | ||||
// Some Thumb instructions have two operand forms that are not
// available as three operand, convert to two operand form if possible.
//
// FIXME: We would really like to be able to tablegen'erate this.
void ARMAsmParser::tryConvertingToTwoOperandForm(StringRef Mnemonic,
                                                 bool CarrySetting,
                                                 OperandVector &Operands) {
  // Only consider the fully-populated three-operand form; at this point
  // operand slots 3..5 hold Rd, Rn and Rm/#imm respectively.
  if (Operands.size() != 6)
    return;

  const auto &Op3 = static_cast<ARMOperand &>(*Operands[3]);
  auto &Op4 = static_cast<ARMOperand &>(*Operands[4]);
  // The first two explicit operands must both be registers for the
  // two-operand reduction to make sense.
  if (!Op3.isReg() || !Op4.isReg())
    return;

  auto Op3Reg = Op3.getReg();
  auto Op4Reg = Op4.getReg();

  // For most Thumb2 cases we just generate the 3 operand form and reduce
  // it in processInstruction(), but the 3 operand form of ADD (t2ADDrr)
  // won't accept SP or PC so we do the transformation here taking care
  // with immediate range in the 'add sp, sp #imm' case.
  auto &Op5 = static_cast<ARMOperand &>(*Operands[5]);
  if (isThumbTwo()) {
    if (Mnemonic != "add")
      return;
    // Transform when any operand is the PC, ...
    bool TryTransform = Op3Reg == ARM::PC || Op4Reg == ARM::PC ||
                        (Op5.isReg() && Op5.getReg() == ARM::PC);
    if (!TryTransform) {
      // ... or when SP is involved, unless it is 'add sp, sp, #imm' with an
      // immediate outside the tADDspi range (then keep the 3-operand form).
      TryTransform = (Op3Reg == ARM::SP || Op4Reg == ARM::SP ||
                      (Op5.isReg() && Op5.getReg() == ARM::SP)) &&
                     !(Op3Reg == ARM::SP && Op4Reg == ARM::SP &&
                       Op5.isImm() && !Op5.isImm0_508s4());
    }
    if (!TryTransform)
      return;
  } else if (!isThumbOne())
    return;

  // Only these mnemonics have a two-operand form at all.
  if (!(Mnemonic == "add" || Mnemonic == "sub" || Mnemonic == "and" ||
        Mnemonic == "eor" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
        Mnemonic == "asr" || Mnemonic == "adc" || Mnemonic == "sbc" ||
        Mnemonic == "ror" || Mnemonic == "orr" || Mnemonic == "bic"))
    return;

  // If first 2 operands of a 3 operand instruction are the same
  // then transform to 2 operand version of the same instruction
  // e.g. 'adds r0, r0, #1' transforms to 'adds r0, #1'
  bool Transform = Op3Reg == Op4Reg;

  // For commutative operations, we might be able to transform if we swap
  // Op4 and Op5. The 'ADD Rdm, SP, Rdm' form is already handled specially
  // as tADDrsp.
  const ARMOperand *LastOp = &Op5;
  bool Swap = false;
  if (!Transform && Op5.isReg() && Op3Reg == Op5.getReg() &&
      ((Mnemonic == "add" && Op4Reg != ARM::SP) ||
       Mnemonic == "and" || Mnemonic == "eor" ||
       Mnemonic == "adc" || Mnemonic == "orr")) {
    Swap = true;
    LastOp = &Op4;
    Transform = true;
  }

  // If both registers are the same then remove one of them from
  // the operand list, with certain exceptions.
  if (Transform) {
    // Don't transform 'adds Rd, Rd, Rm' or 'sub{s} Rd, Rd, Rm' because the
    // 2 operand forms don't exist.
    if (((Mnemonic == "add" && CarrySetting) || Mnemonic == "sub") &&
        LastOp->isReg())
      Transform = false;

    // Don't transform 'add/sub{s} Rd, Rd, #imm' if the immediate fits into
    // 3-bits because the ARMARM says not to.
    if ((Mnemonic == "add" || Mnemonic == "sub") && LastOp->isImm0_7())
      Transform = false;
  }

  if (Transform) {
    // Put the surviving source operand in slot 4, then drop the redundant
    // register from slot 3.
    if (Swap)
      std::swap(Op4, Op5);
    Operands.erase(Operands.begin() + 3);
  }
}
6698 | ||||
/// Decide whether the defaulted (non-flag-setting) cc_out operand that was
/// speculatively appended to \p Operands should be removed again, because
/// the instruction variant being matched (e.g. MOVW, ADDW/SUBW, 32-bit MUL)
/// has no cc_out operand at all. Returns true if it should be omitted.
bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
                                          OperandVector &Operands) {
  // FIXME: This is all horribly hacky. We really need a better way to deal
  // with optional operands like this in the matcher table.

  // The 'mov' mnemonic is special. One variant has a cc_out operand, while
  // another does not. Specifically, the MOVW instruction does not. So we
  // special case it here and remove the defaulted (non-setting) cc_out
  // operand if that's the instruction we're trying to match.
  //
  // We do this as post-processing of the explicit operands rather than just
  // conditionally adding the cc_out in the first place because we need
  // to check the type of the parsed immediate operand.
  if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
      !static_cast<ARMOperand &>(*Operands[4]).isModImm() &&
      static_cast<ARMOperand &>(*Operands[4]).isImm0_65535Expr() &&
      static_cast<ARMOperand &>(*Operands[1]).getReg() == 0)
    return true;

  // Register-register 'add' for thumb does not have a cc_out operand
  // when there are only two register operands.
  if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
      static_cast<ARMOperand &>(*Operands[3]).isReg() &&
      static_cast<ARMOperand &>(*Operands[4]).isReg() &&
      static_cast<ARMOperand &>(*Operands[1]).getReg() == 0)
    return true;
  // Register-register 'add' for thumb does not have a cc_out operand
  // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
  // have to check the immediate range here since Thumb2 has a variant
  // that can handle a different range and has a cc_out operand.
  if (((isThumb() && Mnemonic == "add") ||
       (isThumbTwo() && Mnemonic == "sub")) &&
      Operands.size() == 6 && static_cast<ARMOperand &>(*Operands[3]).isReg() &&
      static_cast<ARMOperand &>(*Operands[4]).isReg() &&
      static_cast<ARMOperand &>(*Operands[4]).getReg() == ARM::SP &&
      static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
      ((Mnemonic == "add" && static_cast<ARMOperand &>(*Operands[5]).isReg()) ||
       static_cast<ARMOperand &>(*Operands[5]).isImm0_1020s4()))
    return true;
  // For Thumb2, add/sub immediate does not have a cc_out operand for the
  // imm0_4095 variant. That's the least-preferred variant when
  // selecting via the generic "add" mnemonic, so to know that we
  // should remove the cc_out operand, we have to explicitly check that
  // it's not one of the other variants. Ugh.
  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
      Operands.size() == 6 && static_cast<ARMOperand &>(*Operands[3]).isReg() &&
      static_cast<ARMOperand &>(*Operands[4]).isReg() &&
      static_cast<ARMOperand &>(*Operands[5]).isImm()) {
    // Nest conditions rather than one big 'if' statement for readability.
    //
    // If both registers are low, we're in an IT block, and the immediate is
    // in range, we should use encoding T1 instead, which has a cc_out.
    if (inITBlock() &&
        isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) &&
        isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) &&
        static_cast<ARMOperand &>(*Operands[5]).isImm0_7())
      return false;
    // Check against T3. If the second register is the PC, this is an
    // alternate form of ADR, which uses encoding T4, so check for that too.
    if (static_cast<ARMOperand &>(*Operands[4]).getReg() != ARM::PC &&
        (static_cast<ARMOperand &>(*Operands[5]).isT2SOImm() ||
         static_cast<ARMOperand &>(*Operands[5]).isT2SOImmNeg()))
      return false;

    // Otherwise, we use encoding T4, which does not have a cc_out
    // operand.
    return true;
  }

  // The thumb2 multiply instruction doesn't have a CCOut register, so
  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
  // use the 16-bit encoding or not.
  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
      static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
      static_cast<ARMOperand &>(*Operands[3]).isReg() &&
      static_cast<ARMOperand &>(*Operands[4]).isReg() &&
      static_cast<ARMOperand &>(*Operands[5]).isReg() &&
      // If the registers aren't low regs, the destination reg isn't the
      // same as one of the source regs, or the cc_out operand is zero
      // outside of an IT block, we have to use the 32-bit encoding, so
      // remove the cc_out operand.
      (!isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand &>(*Operands[5]).getReg()) ||
       !inITBlock() || (static_cast<ARMOperand &>(*Operands[3]).getReg() !=
                            static_cast<ARMOperand &>(*Operands[5]).getReg() &&
                        static_cast<ARMOperand &>(*Operands[3]).getReg() !=
                            static_cast<ARMOperand &>(*Operands[4]).getReg())))
    return true;

  // Also check the 'mul' syntax variant that doesn't specify an explicit
  // destination register.
  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
      static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
      static_cast<ARMOperand &>(*Operands[3]).isReg() &&
      static_cast<ARMOperand &>(*Operands[4]).isReg() &&
      // If the registers aren't low regs or the cc_out operand is zero
      // outside of an IT block, we have to use the 32-bit encoding, so
      // remove the cc_out operand.
      (!isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) ||
       !inITBlock()))
    return true;

  // Register-register 'add/sub' for thumb does not have a cc_out operand
  // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
  // right, this will result in better diagnostics (which operand is off)
  // anyway.
  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
      (Operands.size() == 5 || Operands.size() == 6) &&
      static_cast<ARMOperand &>(*Operands[3]).isReg() &&
      static_cast<ARMOperand &>(*Operands[3]).getReg() == ARM::SP &&
      static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
      (static_cast<ARMOperand &>(*Operands[4]).isImm() ||
       (Operands.size() == 6 &&
        static_cast<ARMOperand &>(*Operands[5]).isImm()))) {
    // Thumb2 (add|sub){s}{p}.w GPRnopc, sp, #{T2SOImm} has cc_out
    return (!(isThumbTwo() &&
              (static_cast<ARMOperand &>(*Operands[4]).isT2SOImm() ||
               static_cast<ARMOperand &>(*Operands[4]).isT2SOImmNeg())));
  }
  // Fixme: Should join all the thumb+thumb2 (add|sub) in a single if case
  // Thumb2 ADD r0, #4095 -> ADDW r0, r0, #4095 (T4)
  // Thumb2 SUB r0, #4095 -> SUBW r0, r0, #4095
  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
      (Operands.size() == 5) &&
      static_cast<ARMOperand &>(*Operands[3]).isReg() &&
      static_cast<ARMOperand &>(*Operands[3]).getReg() != ARM::SP &&
      static_cast<ARMOperand &>(*Operands[3]).getReg() != ARM::PC &&
      static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
      static_cast<ARMOperand &>(*Operands[4]).isImm()) {
    const ARMOperand &IMM = static_cast<ARMOperand &>(*Operands[4]);
    if (IMM.isT2SOImm() || IMM.isT2SOImmNeg())
      return false; // add.w / sub.w
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(IMM.getImm())) {
      const int64_t Value = CE->getValue();
      // Thumb1 imm8 sub / add
      if ((Value < ((1 << 7) - 1) << 2) && inITBlock() && (!(Value & 3)) &&
          isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()))
        return false;
      return true; // Thumb2 T4 addw / subw
    }
  }
  return false;
}
6845 | ||||
6846 | bool ARMAsmParser::shouldOmitPredicateOperand(StringRef Mnemonic, | |||
6847 | OperandVector &Operands) { | |||
6848 | // VRINT{Z, X} have a predicate operand in VFP, but not in NEON | |||
6849 | unsigned RegIdx = 3; | |||
6850 | if ((((Mnemonic == "vrintz" || Mnemonic == "vrintx") && !hasMVE()) || | |||
6851 | Mnemonic == "vrintr") && | |||
6852 | (static_cast<ARMOperand &>(*Operands[2]).getToken() == ".f32" || | |||
6853 | static_cast<ARMOperand &>(*Operands[2]).getToken() == ".f16")) { | |||
6854 | if (static_cast<ARMOperand &>(*Operands[3]).isToken() && | |||
6855 | (static_cast<ARMOperand &>(*Operands[3]).getToken() == ".f32" || | |||
6856 | static_cast<ARMOperand &>(*Operands[3]).getToken() == ".f16")) | |||
6857 | RegIdx = 4; | |||
6858 | ||||
6859 | if (static_cast<ARMOperand &>(*Operands[RegIdx]).isReg() && | |||
6860 | (ARMMCRegisterClasses[ARM::DPRRegClassID].contains( | |||
6861 | static_cast<ARMOperand &>(*Operands[RegIdx]).getReg()) || | |||
6862 | ARMMCRegisterClasses[ARM::QPRRegClassID].contains( | |||
6863 | static_cast<ARMOperand &>(*Operands[RegIdx]).getReg()))) | |||
6864 | return true; | |||
6865 | } | |||
6866 | return false; | |||
6867 | } | |||
6868 | ||||
6869 | bool ARMAsmParser::shouldOmitVectorPredicateOperand(StringRef Mnemonic, | |||
6870 | OperandVector &Operands) { | |||
6871 | if (!hasMVE() || Operands.size() < 3) | |||
6872 | return true; | |||
6873 | ||||
6874 | if (Mnemonic.startswith("vld2") || Mnemonic.startswith("vld4") || | |||
6875 | Mnemonic.startswith("vst2") || Mnemonic.startswith("vst4")) | |||
6876 | return true; | |||
6877 | ||||
6878 | if (Mnemonic.startswith("vctp") || Mnemonic.startswith("vpnot")) | |||
6879 | return false; | |||
6880 | ||||
6881 | if (Mnemonic.startswith("vmov") && | |||
6882 | !(Mnemonic.startswith("vmovl") || Mnemonic.startswith("vmovn") || | |||
6883 | Mnemonic.startswith("vmovx"))) { | |||
6884 | for (auto &Operand : Operands) { | |||
6885 | if (static_cast<ARMOperand &>(*Operand).isVectorIndex() || | |||
6886 | ((*Operand).isReg() && | |||
6887 | (ARMMCRegisterClasses[ARM::SPRRegClassID].contains( | |||
6888 | (*Operand).getReg()) || | |||
6889 | ARMMCRegisterClasses[ARM::DPRRegClassID].contains( | |||
6890 | (*Operand).getReg())))) { | |||
6891 | return true; | |||
6892 | } | |||
6893 | } | |||
6894 | return false; | |||
6895 | } else { | |||
6896 | for (auto &Operand : Operands) { | |||
6897 | // We check the larger class QPR instead of just the legal class | |||
6898 | // MQPR, to more accurately report errors when using Q registers | |||
6899 | // outside of the allowed range. | |||
6900 | if (static_cast<ARMOperand &>(*Operand).isVectorIndex() || | |||
6901 | (Operand->isReg() && | |||
6902 | (ARMMCRegisterClasses[ARM::QPRRegClassID].contains( | |||
6903 | Operand->getReg())))) | |||
6904 | return false; | |||
6905 | } | |||
6906 | return true; | |||
6907 | } | |||
6908 | } | |||
6909 | ||||
6910 | static bool isDataTypeToken(StringRef Tok) { | |||
6911 | return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" || | |||
6912 | Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" || | |||
6913 | Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" || | |||
6914 | Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" || | |||
6915 | Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" || | |||
6916 | Tok == ".f" || Tok == ".d"; | |||
6917 | } | |||
6918 | ||||
// FIXME: This bit should probably be handled via an explicit match class
// in the .td files that matches the suffix instead of having it be
// a literal string token the way it is now.
// Return true if the data type suffix on \p Mnemonic should be ignored when
// matching; this applies only to VLDM/VSTM. The suffix text \p DT is
// currently unused.
static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
  return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
}
6925 | ||||
6926 | static void applyMnemonicAliases(StringRef &Mnemonic, | |||
6927 | const FeatureBitset &Features, | |||
6928 | unsigned VariantID); | |||
6929 | ||||
// The GNU assembler has aliases of ldrd and strd with the second register
// omitted. We don't have a way to do that in tablegen, so fix it up here.
//
// We have to be careful to not emit an invalid Rt2 here, because the rest of
// the assembly parser could then generate confusing diagnostics refering to
// it. If we do find anything that prevents us from doing the transformation we
// bail out, and let the assembly parser report an error on the instruction as
// it is written.
void ARMAsmParser::fixupGNULDRDAlias(StringRef Mnemonic,
                                     OperandVector &Operands) {
  // Only ldrd/strd are affected.
  if (Mnemonic != "ldrd" && Mnemonic != "strd")
    return;
  // Need at least the mnemonic plus Rt and the memory operand.
  if (Operands.size() < 4)
    return;

  ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[2]);
  ARMOperand &Op3 = static_cast<ARMOperand &>(*Operands[3]);

  // For this to be the single-register alias form, Rt must be a GPR and the
  // following operand a memory reference.
  if (!Op2.isReg())
    return;
  if (!Op3.isGPRMem())
    return;

  const MCRegisterClass &GPR = MRI->getRegClass(ARM::GPRRegClassID);
  if (!GPR.contains(Op2.getReg()))
    return;

  unsigned RtEncoding = MRI->getEncodingValue(Op2.getReg());
  if (!isThumb() && (RtEncoding & 1)) {
    // In ARM mode, the registers must be from an aligned pair, this
    // restriction does not apply in Thumb mode.
    return;
  }
  // If Rt is the PC there is no following register to pair it with.
  if (Op2.getReg() == ARM::PC)
    return;
  // Rt2 is the register with the next encoding value. Bail out if it does
  // not exist, is the PC, or is SP on a pre-ARMv8 target.
  unsigned PairedReg = GPR.getRegister(RtEncoding + 1);
  if (!PairedReg || PairedReg == ARM::PC ||
      (PairedReg == ARM::SP && !hasV8Ops()))
    return;

  // Insert the implied Rt2 immediately after Rt, reusing Rt's source
  // locations for diagnostics.
  Operands.insert(
      Operands.begin() + 3,
      ARMOperand::CreateReg(PairedReg, Op2.getStartLoc(), Op2.getEndLoc()));
}
6974 | ||||
6975 | // Dual-register instruction have the following syntax: | |||
6976 | // <mnemonic> <predicate>? <coproc>, <Rdest>, <Rdest+1>, <Rsrc>, ..., #imm | |||
6977 | // This function tries to remove <Rdest+1> and replace <Rdest> with a pair | |||
6978 | // operand. If the conversion fails an error is diagnosed, and the function | |||
6979 | // returns true. | |||
6980 | bool ARMAsmParser::CDEConvertDualRegOperand(StringRef Mnemonic, | |||
6981 | OperandVector &Operands) { | |||
6982 | assert(MS.isCDEDualRegInstr(Mnemonic))(static_cast <bool> (MS.isCDEDualRegInstr(Mnemonic)) ? void (0) : __assert_fail ("MS.isCDEDualRegInstr(Mnemonic)", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 6982, __extension__ __PRETTY_FUNCTION__)); | |||
6983 | bool isPredicable = | |||
6984 | Mnemonic == "cx1da" || Mnemonic == "cx2da" || Mnemonic == "cx3da"; | |||
6985 | size_t NumPredOps = isPredicable ? 1 : 0; | |||
6986 | ||||
6987 | if (Operands.size() <= 3 + NumPredOps) | |||
6988 | return false; | |||
6989 | ||||
6990 | StringRef Op2Diag( | |||
6991 | "operand must be an even-numbered register in the range [r0, r10]"); | |||
6992 | ||||
6993 | const MCParsedAsmOperand &Op2 = *Operands[2 + NumPredOps]; | |||
6994 | if (!Op2.isReg()) | |||
6995 | return Error(Op2.getStartLoc(), Op2Diag); | |||
6996 | ||||
6997 | unsigned RNext; | |||
6998 | unsigned RPair; | |||
6999 | switch (Op2.getReg()) { | |||
7000 | default: | |||
7001 | return Error(Op2.getStartLoc(), Op2Diag); | |||
7002 | case ARM::R0: | |||
7003 | RNext = ARM::R1; | |||
7004 | RPair = ARM::R0_R1; | |||
7005 | break; | |||
7006 | case ARM::R2: | |||
7007 | RNext = ARM::R3; | |||
7008 | RPair = ARM::R2_R3; | |||
7009 | break; | |||
7010 | case ARM::R4: | |||
7011 | RNext = ARM::R5; | |||
7012 | RPair = ARM::R4_R5; | |||
7013 | break; | |||
7014 | case ARM::R6: | |||
7015 | RNext = ARM::R7; | |||
7016 | RPair = ARM::R6_R7; | |||
7017 | break; | |||
7018 | case ARM::R8: | |||
7019 | RNext = ARM::R9; | |||
7020 | RPair = ARM::R8_R9; | |||
7021 | break; | |||
7022 | case ARM::R10: | |||
7023 | RNext = ARM::R11; | |||
7024 | RPair = ARM::R10_R11; | |||
7025 | break; | |||
7026 | } | |||
7027 | ||||
7028 | const MCParsedAsmOperand &Op3 = *Operands[3 + NumPredOps]; | |||
7029 | if (!Op3.isReg() || Op3.getReg() != RNext) | |||
7030 | return Error(Op3.getStartLoc(), "operand must be a consecutive register"); | |||
7031 | ||||
7032 | Operands.erase(Operands.begin() + 3 + NumPredOps); | |||
7033 | Operands[2 + NumPredOps] = | |||
7034 | ARMOperand::CreateReg(RPair, Op2.getStartLoc(), Op2.getEndLoc()); | |||
7035 | return false; | |||
7036 | } | |||
7037 | ||||
7038 | /// Parse an arm instruction mnemonic followed by its operands. | |||
7039 | bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name, | |||
7040 | SMLoc NameLoc, OperandVector &Operands) { | |||
7041 | MCAsmParser &Parser = getParser(); | |||
7042 | ||||
7043 | // Apply mnemonic aliases before doing anything else, as the destination | |||
7044 | // mnemonic may include suffices and we want to handle them normally. | |||
7045 | // The generic tblgen'erated code does this later, at the start of | |||
7046 | // MatchInstructionImpl(), but that's too late for aliases that include | |||
7047 | // any sort of suffix. | |||
7048 | const FeatureBitset &AvailableFeatures = getAvailableFeatures(); | |||
7049 | unsigned AssemblerDialect = getParser().getAssemblerDialect(); | |||
7050 | applyMnemonicAliases(Name, AvailableFeatures, AssemblerDialect); | |||
7051 | ||||
7052 | // First check for the ARM-specific .req directive. | |||
7053 | if (Parser.getTok().is(AsmToken::Identifier) && | |||
7054 | Parser.getTok().getIdentifier().lower() == ".req") { | |||
7055 | parseDirectiveReq(Name, NameLoc); | |||
7056 | // We always return 'error' for this, as we're done with this | |||
7057 | // statement and don't need to match the 'instruction." | |||
7058 | return true; | |||
7059 | } | |||
7060 | ||||
7061 | // Create the leading tokens for the mnemonic, split by '.' characters. | |||
7062 | size_t Start = 0, Next = Name.find('.'); | |||
7063 | StringRef Mnemonic = Name.slice(Start, Next); | |||
7064 | StringRef ExtraToken = Name.slice(Next, Name.find(' ', Next + 1)); | |||
7065 | ||||
7066 | // Split out the predication code and carry setting flag from the mnemonic. | |||
7067 | unsigned PredicationCode; | |||
7068 | unsigned VPTPredicationCode; | |||
7069 | unsigned ProcessorIMod; | |||
7070 | bool CarrySetting; | |||
7071 | StringRef ITMask; | |||
7072 | Mnemonic = splitMnemonic(Mnemonic, ExtraToken, PredicationCode, VPTPredicationCode, | |||
7073 | CarrySetting, ProcessorIMod, ITMask); | |||
7074 | ||||
7075 | // In Thumb1, only the branch (B) instruction can be predicated. | |||
7076 | if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") { | |||
7077 | return Error(NameLoc, "conditional execution not supported in Thumb1"); | |||
7078 | } | |||
7079 | ||||
7080 | Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc)); | |||
7081 | ||||
7082 | // Handle the mask for IT and VPT instructions. In ARMOperand and | |||
7083 | // MCOperand, this is stored in a format independent of the | |||
7084 | // condition code: the lowest set bit indicates the end of the | |||
7085 | // encoding, and above that, a 1 bit indicates 'else', and an 0 | |||
7086 | // indicates 'then'. E.g. | |||
7087 | // IT -> 1000 | |||
7088 | // ITx -> x100 (ITT -> 0100, ITE -> 1100) | |||
7089 | // ITxy -> xy10 (e.g. ITET -> 1010) | |||
7090 | // ITxyz -> xyz1 (e.g. ITEET -> 1101) | |||
7091 | // Note: See the ARM::PredBlockMask enum in | |||
7092 | // /lib/Target/ARM/Utils/ARMBaseInfo.h | |||
7093 | if (Mnemonic == "it" || Mnemonic.startswith("vpt") || | |||
7094 | Mnemonic.startswith("vpst")) { | |||
7095 | SMLoc Loc = Mnemonic == "it" ? SMLoc::getFromPointer(NameLoc.getPointer() + 2) : | |||
7096 | Mnemonic == "vpt" ? SMLoc::getFromPointer(NameLoc.getPointer() + 3) : | |||
7097 | SMLoc::getFromPointer(NameLoc.getPointer() + 4); | |||
7098 | if (ITMask.size() > 3) { | |||
7099 | if (Mnemonic == "it") | |||
7100 | return Error(Loc, "too many conditions on IT instruction"); | |||
7101 | return Error(Loc, "too many conditions on VPT instruction"); | |||
7102 | } | |||
7103 | unsigned Mask = 8; | |||
7104 | for (unsigned i = ITMask.size(); i != 0; --i) { | |||
7105 | char pos = ITMask[i - 1]; | |||
7106 | if (pos != 't' && pos != 'e') { | |||
7107 | return Error(Loc, "illegal IT block condition mask '" + ITMask + "'"); | |||
7108 | } | |||
7109 | Mask >>= 1; | |||
7110 | if (ITMask[i - 1] == 'e') | |||
7111 | Mask |= 8; | |||
7112 | } | |||
7113 | Operands.push_back(ARMOperand::CreateITMask(Mask, Loc)); | |||
7114 | } | |||
7115 | ||||
7116 | // FIXME: This is all a pretty gross hack. We should automatically handle | |||
7117 | // optional operands like this via tblgen. | |||
7118 | ||||
7119 | // Next, add the CCOut and ConditionCode operands, if needed. | |||
7120 | // | |||
7121 | // For mnemonics which can ever incorporate a carry setting bit or predication | |||
7122 | // code, our matching model involves us always generating CCOut and | |||
7123 | // ConditionCode operands to match the mnemonic "as written" and then we let | |||
7124 | // the matcher deal with finding the right instruction or generating an | |||
7125 | // appropriate error. | |||
7126 | bool CanAcceptCarrySet, CanAcceptPredicationCode, CanAcceptVPTPredicationCode; | |||
7127 | getMnemonicAcceptInfo(Mnemonic, ExtraToken, Name, CanAcceptCarrySet, | |||
7128 | CanAcceptPredicationCode, CanAcceptVPTPredicationCode); | |||
7129 | ||||
7130 | // If we had a carry-set on an instruction that can't do that, issue an | |||
7131 | // error. | |||
7132 | if (!CanAcceptCarrySet && CarrySetting) { | |||
7133 | return Error(NameLoc, "instruction '" + Mnemonic + | |||
7134 | "' can not set flags, but 's' suffix specified"); | |||
7135 | } | |||
7136 | // If we had a predication code on an instruction that can't do that, issue an | |||
7137 | // error. | |||
7138 | if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) { | |||
7139 | return Error(NameLoc, "instruction '" + Mnemonic + | |||
7140 | "' is not predicable, but condition code specified"); | |||
7141 | } | |||
7142 | ||||
7143 | // If we had a VPT predication code on an instruction that can't do that, issue an | |||
7144 | // error. | |||
7145 | if (!CanAcceptVPTPredicationCode && VPTPredicationCode != ARMVCC::None) { | |||
7146 | return Error(NameLoc, "instruction '" + Mnemonic + | |||
7147 | "' is not VPT predicable, but VPT code T/E is specified"); | |||
7148 | } | |||
7149 | ||||
7150 | // Add the carry setting operand, if necessary. | |||
7151 | if (CanAcceptCarrySet) { | |||
7152 | SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size()); | |||
7153 | Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0, | |||
7154 | Loc)); | |||
7155 | } | |||
7156 | ||||
7157 | // Add the predication code operand, if necessary. | |||
7158 | if (CanAcceptPredicationCode) { | |||
7159 | SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() + | |||
7160 | CarrySetting); | |||
7161 | Operands.push_back(ARMOperand::CreateCondCode( | |||
7162 | ARMCC::CondCodes(PredicationCode), Loc)); | |||
7163 | } | |||
7164 | ||||
7165 | // Add the VPT predication code operand, if necessary. | |||
7166 | // FIXME: We don't add them for the instructions filtered below as these can | |||
7167 | // have custom operands which need special parsing. This parsing requires | |||
7168 | // the operand to be in the same place in the OperandVector as their | |||
7169 | // definition in tblgen. Since these instructions may also have the | |||
7170 | // scalar predication operand we do not add the vector one and leave until | |||
7171 | // now to fix it up. | |||
7172 | if (CanAcceptVPTPredicationCode && Mnemonic != "vmov" && | |||
7173 | !Mnemonic.startswith("vcmp") && | |||
7174 | !(Mnemonic.startswith("vcvt") && Mnemonic != "vcvta" && | |||
7175 | Mnemonic != "vcvtn" && Mnemonic != "vcvtp" && Mnemonic != "vcvtm")) { | |||
7176 | SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() + | |||
7177 | CarrySetting); | |||
7178 | Operands.push_back(ARMOperand::CreateVPTPred( | |||
7179 | ARMVCC::VPTCodes(VPTPredicationCode), Loc)); | |||
7180 | } | |||
7181 | ||||
7182 | // Add the processor imod operand, if necessary. | |||
7183 | if (ProcessorIMod) { | |||
7184 | Operands.push_back(ARMOperand::CreateImm( | |||
7185 | MCConstantExpr::create(ProcessorIMod, getContext()), | |||
7186 | NameLoc, NameLoc)); | |||
7187 | } else if (Mnemonic == "cps" && isMClass()) { | |||
7188 | return Error(NameLoc, "instruction 'cps' requires effect for M-class"); | |||
7189 | } | |||
7190 | ||||
7191 | // Add the remaining tokens in the mnemonic. | |||
7192 | while (Next != StringRef::npos) { | |||
7193 | Start = Next; | |||
7194 | Next = Name.find('.', Start + 1); | |||
7195 | ExtraToken = Name.slice(Start, Next); | |||
7196 | ||||
7197 | // Some NEON instructions have an optional datatype suffix that is | |||
7198 | // completely ignored. Check for that. | |||
7199 | if (isDataTypeToken(ExtraToken) && | |||
7200 | doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken)) | |||
7201 | continue; | |||
7202 | ||||
    // For ARM mode generate an error if the .n qualifier is used.
7204 | if (ExtraToken == ".n" && !isThumb()) { | |||
7205 | SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start); | |||
7206 | return Error(Loc, "instruction with .n (narrow) qualifier not allowed in " | |||
7207 | "arm mode"); | |||
7208 | } | |||
7209 | ||||
7210 | // The .n qualifier is always discarded as that is what the tables | |||
7211 | // and matcher expect. In ARM mode the .w qualifier has no effect, | |||
7212 | // so discard it to avoid errors that can be caused by the matcher. | |||
7213 | if (ExtraToken != ".n" && (isThumb() || ExtraToken != ".w")) { | |||
7214 | SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start); | |||
7215 | Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc)); | |||
7216 | } | |||
7217 | } | |||
7218 | ||||
7219 | // Read the remaining operands. | |||
7220 | if (getLexer().isNot(AsmToken::EndOfStatement)) { | |||
7221 | // Read the first operand. | |||
7222 | if (parseOperand(Operands, Mnemonic)) { | |||
7223 | return true; | |||
7224 | } | |||
7225 | ||||
7226 | while (parseOptionalToken(AsmToken::Comma)) { | |||
7227 | // Parse and remember the operand. | |||
7228 | if (parseOperand(Operands, Mnemonic)) { | |||
7229 | return true; | |||
7230 | } | |||
7231 | } | |||
7232 | } | |||
7233 | ||||
7234 | if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list")) | |||
7235 | return true; | |||
7236 | ||||
7237 | tryConvertingToTwoOperandForm(Mnemonic, CarrySetting, Operands); | |||
7238 | ||||
7239 | if (hasCDE() && MS.isCDEInstr(Mnemonic)) { | |||
7240 | // Dual-register instructions use even-odd register pairs as their | |||
7241 | // destination operand, in assembly such pair is spelled as two | |||
7242 | // consecutive registers, without any special syntax. ConvertDualRegOperand | |||
7243 | // tries to convert such operand into register pair, e.g. r2, r3 -> r2_r3. | |||
7244 | // It returns true, if an error message has been emitted. If the function | |||
7245 | // returns false, the function either succeeded or an error (e.g. missing | |||
7246 | // operand) will be diagnosed elsewhere. | |||
7247 | if (MS.isCDEDualRegInstr(Mnemonic)) { | |||
7248 | bool GotError = CDEConvertDualRegOperand(Mnemonic, Operands); | |||
7249 | if (GotError) | |||
7250 | return GotError; | |||
7251 | } | |||
7252 | } | |||
7253 | ||||
7254 | // Some instructions, mostly Thumb, have forms for the same mnemonic that | |||
7255 | // do and don't have a cc_out optional-def operand. With some spot-checks | |||
7256 | // of the operand list, we can figure out which variant we're trying to | |||
7257 | // parse and adjust accordingly before actually matching. We shouldn't ever | |||
7258 | // try to remove a cc_out operand that was explicitly set on the | |||
7259 | // mnemonic, of course (CarrySetting == true). Reason number #317 the | |||
7260 | // table driven matcher doesn't fit well with the ARM instruction set. | |||
7261 | if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) | |||
7262 | Operands.erase(Operands.begin() + 1); | |||
7263 | ||||
7264 | // Some instructions have the same mnemonic, but don't always | |||
7265 | // have a predicate. Distinguish them here and delete the | |||
7266 | // appropriate predicate if needed. This could be either the scalar | |||
7267 | // predication code or the vector predication code. | |||
7268 | if (PredicationCode == ARMCC::AL && | |||
7269 | shouldOmitPredicateOperand(Mnemonic, Operands)) | |||
7270 | Operands.erase(Operands.begin() + 1); | |||
7271 | ||||
7272 | ||||
7273 | if (hasMVE()) { | |||
7274 | if (!shouldOmitVectorPredicateOperand(Mnemonic, Operands) && | |||
7275 | Mnemonic == "vmov" && PredicationCode == ARMCC::LT) { | |||
7276 | // Very nasty hack to deal with the vector predicated variant of vmovlt | |||
7277 | // the scalar predicated vmov with condition 'lt'. We can not tell them | |||
7278 | // apart until we have parsed their operands. | |||
7279 | Operands.erase(Operands.begin() + 1); | |||
7280 | Operands.erase(Operands.begin()); | |||
7281 | SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer()); | |||
7282 | SMLoc PLoc = SMLoc::getFromPointer(NameLoc.getPointer() + | |||
7283 | Mnemonic.size() - 1 + CarrySetting); | |||
7284 | Operands.insert(Operands.begin(), | |||
7285 | ARMOperand::CreateVPTPred(ARMVCC::None, PLoc)); | |||
7286 | Operands.insert(Operands.begin(), | |||
7287 | ARMOperand::CreateToken(StringRef("vmovlt"), MLoc)); | |||
7288 | } else if (Mnemonic == "vcvt" && PredicationCode == ARMCC::NE && | |||
7289 | !shouldOmitVectorPredicateOperand(Mnemonic, Operands)) { | |||
7290 | // Another nasty hack to deal with the ambiguity between vcvt with scalar | |||
7291 | // predication 'ne' and vcvtn with vector predication 'e'. As above we | |||
7292 | // can only distinguish between the two after we have parsed their | |||
7293 | // operands. | |||
7294 | Operands.erase(Operands.begin() + 1); | |||
7295 | Operands.erase(Operands.begin()); | |||
7296 | SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer()); | |||
7297 | SMLoc PLoc = SMLoc::getFromPointer(NameLoc.getPointer() + | |||
7298 | Mnemonic.size() - 1 + CarrySetting); | |||
7299 | Operands.insert(Operands.begin(), | |||
7300 | ARMOperand::CreateVPTPred(ARMVCC::Else, PLoc)); | |||
7301 | Operands.insert(Operands.begin(), | |||
7302 | ARMOperand::CreateToken(StringRef("vcvtn"), MLoc)); | |||
7303 | } else if (Mnemonic == "vmul" && PredicationCode == ARMCC::LT && | |||
7304 | !shouldOmitVectorPredicateOperand(Mnemonic, Operands)) { | |||
7305 | // Another hack, this time to distinguish between scalar predicated vmul | |||
7306 | // with 'lt' predication code and the vector instruction vmullt with | |||
7307 | // vector predication code "none" | |||
7308 | Operands.erase(Operands.begin() + 1); | |||
7309 | Operands.erase(Operands.begin()); | |||
7310 | SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer()); | |||
7311 | Operands.insert(Operands.begin(), | |||
7312 | ARMOperand::CreateToken(StringRef("vmullt"), MLoc)); | |||
7313 | } | |||
7314 | // For vmov and vcmp, as mentioned earlier, we did not add the vector | |||
7315 | // predication code, since these may contain operands that require | |||
7316 | // special parsing. So now we have to see if they require vector | |||
7317 | // predication and replace the scalar one with the vector predication | |||
7318 | // operand if that is the case. | |||
7319 | else if (Mnemonic == "vmov" || Mnemonic.startswith("vcmp") || | |||
7320 | (Mnemonic.startswith("vcvt") && !Mnemonic.startswith("vcvta") && | |||
7321 | !Mnemonic.startswith("vcvtn") && !Mnemonic.startswith("vcvtp") && | |||
7322 | !Mnemonic.startswith("vcvtm"))) { | |||
7323 | if (!shouldOmitVectorPredicateOperand(Mnemonic, Operands)) { | |||
7324 | // We could not split the vector predicate off vcvt because it might | |||
7325 | // have been the scalar vcvtt instruction. Now we know its a vector | |||
7326 | // instruction, we still need to check whether its the vector | |||
7327 | // predicated vcvt with 'Then' predication or the vector vcvtt. We can | |||
7328 | // distinguish the two based on the suffixes, if it is any of | |||
7329 | // ".f16.f32", ".f32.f16", ".f16.f64" or ".f64.f16" then it is the vcvtt. | |||
7330 | if (Mnemonic.startswith("vcvtt") && Operands.size() >= 4) { | |||
7331 | auto Sz1 = static_cast<ARMOperand &>(*Operands[2]); | |||
7332 | auto Sz2 = static_cast<ARMOperand &>(*Operands[3]); | |||
7333 | if (!(Sz1.isToken() && Sz1.getToken().startswith(".f") && | |||
7334 | Sz2.isToken() && Sz2.getToken().startswith(".f"))) { | |||
7335 | Operands.erase(Operands.begin()); | |||
7336 | SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer()); | |||
7337 | VPTPredicationCode = ARMVCC::Then; | |||
7338 | ||||
7339 | Mnemonic = Mnemonic.substr(0, 4); | |||
7340 | Operands.insert(Operands.begin(), | |||
7341 | ARMOperand::CreateToken(Mnemonic, MLoc)); | |||
7342 | } | |||
7343 | } | |||
7344 | Operands.erase(Operands.begin() + 1); | |||
7345 | SMLoc PLoc = SMLoc::getFromPointer(NameLoc.getPointer() + | |||
7346 | Mnemonic.size() + CarrySetting); | |||
7347 | Operands.insert(Operands.begin() + 1, | |||
7348 | ARMOperand::CreateVPTPred( | |||
7349 | ARMVCC::VPTCodes(VPTPredicationCode), PLoc)); | |||
7350 | } | |||
7351 | } else if (CanAcceptVPTPredicationCode) { | |||
7352 | // For all other instructions, make sure only one of the two | |||
7353 | // predication operands is left behind, depending on whether we should | |||
7354 | // use the vector predication. | |||
7355 | if (shouldOmitVectorPredicateOperand(Mnemonic, Operands)) { | |||
7356 | if (CanAcceptPredicationCode) | |||
7357 | Operands.erase(Operands.begin() + 2); | |||
7358 | else | |||
7359 | Operands.erase(Operands.begin() + 1); | |||
7360 | } else if (CanAcceptPredicationCode && PredicationCode == ARMCC::AL) { | |||
7361 | Operands.erase(Operands.begin() + 1); | |||
7362 | } | |||
7363 | } | |||
7364 | } | |||
7365 | ||||
7366 | if (VPTPredicationCode != ARMVCC::None) { | |||
7367 | bool usedVPTPredicationCode = false; | |||
7368 | for (unsigned I = 1; I < Operands.size(); ++I) | |||
7369 | if (static_cast<ARMOperand &>(*Operands[I]).isVPTPred()) | |||
7370 | usedVPTPredicationCode = true; | |||
7371 | if (!usedVPTPredicationCode) { | |||
7372 | // If we have a VPT predication code and we haven't just turned it | |||
7373 | // into an operand, then it was a mistake for splitMnemonic to | |||
7374 | // separate it from the rest of the mnemonic in the first place, | |||
7375 | // and this may lead to wrong disassembly (e.g. scalar floating | |||
7376 | // point VCMPE is actually a different instruction from VCMP, so | |||
7377 | // we mustn't treat them the same). In that situation, glue it | |||
7378 | // back on. | |||
7379 | Mnemonic = Name.slice(0, Mnemonic.size() + 1); | |||
7380 | Operands.erase(Operands.begin()); | |||
7381 | Operands.insert(Operands.begin(), | |||
7382 | ARMOperand::CreateToken(Mnemonic, NameLoc)); | |||
7383 | } | |||
7384 | } | |||
7385 | ||||
7386 | // ARM mode 'blx' need special handling, as the register operand version | |||
7387 | // is predicable, but the label operand version is not. So, we can't rely | |||
7388 | // on the Mnemonic based checking to correctly figure out when to put | |||
7389 | // a k_CondCode operand in the list. If we're trying to match the label | |||
7390 | // version, remove the k_CondCode operand here. | |||
7391 | if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 && | |||
7392 | static_cast<ARMOperand &>(*Operands[2]).isImm()) | |||
7393 | Operands.erase(Operands.begin() + 1); | |||
7394 | ||||
7395 | // Adjust operands of ldrexd/strexd to MCK_GPRPair. | |||
7396 | // ldrexd/strexd require even/odd GPR pair. To enforce this constraint, | |||
7397 | // a single GPRPair reg operand is used in the .td file to replace the two | |||
7398 | // GPRs. However, when parsing from asm, the two GRPs cannot be | |||
7399 | // automatically | |||
7400 | // expressed as a GPRPair, so we have to manually merge them. | |||
7401 | // FIXME: We would really like to be able to tablegen'erate this. | |||
7402 | if (!isThumb() && Operands.size() > 4 && | |||
7403 | (Mnemonic == "ldrexd" || Mnemonic == "strexd" || Mnemonic == "ldaexd" || | |||
7404 | Mnemonic == "stlexd")) { | |||
7405 | bool isLoad = (Mnemonic == "ldrexd" || Mnemonic == "ldaexd"); | |||
7406 | unsigned Idx = isLoad ? 2 : 3; | |||
7407 | ARMOperand &Op1 = static_cast<ARMOperand &>(*Operands[Idx]); | |||
7408 | ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[Idx + 1]); | |||
7409 | ||||
7410 | const MCRegisterClass &MRC = MRI->getRegClass(ARM::GPRRegClassID); | |||
7411 | // Adjust only if Op1 and Op2 are GPRs. | |||
7412 | if (Op1.isReg() && Op2.isReg() && MRC.contains(Op1.getReg()) && | |||
7413 | MRC.contains(Op2.getReg())) { | |||
7414 | unsigned Reg1 = Op1.getReg(); | |||
7415 | unsigned Reg2 = Op2.getReg(); | |||
7416 | unsigned Rt = MRI->getEncodingValue(Reg1); | |||
7417 | unsigned Rt2 = MRI->getEncodingValue(Reg2); | |||
7418 | ||||
7419 | // Rt2 must be Rt + 1 and Rt must be even. | |||
7420 | if (Rt + 1 != Rt2 || (Rt & 1)) { | |||
7421 | return Error(Op2.getStartLoc(), | |||
7422 | isLoad ? "destination operands must be sequential" | |||
7423 | : "source operands must be sequential"); | |||
7424 | } | |||
7425 | unsigned NewReg = MRI->getMatchingSuperReg( | |||
7426 | Reg1, ARM::gsub_0, &(MRI->getRegClass(ARM::GPRPairRegClassID))); | |||
7427 | Operands[Idx] = | |||
7428 | ARMOperand::CreateReg(NewReg, Op1.getStartLoc(), Op2.getEndLoc()); | |||
7429 | Operands.erase(Operands.begin() + Idx + 1); | |||
7430 | } | |||
7431 | } | |||
7432 | ||||
7433 | // GNU Assembler extension (compatibility). | |||
7434 | fixupGNULDRDAlias(Mnemonic, Operands); | |||
7435 | ||||
7436 | // FIXME: As said above, this is all a pretty gross hack. This instruction | |||
7437 | // does not fit with other "subs" and tblgen. | |||
7438 | // Adjust operands of B9.3.19 SUBS PC, LR, #imm (Thumb2) system instruction | |||
7439 | // so the Mnemonic is the original name "subs" and delete the predicate | |||
7440 | // operand so it will match the table entry. | |||
7441 | if (isThumbTwo() && Mnemonic == "sub" && Operands.size() == 6 && | |||
7442 | static_cast<ARMOperand &>(*Operands[3]).isReg() && | |||
7443 | static_cast<ARMOperand &>(*Operands[3]).getReg() == ARM::PC && | |||
7444 | static_cast<ARMOperand &>(*Operands[4]).isReg() && | |||
7445 | static_cast<ARMOperand &>(*Operands[4]).getReg() == ARM::LR && | |||
7446 | static_cast<ARMOperand &>(*Operands[5]).isImm()) { | |||
7447 | Operands.front() = ARMOperand::CreateToken(Name, NameLoc); | |||
7448 | Operands.erase(Operands.begin() + 1); | |||
7449 | } | |||
7450 | return false; | |||
7451 | } | |||
7452 | ||||
7453 | // Validate context-sensitive operand constraints. | |||
7454 | ||||
7455 | // return 'true' if register list contains non-low GPR registers, | |||
7456 | // 'false' otherwise. If Reg is in the register list or is HiReg, set | |||
7457 | // 'containsReg' to true. | |||
7458 | static bool checkLowRegisterList(const MCInst &Inst, unsigned OpNo, | |||
7459 | unsigned Reg, unsigned HiReg, | |||
7460 | bool &containsReg) { | |||
7461 | containsReg = false; | |||
7462 | for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) { | |||
7463 | unsigned OpReg = Inst.getOperand(i).getReg(); | |||
7464 | if (OpReg == Reg) | |||
7465 | containsReg = true; | |||
7466 | // Anything other than a low register isn't legal here. | |||
7467 | if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg)) | |||
7468 | return true; | |||
7469 | } | |||
7470 | return false; | |||
7471 | } | |||
7472 | ||||
7473 | // Check if the specified regisgter is in the register list of the inst, | |||
7474 | // starting at the indicated operand number. | |||
7475 | static bool listContainsReg(const MCInst &Inst, unsigned OpNo, unsigned Reg) { | |||
7476 | for (unsigned i = OpNo, e = Inst.getNumOperands(); i < e; ++i) { | |||
7477 | unsigned OpReg = Inst.getOperand(i).getReg(); | |||
7478 | if (OpReg == Reg) | |||
7479 | return true; | |||
7480 | } | |||
7481 | return false; | |||
7482 | } | |||
7483 | ||||
7484 | // Return true if instruction has the interesting property of being | |||
7485 | // allowed in IT blocks, but not being predicable. | |||
7486 | static bool instIsBreakpoint(const MCInst &Inst) { | |||
7487 | return Inst.getOpcode() == ARM::tBKPT || | |||
7488 | Inst.getOpcode() == ARM::BKPT || | |||
7489 | Inst.getOpcode() == ARM::tHLT || | |||
7490 | Inst.getOpcode() == ARM::HLT; | |||
7491 | } | |||
7492 | ||||
7493 | bool ARMAsmParser::validatetLDMRegList(const MCInst &Inst, | |||
7494 | const OperandVector &Operands, | |||
7495 | unsigned ListNo, bool IsARPop) { | |||
7496 | const ARMOperand &Op = static_cast<const ARMOperand &>(*Operands[ListNo]); | |||
7497 | bool HasWritebackToken = Op.isToken() && Op.getToken() == "!"; | |||
7498 | ||||
7499 | bool ListContainsSP = listContainsReg(Inst, ListNo, ARM::SP); | |||
7500 | bool ListContainsLR = listContainsReg(Inst, ListNo, ARM::LR); | |||
7501 | bool ListContainsPC = listContainsReg(Inst, ListNo, ARM::PC); | |||
7502 | ||||
7503 | if (!IsARPop && ListContainsSP) | |||
7504 | return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(), | |||
7505 | "SP may not be in the register list"); | |||
7506 | else if (ListContainsPC && ListContainsLR) | |||
7507 | return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(), | |||
7508 | "PC and LR may not be in the register list simultaneously"); | |||
7509 | return false; | |||
7510 | } | |||
7511 | ||||
7512 | bool ARMAsmParser::validatetSTMRegList(const MCInst &Inst, | |||
7513 | const OperandVector &Operands, | |||
7514 | unsigned ListNo) { | |||
7515 | const ARMOperand &Op = static_cast<const ARMOperand &>(*Operands[ListNo]); | |||
7516 | bool HasWritebackToken = Op.isToken() && Op.getToken() == "!"; | |||
7517 | ||||
7518 | bool ListContainsSP = listContainsReg(Inst, ListNo, ARM::SP); | |||
7519 | bool ListContainsPC = listContainsReg(Inst, ListNo, ARM::PC); | |||
7520 | ||||
7521 | if (ListContainsSP && ListContainsPC) | |||
7522 | return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(), | |||
7523 | "SP and PC may not be in the register list"); | |||
7524 | else if (ListContainsSP) | |||
7525 | return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(), | |||
7526 | "SP may not be in the register list"); | |||
7527 | else if (ListContainsPC) | |||
7528 | return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(), | |||
7529 | "PC may not be in the register list"); | |||
7530 | return false; | |||
7531 | } | |||
7532 | ||||
7533 | bool ARMAsmParser::validateLDRDSTRD(MCInst &Inst, | |||
7534 | const OperandVector &Operands, | |||
7535 | bool Load, bool ARMMode, bool Writeback) { | |||
7536 | unsigned RtIndex = Load || !Writeback ? 0 : 1; | |||
7537 | unsigned Rt = MRI->getEncodingValue(Inst.getOperand(RtIndex).getReg()); | |||
7538 | unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(RtIndex + 1).getReg()); | |||
7539 | ||||
7540 | if (ARMMode) { | |||
7541 | // Rt can't be R14. | |||
7542 | if (Rt == 14) | |||
7543 | return Error(Operands[3]->getStartLoc(), | |||
7544 | "Rt can't be R14"); | |||
7545 | ||||
7546 | // Rt must be even-numbered. | |||
7547 | if ((Rt & 1) == 1) | |||
7548 | return Error(Operands[3]->getStartLoc(), | |||
7549 | "Rt must be even-numbered"); | |||
7550 | ||||
7551 | // Rt2 must be Rt + 1. | |||
7552 | if (Rt2 != Rt + 1) { | |||
7553 | if (Load) | |||
7554 | return Error(Operands[3]->getStartLoc(), | |||
7555 | "destination operands must be sequential"); | |||
7556 | else | |||
7557 | return Error(Operands[3]->getStartLoc(), | |||
7558 | "source operands must be sequential"); | |||
7559 | } | |||
7560 | ||||
7561 | // FIXME: Diagnose m == 15 | |||
7562 | // FIXME: Diagnose ldrd with m == t || m == t2. | |||
7563 | } | |||
7564 | ||||
7565 | if (!ARMMode && Load) { | |||
7566 | if (Rt2 == Rt) | |||
7567 | return Error(Operands[3]->getStartLoc(), | |||
7568 | "destination operands can't be identical"); | |||
7569 | } | |||
7570 | ||||
7571 | if (Writeback) { | |||
7572 | unsigned Rn = MRI->getEncodingValue(Inst.getOperand(3).getReg()); | |||
7573 | ||||
7574 | if (Rn == Rt || Rn == Rt2) { | |||
7575 | if (Load) | |||
7576 | return Error(Operands[3]->getStartLoc(), | |||
7577 | "base register needs to be different from destination " | |||
7578 | "registers"); | |||
7579 | else | |||
7580 | return Error(Operands[3]->getStartLoc(), | |||
7581 | "source register and base register can't be identical"); | |||
7582 | } | |||
7583 | ||||
7584 | // FIXME: Diagnose ldrd/strd with writeback and n == 15. | |||
7585 | // (Except the immediate form of ldrd?) | |||
7586 | } | |||
7587 | ||||
7588 | return false; | |||
7589 | } | |||
7590 | ||||
7591 | static int findFirstVectorPredOperandIdx(const MCInstrDesc &MCID) { | |||
7592 | for (unsigned i = 0; i < MCID.NumOperands; ++i) { | |||
7593 | if (ARM::isVpred(MCID.OpInfo[i].OperandType)) | |||
7594 | return i; | |||
7595 | } | |||
7596 | return -1; | |||
7597 | } | |||
7598 | ||||
7599 | static bool isVectorPredicable(const MCInstrDesc &MCID) { | |||
7600 | return findFirstVectorPredOperandIdx(MCID) != -1; | |||
7601 | } | |||
7602 | ||||
7603 | // FIXME: We would really like to be able to tablegen'erate this. | |||
7604 | bool ARMAsmParser::validateInstruction(MCInst &Inst, | |||
7605 | const OperandVector &Operands) { | |||
7606 | const MCInstrDesc &MCID = MII.get(Inst.getOpcode()); | |||
7607 | SMLoc Loc = Operands[0]->getStartLoc(); | |||
7608 | ||||
7609 | // Check the IT block state first. | |||
7610 | // NOTE: BKPT and HLT instructions have the interesting property of being | |||
7611 | // allowed in IT blocks, but not being predicable. They just always execute. | |||
7612 | if (inITBlock() && !instIsBreakpoint(Inst)) { | |||
7613 | // The instruction must be predicable. | |||
7614 | if (!MCID.isPredicable()) | |||
7615 | return Error(Loc, "instructions in IT block must be predicable"); | |||
7616 | ARMCC::CondCodes Cond = ARMCC::CondCodes( | |||
7617 | Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm()); | |||
7618 | if (Cond != currentITCond()) { | |||
7619 | // Find the condition code Operand to get its SMLoc information. | |||
7620 | SMLoc CondLoc; | |||
7621 | for (unsigned I = 1; I < Operands.size(); ++I) | |||
7622 | if (static_cast<ARMOperand &>(*Operands[I]).isCondCode()) | |||
7623 | CondLoc = Operands[I]->getStartLoc(); | |||
7624 | return Error(CondLoc, "incorrect condition in IT block; got '" + | |||
7625 | StringRef(ARMCondCodeToString(Cond)) + | |||
7626 | "', but expected '" + | |||
7627 | ARMCondCodeToString(currentITCond()) + "'"); | |||
7628 | } | |||
7629 | // Check for non-'al' condition codes outside of the IT block. | |||
7630 | } else if (isThumbTwo() && MCID.isPredicable() && | |||
7631 | Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() != | |||
7632 | ARMCC::AL && Inst.getOpcode() != ARM::tBcc && | |||
7633 | Inst.getOpcode() != ARM::t2Bcc && | |||
7634 | Inst.getOpcode() != ARM::t2BFic) { | |||
7635 | return Error(Loc, "predicated instructions must be in IT block"); | |||
7636 | } else if (!isThumb() && !useImplicitITARM() && MCID.isPredicable() && | |||
7637 | Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() != | |||
7638 | ARMCC::AL) { | |||
7639 | return Warning(Loc, "predicated instructions should be in IT block"); | |||
7640 | } else if (!MCID.isPredicable()) { | |||
7641 | // Check the instruction doesn't have a predicate operand anyway | |||
7642 | // that it's not allowed to use. Sometimes this happens in order | |||
7643 | // to keep instructions the same shape even though one cannot | |||
7644 | // legally be predicated, e.g. vmul.f16 vs vmul.f32. | |||
7645 | for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) { | |||
7646 | if (MCID.OpInfo[i].isPredicate()) { | |||
7647 | if (Inst.getOperand(i).getImm() != ARMCC::AL) | |||
7648 | return Error(Loc, "instruction is not predicable"); | |||
7649 | break; | |||
7650 | } | |||
7651 | } | |||
7652 | } | |||
7653 | ||||
7654 | // PC-setting instructions in an IT block, but not the last instruction of | |||
7655 | // the block, are UNPREDICTABLE. | |||
7656 | if (inExplicitITBlock() && !lastInITBlock() && isITBlockTerminator(Inst)) { | |||
7657 | return Error(Loc, "instruction must be outside of IT block or the last instruction in an IT block"); | |||
7658 | } | |||
7659 | ||||
7660 | if (inVPTBlock() && !instIsBreakpoint(Inst)) { | |||
7661 | unsigned Bit = extractITMaskBit(VPTState.Mask, VPTState.CurPosition); | |||
7662 | if (!isVectorPredicable(MCID)) | |||
7663 | return Error(Loc, "instruction in VPT block must be predicable"); | |||
7664 | unsigned Pred = Inst.getOperand(findFirstVectorPredOperandIdx(MCID)).getImm(); | |||
7665 | unsigned VPTPred = Bit ? ARMVCC::Else : ARMVCC::Then; | |||
7666 | if (Pred != VPTPred) { | |||
7667 | SMLoc PredLoc; | |||
7668 | for (unsigned I = 1; I < Operands.size(); ++I) | |||
7669 | if (static_cast<ARMOperand &>(*Operands[I]).isVPTPred()) | |||
7670 | PredLoc = Operands[I]->getStartLoc(); | |||
7671 | return Error(PredLoc, "incorrect predication in VPT block; got '" + | |||
7672 | StringRef(ARMVPTPredToString(ARMVCC::VPTCodes(Pred))) + | |||
7673 | "', but expected '" + | |||
7674 | ARMVPTPredToString(ARMVCC::VPTCodes(VPTPred)) + "'"); | |||
7675 | } | |||
7676 | } | |||
7677 | else if (isVectorPredicable(MCID) && | |||
7678 | Inst.getOperand(findFirstVectorPredOperandIdx(MCID)).getImm() != | |||
7679 | ARMVCC::None) | |||
7680 | return Error(Loc, "VPT predicated instructions must be in VPT block"); | |||
7681 | ||||
7682 | const unsigned Opcode = Inst.getOpcode(); | |||
7683 | switch (Opcode) { | |||
7684 | case ARM::t2IT: { | |||
7685 | // Encoding is unpredictable if it ever results in a notional 'NV' | |||
7686 | // predicate. Since we don't parse 'NV' directly this means an 'AL' | |||
7687 | // predicate with an "else" mask bit. | |||
7688 | unsigned Cond = Inst.getOperand(0).getImm(); | |||
7689 | unsigned Mask = Inst.getOperand(1).getImm(); | |||
7690 | ||||
7691 | // Conditions only allowing a 't' are those with no set bit except | |||
7692 | // the lowest-order one that indicates the end of the sequence. In | |||
7693 | // other words, powers of 2. | |||
7694 | if (Cond == ARMCC::AL && countPopulation(Mask) != 1) | |||
7695 | return Error(Loc, "unpredictable IT predicate sequence"); | |||
7696 | break; | |||
7697 | } | |||
7698 | case ARM::LDRD: | |||
7699 | if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/true, | |||
7700 | /*Writeback*/false)) | |||
7701 | return true; | |||
7702 | break; | |||
7703 | case ARM::LDRD_PRE: | |||
7704 | case ARM::LDRD_POST: | |||
7705 | if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/true, | |||
7706 | /*Writeback*/true)) | |||
7707 | return true; | |||
7708 | break; | |||
7709 | case ARM::t2LDRDi8: | |||
7710 | if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/false, | |||
7711 | /*Writeback*/false)) | |||
7712 | return true; | |||
7713 | break; | |||
7714 | case ARM::t2LDRD_PRE: | |||
7715 | case ARM::t2LDRD_POST: | |||
7716 | if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/false, | |||
7717 | /*Writeback*/true)) | |||
7718 | return true; | |||
7719 | break; | |||
7720 | case ARM::t2BXJ: { | |||
7721 | const unsigned RmReg = Inst.getOperand(0).getReg(); | |||
7722 | // Rm = SP is no longer unpredictable in v8-A | |||
7723 | if (RmReg == ARM::SP && !hasV8Ops()) | |||
7724 | return Error(Operands[2]->getStartLoc(), | |||
7725 | "r13 (SP) is an unpredictable operand to BXJ"); | |||
7726 | return false; | |||
7727 | } | |||
7728 | case ARM::STRD: | |||
7729 | if (validateLDRDSTRD(Inst, Operands, /*Load*/false, /*ARMMode*/true, | |||
7730 | /*Writeback*/false)) | |||
7731 | return true; | |||
7732 | break; | |||
7733 | case ARM::STRD_PRE: | |||
7734 | case ARM::STRD_POST: | |||
7735 | if (validateLDRDSTRD(Inst, Operands, /*Load*/false, /*ARMMode*/true, | |||
7736 | /*Writeback*/true)) | |||
7737 | return true; | |||
7738 | break; | |||
7739 | case ARM::t2STRD_PRE: | |||
7740 | case ARM::t2STRD_POST: | |||
7741 | if (validateLDRDSTRD(Inst, Operands, /*Load*/false, /*ARMMode*/false, | |||
7742 | /*Writeback*/true)) | |||
7743 | return true; | |||
7744 | break; | |||
7745 | case ARM::STR_PRE_IMM: | |||
7746 | case ARM::STR_PRE_REG: | |||
7747 | case ARM::t2STR_PRE: | |||
7748 | case ARM::STR_POST_IMM: | |||
7749 | case ARM::STR_POST_REG: | |||
7750 | case ARM::t2STR_POST: | |||
7751 | case ARM::STRH_PRE: | |||
7752 | case ARM::t2STRH_PRE: | |||
7753 | case ARM::STRH_POST: | |||
7754 | case ARM::t2STRH_POST: | |||
7755 | case ARM::STRB_PRE_IMM: | |||
7756 | case ARM::STRB_PRE_REG: | |||
7757 | case ARM::t2STRB_PRE: | |||
7758 | case ARM::STRB_POST_IMM: | |||
7759 | case ARM::STRB_POST_REG: | |||
7760 | case ARM::t2STRB_POST: { | |||
7761 | // Rt must be different from Rn. | |||
7762 | const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(1).getReg()); | |||
7763 | const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg()); | |||
7764 | ||||
7765 | if (Rt == Rn) | |||
7766 | return Error(Operands[3]->getStartLoc(), | |||
7767 | "source register and base register can't be identical"); | |||
7768 | return false; | |||
7769 | } | |||
7770 | case ARM::t2LDR_PRE_imm: | |||
7771 | case ARM::t2LDR_POST_imm: | |||
7772 | case ARM::t2STR_PRE_imm: | |||
7773 | case ARM::t2STR_POST_imm: { | |||
7774 | // Rt must be different from Rn. | |||
7775 | const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg()); | |||
7776 | const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(1).getReg()); | |||
7777 | ||||
7778 | if (Rt == Rn) | |||
7779 | return Error(Operands[3]->getStartLoc(), | |||
7780 | "destination register and base register can't be identical"); | |||
7781 | if (Inst.getOpcode() == ARM::t2LDR_POST_imm || | |||
7782 | Inst.getOpcode() == ARM::t2STR_POST_imm) { | |||
7783 | int Imm = Inst.getOperand(2).getImm(); | |||
7784 | if (Imm > 255 || Imm < -255) | |||
7785 | return Error(Operands[5]->getStartLoc(), | |||
7786 | "operand must be in range [-255, 255]"); | |||
7787 | } | |||
7788 | if (Inst.getOpcode() == ARM::t2STR_PRE_imm || | |||
7789 | Inst.getOpcode() == ARM::t2STR_POST_imm) { | |||
7790 | if (Inst.getOperand(0).getReg() == ARM::PC) { | |||
7791 | return Error(Operands[3]->getStartLoc(), | |||
7792 | "operand must be a register in range [r0, r14]"); | |||
7793 | } | |||
7794 | } | |||
7795 | return false; | |||
7796 | } | |||
7797 | case ARM::LDR_PRE_IMM: | |||
7798 | case ARM::LDR_PRE_REG: | |||
7799 | case ARM::t2LDR_PRE: | |||
7800 | case ARM::LDR_POST_IMM: | |||
7801 | case ARM::LDR_POST_REG: | |||
7802 | case ARM::t2LDR_POST: | |||
7803 | case ARM::LDRH_PRE: | |||
7804 | case ARM::t2LDRH_PRE: | |||
7805 | case ARM::LDRH_POST: | |||
7806 | case ARM::t2LDRH_POST: | |||
7807 | case ARM::LDRSH_PRE: | |||
7808 | case ARM::t2LDRSH_PRE: | |||
7809 | case ARM::LDRSH_POST: | |||
7810 | case ARM::t2LDRSH_POST: | |||
7811 | case ARM::LDRB_PRE_IMM: | |||
7812 | case ARM::LDRB_PRE_REG: | |||
7813 | case ARM::t2LDRB_PRE: | |||
7814 | case ARM::LDRB_POST_IMM: | |||
7815 | case ARM::LDRB_POST_REG: | |||
7816 | case ARM::t2LDRB_POST: | |||
7817 | case ARM::LDRSB_PRE: | |||
7818 | case ARM::t2LDRSB_PRE: | |||
7819 | case ARM::LDRSB_POST: | |||
7820 | case ARM::t2LDRSB_POST: { | |||
7821 | // Rt must be different from Rn. | |||
7822 | const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg()); | |||
7823 | const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg()); | |||
7824 | ||||
7825 | if (Rt == Rn) | |||
7826 | return Error(Operands[3]->getStartLoc(), | |||
7827 | "destination register and base register can't be identical"); | |||
7828 | return false; | |||
7829 | } | |||
7830 | ||||
7831 | case ARM::MVE_VLDRBU8_rq: | |||
7832 | case ARM::MVE_VLDRBU16_rq: | |||
7833 | case ARM::MVE_VLDRBS16_rq: | |||
7834 | case ARM::MVE_VLDRBU32_rq: | |||
7835 | case ARM::MVE_VLDRBS32_rq: | |||
7836 | case ARM::MVE_VLDRHU16_rq: | |||
7837 | case ARM::MVE_VLDRHU16_rq_u: | |||
7838 | case ARM::MVE_VLDRHU32_rq: | |||
7839 | case ARM::MVE_VLDRHU32_rq_u: | |||
7840 | case ARM::MVE_VLDRHS32_rq: | |||
7841 | case ARM::MVE_VLDRHS32_rq_u: | |||
7842 | case ARM::MVE_VLDRWU32_rq: | |||
7843 | case ARM::MVE_VLDRWU32_rq_u: | |||
7844 | case ARM::MVE_VLDRDU64_rq: | |||
7845 | case ARM::MVE_VLDRDU64_rq_u: | |||
7846 | case ARM::MVE_VLDRWU32_qi: | |||
7847 | case ARM::MVE_VLDRWU32_qi_pre: | |||
7848 | case ARM::MVE_VLDRDU64_qi: | |||
7849 | case ARM::MVE_VLDRDU64_qi_pre: { | |||
7850 | // Qd must be different from Qm. | |||
7851 | unsigned QdIdx = 0, QmIdx = 2; | |||
7852 | bool QmIsPointer = false; | |||
7853 | switch (Opcode) { | |||
7854 | case ARM::MVE_VLDRWU32_qi: | |||
7855 | case ARM::MVE_VLDRDU64_qi: | |||
7856 | QmIdx = 1; | |||
7857 | QmIsPointer = true; | |||
7858 | break; | |||
7859 | case ARM::MVE_VLDRWU32_qi_pre: | |||
7860 | case ARM::MVE_VLDRDU64_qi_pre: | |||
7861 | QdIdx = 1; | |||
7862 | QmIsPointer = true; | |||
7863 | break; | |||
7864 | } | |||
7865 | ||||
7866 | const unsigned Qd = MRI->getEncodingValue(Inst.getOperand(QdIdx).getReg()); | |||
7867 | const unsigned Qm = MRI->getEncodingValue(Inst.getOperand(QmIdx).getReg()); | |||
7868 | ||||
7869 | if (Qd == Qm) { | |||
7870 | return Error(Operands[3]->getStartLoc(), | |||
7871 | Twine("destination vector register and vector ") + | |||
7872 | (QmIsPointer ? "pointer" : "offset") + | |||
7873 | " register can't be identical"); | |||
7874 | } | |||
7875 | return false; | |||
7876 | } | |||
7877 | ||||
7878 | case ARM::SBFX: | |||
7879 | case ARM::t2SBFX: | |||
7880 | case ARM::UBFX: | |||
7881 | case ARM::t2UBFX: { | |||
7882 | // Width must be in range [1, 32-lsb]. | |||
7883 | unsigned LSB = Inst.getOperand(2).getImm(); | |||
7884 | unsigned Widthm1 = Inst.getOperand(3).getImm(); | |||
7885 | if (Widthm1 >= 32 - LSB) | |||
7886 | return Error(Operands[5]->getStartLoc(), | |||
7887 | "bitfield width must be in range [1,32-lsb]"); | |||
7888 | return false; | |||
7889 | } | |||
7890 | // Notionally handles ARM::tLDMIA_UPD too. | |||
7891 | case ARM::tLDMIA: { | |||
7892 | // If we're parsing Thumb2, the .w variant is available and handles | |||
7893 | // most cases that are normally illegal for a Thumb1 LDM instruction. | |||
7894 | // We'll make the transformation in processInstruction() if necessary. | |||
7895 | // | |||
7896 | // Thumb LDM instructions are writeback iff the base register is not | |||
7897 | // in the register list. | |||
7898 | unsigned Rn = Inst.getOperand(0).getReg(); | |||
7899 | bool HasWritebackToken = | |||
7900 | (static_cast<ARMOperand &>(*Operands[3]).isToken() && | |||
7901 | static_cast<ARMOperand &>(*Operands[3]).getToken() == "!"); | |||
7902 | bool ListContainsBase; | |||
7903 | if (checkLowRegisterList(Inst, 3, Rn, 0, ListContainsBase) && !isThumbTwo()) | |||
7904 | return Error(Operands[3 + HasWritebackToken]->getStartLoc(), | |||
7905 | "registers must be in range r0-r7"); | |||
7906 | // If we should have writeback, then there should be a '!' token. | |||
7907 | if (!ListContainsBase && !HasWritebackToken && !isThumbTwo()) | |||
7908 | return Error(Operands[2]->getStartLoc(), | |||
7909 | "writeback operator '!' expected"); | |||
7910 | // If we should not have writeback, there must not be a '!'. This is | |||
7911 | // true even for the 32-bit wide encodings. | |||
7912 | if (ListContainsBase && HasWritebackToken) | |||
7913 | return Error(Operands[3]->getStartLoc(), | |||
7914 | "writeback operator '!' not allowed when base register " | |||
7915 | "in register list"); | |||
7916 | ||||
7917 | if (validatetLDMRegList(Inst, Operands, 3)) | |||
7918 | return true; | |||
7919 | break; | |||
7920 | } | |||
7921 | case ARM::LDMIA_UPD: | |||
7922 | case ARM::LDMDB_UPD: | |||
7923 | case ARM::LDMIB_UPD: | |||
7924 | case ARM::LDMDA_UPD: | |||
7925 | // ARM variants loading and updating the same register are only officially | |||
7926 | // UNPREDICTABLE on v7 upwards. Goodness knows what they did before. | |||
7927 | if (!hasV7Ops()) | |||
7928 | break; | |||
7929 | if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg())) | |||
7930 | return Error(Operands.back()->getStartLoc(), | |||
7931 | "writeback register not allowed in register list"); | |||
7932 | break; | |||
7933 | case ARM::t2LDMIA: | |||
7934 | case ARM::t2LDMDB: | |||
7935 | if (validatetLDMRegList(Inst, Operands, 3)) | |||
7936 | return true; | |||
7937 | break; | |||
7938 | case ARM::t2STMIA: | |||
7939 | case ARM::t2STMDB: | |||
7940 | if (validatetSTMRegList(Inst, Operands, 3)) | |||
7941 | return true; | |||
7942 | break; | |||
7943 | case ARM::t2LDMIA_UPD: | |||
7944 | case ARM::t2LDMDB_UPD: | |||
7945 | case ARM::t2STMIA_UPD: | |||
7946 | case ARM::t2STMDB_UPD: | |||
7947 | if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg())) | |||
7948 | return Error(Operands.back()->getStartLoc(), | |||
7949 | "writeback register not allowed in register list"); | |||
7950 | ||||
7951 | if (Opcode == ARM::t2LDMIA_UPD || Opcode == ARM::t2LDMDB_UPD) { | |||
7952 | if (validatetLDMRegList(Inst, Operands, 3)) | |||
7953 | return true; | |||
7954 | } else { | |||
7955 | if (validatetSTMRegList(Inst, Operands, 3)) | |||
7956 | return true; | |||
7957 | } | |||
7958 | break; | |||
7959 | ||||
7960 | case ARM::sysLDMIA_UPD: | |||
7961 | case ARM::sysLDMDA_UPD: | |||
7962 | case ARM::sysLDMDB_UPD: | |||
7963 | case ARM::sysLDMIB_UPD: | |||
7964 | if (!listContainsReg(Inst, 3, ARM::PC)) | |||
7965 | return Error(Operands[4]->getStartLoc(), | |||
7966 | "writeback register only allowed on system LDM " | |||
7967 | "if PC in register-list"); | |||
7968 | break; | |||
7969 | case ARM::sysSTMIA_UPD: | |||
7970 | case ARM::sysSTMDA_UPD: | |||
7971 | case ARM::sysSTMDB_UPD: | |||
7972 | case ARM::sysSTMIB_UPD: | |||
7973 | return Error(Operands[2]->getStartLoc(), | |||
7974 | "system STM cannot have writeback register"); | |||
7975 | case ARM::tMUL: | |||
7976 | // The second source operand must be the same register as the destination | |||
7977 | // operand. | |||
7978 | // | |||
7979 | // In this case, we must directly check the parsed operands because the | |||
7980 | // cvtThumbMultiply() function is written in such a way that it guarantees | |||
7981 | // this first statement is always true for the new Inst. Essentially, the | |||
7982 | // destination is unconditionally copied into the second source operand | |||
7983 | // without checking to see if it matches what we actually parsed. | |||
7984 | if (Operands.size() == 6 && (((ARMOperand &)*Operands[3]).getReg() != | |||
7985 | ((ARMOperand &)*Operands[5]).getReg()) && | |||
7986 | (((ARMOperand &)*Operands[3]).getReg() != | |||
7987 | ((ARMOperand &)*Operands[4]).getReg())) { | |||
7988 | return Error(Operands[3]->getStartLoc(), | |||
7989 | "destination register must match source register"); | |||
7990 | } | |||
7991 | break; | |||
7992 | ||||
7993 | // Like for ldm/stm, push and pop have hi-reg handling version in Thumb2, | |||
7994 | // so only issue a diagnostic for thumb1. The instructions will be | |||
7995 | // switched to the t2 encodings in processInstruction() if necessary. | |||
7996 | case ARM::tPOP: { | |||
7997 | bool ListContainsBase; | |||
7998 | if (checkLowRegisterList(Inst, 2, 0, ARM::PC, ListContainsBase) && | |||
7999 | !isThumbTwo()) | |||
8000 | return Error(Operands[2]->getStartLoc(), | |||
8001 | "registers must be in range r0-r7 or pc"); | |||
8002 | if (validatetLDMRegList(Inst, Operands, 2, !isMClass())) | |||
8003 | return true; | |||
8004 | break; | |||
8005 | } | |||
8006 | case ARM::tPUSH: { | |||
8007 | bool ListContainsBase; | |||
8008 | if (checkLowRegisterList(Inst, 2, 0, ARM::LR, ListContainsBase) && | |||
8009 | !isThumbTwo()) | |||
8010 | return Error(Operands[2]->getStartLoc(), | |||
8011 | "registers must be in range r0-r7 or lr"); | |||
8012 | if (validatetSTMRegList(Inst, Operands, 2)) | |||
8013 | return true; | |||
8014 | break; | |||
8015 | } | |||
8016 | case ARM::tSTMIA_UPD: { | |||
8017 | bool ListContainsBase, InvalidLowList; | |||
8018 | InvalidLowList = checkLowRegisterList(Inst, 4, Inst.getOperand(0).getReg(), | |||
8019 | 0, ListContainsBase); | |||
8020 | if (InvalidLowList && !isThumbTwo()) | |||
8021 | return Error(Operands[4]->getStartLoc(), | |||
8022 | "registers must be in range r0-r7"); | |||
8023 | ||||
8024 | // This would be converted to a 32-bit stm, but that's not valid if the | |||
8025 | // writeback register is in the list. | |||
8026 | if (InvalidLowList && ListContainsBase) | |||
8027 | return Error(Operands[4]->getStartLoc(), | |||
8028 | "writeback operator '!' not allowed when base register " | |||
8029 | "in register list"); | |||
8030 | ||||
8031 | if (validatetSTMRegList(Inst, Operands, 4)) | |||
8032 | return true; | |||
8033 | break; | |||
8034 | } | |||
8035 | case ARM::tADDrSP: | |||
8036 | // If the non-SP source operand and the destination operand are not the | |||
8037 | // same, we need thumb2 (for the wide encoding), or we have an error. | |||
8038 | if (!isThumbTwo() && | |||
8039 | Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) { | |||
8040 | return Error(Operands[4]->getStartLoc(), | |||
8041 | "source register must be the same as destination"); | |||
8042 | } | |||
8043 | break; | |||
8044 | ||||
8045 | case ARM::t2ADDrr: | |||
8046 | case ARM::t2ADDrs: | |||
8047 | case ARM::t2SUBrr: | |||
8048 | case ARM::t2SUBrs: | |||
8049 | if (Inst.getOperand(0).getReg() == ARM::SP && | |||
8050 | Inst.getOperand(1).getReg() != ARM::SP) | |||
8051 | return Error(Operands[4]->getStartLoc(), | |||
8052 | "source register must be sp if destination is sp"); | |||
8053 | break; | |||
8054 | ||||
8055 | // Final range checking for Thumb unconditional branch instructions. | |||
8056 | case ARM::tB: | |||
8057 | if (!(static_cast<ARMOperand &>(*Operands[2])).isSignedOffset<11, 1>()) | |||
8058 | return Error(Operands[2]->getStartLoc(), "branch target out of range"); | |||
8059 | break; | |||
8060 | case ARM::t2B: { | |||
8061 | int op = (Operands[2]->isImm()) ? 2 : 3; | |||
8062 | ARMOperand &Operand = static_cast<ARMOperand &>(*Operands[op]); | |||
8063 | // Delay the checks of symbolic expressions until they are resolved. | |||
8064 | if (!isa<MCBinaryExpr>(Operand.getImm()) && | |||
8065 | !Operand.isSignedOffset<24, 1>()) | |||
8066 | return Error(Operands[op]->getStartLoc(), "branch target out of range"); | |||
8067 | break; | |||
8068 | } | |||
8069 | // Final range checking for Thumb conditional branch instructions. | |||
8070 | case ARM::tBcc: | |||
8071 | if (!static_cast<ARMOperand &>(*Operands[2]).isSignedOffset<8, 1>()) | |||
8072 | return Error(Operands[2]->getStartLoc(), "branch target out of range"); | |||
8073 | break; | |||
8074 | case ARM::t2Bcc: { | |||
8075 | int Op = (Operands[2]->isImm()) ? 2 : 3; | |||
8076 | if (!static_cast<ARMOperand &>(*Operands[Op]).isSignedOffset<20, 1>()) | |||
8077 | return Error(Operands[Op]->getStartLoc(), "branch target out of range"); | |||
8078 | break; | |||
8079 | } | |||
8080 | case ARM::tCBZ: | |||
8081 | case ARM::tCBNZ: { | |||
8082 | if (!static_cast<ARMOperand &>(*Operands[2]).isUnsignedOffset<6, 1>()) | |||
8083 | return Error(Operands[2]->getStartLoc(), "branch target out of range"); | |||
8084 | break; | |||
8085 | } | |||
8086 | case ARM::MOVi16: | |||
8087 | case ARM::MOVTi16: | |||
8088 | case ARM::t2MOVi16: | |||
8089 | case ARM::t2MOVTi16: | |||
8090 | { | |||
8091 | // We want to avoid misleadingly allowing something like "mov r0, <symbol>" | |||
8092 | // especially when we turn it into a movw and the expression <symbol> does | |||
8093 | // not have a :lower16: or :upper16 as part of the expression. We don't | |||
8094 | // want the behavior of silently truncating, which can be unexpected and | |||
8095 | // lead to bugs that are difficult to find since this is an easy mistake | |||
8096 | // to make. | |||
8097 | int i = (Operands[3]->isImm()) ? 3 : 4; | |||
8098 | ARMOperand &Op = static_cast<ARMOperand &>(*Operands[i]); | |||
8099 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()); | |||
8100 | if (CE) break; | |||
8101 | const MCExpr *E = dyn_cast<MCExpr>(Op.getImm()); | |||
8102 | if (!E) break; | |||
8103 | const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(E); | |||
8104 | if (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 && | |||
8105 | ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16)) | |||
8106 | return Error( | |||
8107 | Op.getStartLoc(), | |||
8108 | "immediate expression for mov requires :lower16: or :upper16"); | |||
8109 | break; | |||
8110 | } | |||
8111 | case ARM::HINT: | |||
8112 | case ARM::t2HINT: { | |||
8113 | unsigned Imm8 = Inst.getOperand(0).getImm(); | |||
8114 | unsigned Pred = Inst.getOperand(1).getImm(); | |||
8115 | // ESB is not predicable (pred must be AL). Without the RAS extension, this | |||
8116 | // behaves as any other unallocated hint. | |||
8117 | if (Imm8 == 0x10 && Pred != ARMCC::AL && hasRAS()) | |||
8118 | return Error(Operands[1]->getStartLoc(), "instruction 'esb' is not " | |||
8119 | "predicable, but condition " | |||
8120 | "code specified"); | |||
8121 | if (Imm8 == 0x14 && Pred != ARMCC::AL) | |||
8122 | return Error(Operands[1]->getStartLoc(), "instruction 'csdb' is not " | |||
8123 | "predicable, but condition " | |||
8124 | "code specified"); | |||
8125 | break; | |||
8126 | } | |||
8127 | case ARM::t2BFi: | |||
8128 | case ARM::t2BFr: | |||
8129 | case ARM::t2BFLi: | |||
8130 | case ARM::t2BFLr: { | |||
8131 | if (!static_cast<ARMOperand &>(*Operands[2]).isUnsignedOffset<4, 1>() || | |||
8132 | (Inst.getOperand(0).isImm() && Inst.getOperand(0).getImm() == 0)) | |||
8133 | return Error(Operands[2]->getStartLoc(), | |||
8134 | "branch location out of range or not a multiple of 2"); | |||
8135 | ||||
8136 | if (Opcode == ARM::t2BFi) { | |||
8137 | if (!static_cast<ARMOperand &>(*Operands[3]).isSignedOffset<16, 1>()) | |||
8138 | return Error(Operands[3]->getStartLoc(), | |||
8139 | "branch target out of range or not a multiple of 2"); | |||
8140 | } else if (Opcode == ARM::t2BFLi) { | |||
8141 | if (!static_cast<ARMOperand &>(*Operands[3]).isSignedOffset<18, 1>()) | |||
8142 | return Error(Operands[3]->getStartLoc(), | |||
8143 | "branch target out of range or not a multiple of 2"); | |||
8144 | } | |||
8145 | break; | |||
8146 | } | |||
8147 | case ARM::t2BFic: { | |||
8148 | if (!static_cast<ARMOperand &>(*Operands[1]).isUnsignedOffset<4, 1>() || | |||
8149 | (Inst.getOperand(0).isImm() && Inst.getOperand(0).getImm() == 0)) | |||
8150 | return Error(Operands[1]->getStartLoc(), | |||
8151 | "branch location out of range or not a multiple of 2"); | |||
8152 | ||||
8153 | if (!static_cast<ARMOperand &>(*Operands[2]).isSignedOffset<16, 1>()) | |||
8154 | return Error(Operands[2]->getStartLoc(), | |||
8155 | "branch target out of range or not a multiple of 2"); | |||
8156 | ||||
8157 | assert(Inst.getOperand(0).isImm() == Inst.getOperand(2).isImm() &&(static_cast <bool> (Inst.getOperand(0).isImm() == Inst .getOperand(2).isImm() && "branch location and else branch target should either both be " "immediates or both labels") ? void (0) : __assert_fail ("Inst.getOperand(0).isImm() == Inst.getOperand(2).isImm() && \"branch location and else branch target should either both be \" \"immediates or both labels\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 8159, __extension__ __PRETTY_FUNCTION__)) | |||
8158 | "branch location and else branch target should either both be "(static_cast <bool> (Inst.getOperand(0).isImm() == Inst .getOperand(2).isImm() && "branch location and else branch target should either both be " "immediates or both labels") ? void (0) : __assert_fail ("Inst.getOperand(0).isImm() == Inst.getOperand(2).isImm() && \"branch location and else branch target should either both be \" \"immediates or both labels\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 8159, __extension__ __PRETTY_FUNCTION__)) | |||
8159 | "immediates or both labels")(static_cast <bool> (Inst.getOperand(0).isImm() == Inst .getOperand(2).isImm() && "branch location and else branch target should either both be " "immediates or both labels") ? void (0) : __assert_fail ("Inst.getOperand(0).isImm() == Inst.getOperand(2).isImm() && \"branch location and else branch target should either both be \" \"immediates or both labels\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 8159, __extension__ __PRETTY_FUNCTION__)); | |||
8160 | ||||
8161 | if (Inst.getOperand(0).isImm() && Inst.getOperand(2).isImm()) { | |||
8162 | int Diff = Inst.getOperand(2).getImm() - Inst.getOperand(0).getImm(); | |||
8163 | if (Diff != 4 && Diff != 2) | |||
8164 | return Error( | |||
8165 | Operands[3]->getStartLoc(), | |||
8166 | "else branch target must be 2 or 4 greater than the branch location"); | |||
8167 | } | |||
8168 | break; | |||
8169 | } | |||
8170 | case ARM::t2CLRM: { | |||
8171 | for (unsigned i = 2; i < Inst.getNumOperands(); i++) { | |||
8172 | if (Inst.getOperand(i).isReg() && | |||
8173 | !ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains( | |||
8174 | Inst.getOperand(i).getReg())) { | |||
8175 | return Error(Operands[2]->getStartLoc(), | |||
8176 | "invalid register in register list. Valid registers are " | |||
8177 | "r0-r12, lr/r14 and APSR."); | |||
8178 | } | |||
8179 | } | |||
8180 | break; | |||
8181 | } | |||
8182 | case ARM::DSB: | |||
8183 | case ARM::t2DSB: { | |||
8184 | ||||
8185 | if (Inst.getNumOperands() < 2) | |||
8186 | break; | |||
8187 | ||||
8188 | unsigned Option = Inst.getOperand(0).getImm(); | |||
8189 | unsigned Pred = Inst.getOperand(1).getImm(); | |||
8190 | ||||
8191 | // SSBB and PSSBB (DSB #0|#4) are not predicable (pred must be AL). | |||
8192 | if (Option == 0 && Pred != ARMCC::AL) | |||
8193 | return Error(Operands[1]->getStartLoc(), | |||
8194 | "instruction 'ssbb' is not predicable, but condition code " | |||
8195 | "specified"); | |||
8196 | if (Option == 4 && Pred != ARMCC::AL) | |||
8197 | return Error(Operands[1]->getStartLoc(), | |||
8198 | "instruction 'pssbb' is not predicable, but condition code " | |||
8199 | "specified"); | |||
8200 | break; | |||
8201 | } | |||
8202 | case ARM::VMOVRRS: { | |||
8203 | // Source registers must be sequential. | |||
8204 | const unsigned Sm = MRI->getEncodingValue(Inst.getOperand(2).getReg()); | |||
8205 | const unsigned Sm1 = MRI->getEncodingValue(Inst.getOperand(3).getReg()); | |||
8206 | if (Sm1 != Sm + 1) | |||
8207 | return Error(Operands[5]->getStartLoc(), | |||
8208 | "source operands must be sequential"); | |||
8209 | break; | |||
8210 | } | |||
8211 | case ARM::VMOVSRR: { | |||
8212 | // Destination registers must be sequential. | |||
8213 | const unsigned Sm = MRI->getEncodingValue(Inst.getOperand(0).getReg()); | |||
8214 | const unsigned Sm1 = MRI->getEncodingValue(Inst.getOperand(1).getReg()); | |||
8215 | if (Sm1 != Sm + 1) | |||
8216 | return Error(Operands[3]->getStartLoc(), | |||
8217 | "destination operands must be sequential"); | |||
8218 | break; | |||
8219 | } | |||
8220 | case ARM::VLDMDIA: | |||
8221 | case ARM::VSTMDIA: { | |||
8222 | ARMOperand &Op = static_cast<ARMOperand&>(*Operands[3]); | |||
8223 | auto &RegList = Op.getRegList(); | |||
8224 | if (RegList.size() < 1 || RegList.size() > 16) | |||
8225 | return Error(Operands[3]->getStartLoc(), | |||
8226 | "list of registers must be at least 1 and at most 16"); | |||
8227 | break; | |||
8228 | } | |||
8229 | case ARM::MVE_VQDMULLs32bh: | |||
8230 | case ARM::MVE_VQDMULLs32th: | |||
8231 | case ARM::MVE_VCMULf32: | |||
8232 | case ARM::MVE_VMULLBs32: | |||
8233 | case ARM::MVE_VMULLTs32: | |||
8234 | case ARM::MVE_VMULLBu32: | |||
8235 | case ARM::MVE_VMULLTu32: { | |||
8236 | if (Operands[3]->getReg() == Operands[4]->getReg()) { | |||
8237 | return Error (Operands[3]->getStartLoc(), | |||
8238 | "Qd register and Qn register can't be identical"); | |||
8239 | } | |||
8240 | if (Operands[3]->getReg() == Operands[5]->getReg()) { | |||
8241 | return Error (Operands[3]->getStartLoc(), | |||
8242 | "Qd register and Qm register can't be identical"); | |||
8243 | } | |||
8244 | break; | |||
8245 | } | |||
8246 | case ARM::MVE_VMOV_rr_q: { | |||
8247 | if (Operands[4]->getReg() != Operands[6]->getReg()) | |||
8248 | return Error (Operands[4]->getStartLoc(), "Q-registers must be the same"); | |||
8249 | if (static_cast<ARMOperand &>(*Operands[5]).getVectorIndex() != | |||
8250 | static_cast<ARMOperand &>(*Operands[7]).getVectorIndex() + 2) | |||
8251 | return Error (Operands[5]->getStartLoc(), "Q-register indexes must be 2 and 0 or 3 and 1"); | |||
8252 | break; | |||
8253 | } | |||
8254 | case ARM::MVE_VMOV_q_rr: { | |||
8255 | if (Operands[2]->getReg() != Operands[4]->getReg()) | |||
8256 | return Error (Operands[2]->getStartLoc(), "Q-registers must be the same"); | |||
8257 | if (static_cast<ARMOperand &>(*Operands[3]).getVectorIndex() != | |||
8258 | static_cast<ARMOperand &>(*Operands[5]).getVectorIndex() + 2) | |||
8259 | return Error (Operands[3]->getStartLoc(), "Q-register indexes must be 2 and 0 or 3 and 1"); | |||
8260 | break; | |||
8261 | } | |||
8262 | case ARM::UMAAL: | |||
8263 | case ARM::UMLAL: | |||
8264 | case ARM::UMULL: | |||
8265 | case ARM::t2UMAAL: | |||
8266 | case ARM::t2UMLAL: | |||
8267 | case ARM::t2UMULL: | |||
8268 | case ARM::SMLAL: | |||
8269 | case ARM::SMLALBB: | |||
8270 | case ARM::SMLALBT: | |||
8271 | case ARM::SMLALD: | |||
8272 | case ARM::SMLALDX: | |||
8273 | case ARM::SMLALTB: | |||
8274 | case ARM::SMLALTT: | |||
8275 | case ARM::SMLSLD: | |||
8276 | case ARM::SMLSLDX: | |||
8277 | case ARM::SMULL: | |||
8278 | case ARM::t2SMLAL: | |||
8279 | case ARM::t2SMLALBB: | |||
8280 | case ARM::t2SMLALBT: | |||
8281 | case ARM::t2SMLALD: | |||
8282 | case ARM::t2SMLALDX: | |||
8283 | case ARM::t2SMLALTB: | |||
8284 | case ARM::t2SMLALTT: | |||
8285 | case ARM::t2SMLSLD: | |||
8286 | case ARM::t2SMLSLDX: | |||
8287 | case ARM::t2SMULL: { | |||
8288 | unsigned RdHi = Inst.getOperand(0).getReg(); | |||
8289 | unsigned RdLo = Inst.getOperand(1).getReg(); | |||
8290 | if(RdHi == RdLo) { | |||
8291 | return Error(Loc, | |||
8292 | "unpredictable instruction, RdHi and RdLo must be different"); | |||
8293 | } | |||
8294 | break; | |||
8295 | } | |||
8296 | ||||
8297 | case ARM::CDE_CX1: | |||
8298 | case ARM::CDE_CX1A: | |||
8299 | case ARM::CDE_CX1D: | |||
8300 | case ARM::CDE_CX1DA: | |||
8301 | case ARM::CDE_CX2: | |||
8302 | case ARM::CDE_CX2A: | |||
8303 | case ARM::CDE_CX2D: | |||
8304 | case ARM::CDE_CX2DA: | |||
8305 | case ARM::CDE_CX3: | |||
8306 | case ARM::CDE_CX3A: | |||
8307 | case ARM::CDE_CX3D: | |||
8308 | case ARM::CDE_CX3DA: | |||
8309 | case ARM::CDE_VCX1_vec: | |||
8310 | case ARM::CDE_VCX1_fpsp: | |||
8311 | case ARM::CDE_VCX1_fpdp: | |||
8312 | case ARM::CDE_VCX1A_vec: | |||
8313 | case ARM::CDE_VCX1A_fpsp: | |||
8314 | case ARM::CDE_VCX1A_fpdp: | |||
8315 | case ARM::CDE_VCX2_vec: | |||
8316 | case ARM::CDE_VCX2_fpsp: | |||
8317 | case ARM::CDE_VCX2_fpdp: | |||
8318 | case ARM::CDE_VCX2A_vec: | |||
8319 | case ARM::CDE_VCX2A_fpsp: | |||
8320 | case ARM::CDE_VCX2A_fpdp: | |||
8321 | case ARM::CDE_VCX3_vec: | |||
8322 | case ARM::CDE_VCX3_fpsp: | |||
8323 | case ARM::CDE_VCX3_fpdp: | |||
8324 | case ARM::CDE_VCX3A_vec: | |||
8325 | case ARM::CDE_VCX3A_fpsp: | |||
8326 | case ARM::CDE_VCX3A_fpdp: { | |||
8327 | assert(Inst.getOperand(1).isImm() &&(static_cast <bool> (Inst.getOperand(1).isImm() && "CDE operand 1 must be a coprocessor ID") ? void (0) : __assert_fail ("Inst.getOperand(1).isImm() && \"CDE operand 1 must be a coprocessor ID\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 8328, __extension__ __PRETTY_FUNCTION__)) | |||
8328 | "CDE operand 1 must be a coprocessor ID")(static_cast <bool> (Inst.getOperand(1).isImm() && "CDE operand 1 must be a coprocessor ID") ? void (0) : __assert_fail ("Inst.getOperand(1).isImm() && \"CDE operand 1 must be a coprocessor ID\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 8328, __extension__ __PRETTY_FUNCTION__)); | |||
8329 | int64_t Coproc = Inst.getOperand(1).getImm(); | |||
8330 | if (Coproc < 8 && !ARM::isCDECoproc(Coproc, *STI)) | |||
8331 | return Error(Operands[1]->getStartLoc(), | |||
8332 | "coprocessor must be configured as CDE"); | |||
8333 | else if (Coproc >= 8) | |||
8334 | return Error(Operands[1]->getStartLoc(), | |||
8335 | "coprocessor must be in the range [p0, p7]"); | |||
8336 | break; | |||
8337 | } | |||
8338 | ||||
8339 | case ARM::t2CDP: | |||
8340 | case ARM::t2CDP2: | |||
8341 | case ARM::t2LDC2L_OFFSET: | |||
8342 | case ARM::t2LDC2L_OPTION: | |||
8343 | case ARM::t2LDC2L_POST: | |||
8344 | case ARM::t2LDC2L_PRE: | |||
8345 | case ARM::t2LDC2_OFFSET: | |||
8346 | case ARM::t2LDC2_OPTION: | |||
8347 | case ARM::t2LDC2_POST: | |||
8348 | case ARM::t2LDC2_PRE: | |||
8349 | case ARM::t2LDCL_OFFSET: | |||
8350 | case ARM::t2LDCL_OPTION: | |||
8351 | case ARM::t2LDCL_POST: | |||
8352 | case ARM::t2LDCL_PRE: | |||
8353 | case ARM::t2LDC_OFFSET: | |||
8354 | case ARM::t2LDC_OPTION: | |||
8355 | case ARM::t2LDC_POST: | |||
8356 | case ARM::t2LDC_PRE: | |||
8357 | case ARM::t2MCR: | |||
8358 | case ARM::t2MCR2: | |||
8359 | case ARM::t2MCRR: | |||
8360 | case ARM::t2MCRR2: | |||
8361 | case ARM::t2MRC: | |||
8362 | case ARM::t2MRC2: | |||
8363 | case ARM::t2MRRC: | |||
8364 | case ARM::t2MRRC2: | |||
8365 | case ARM::t2STC2L_OFFSET: | |||
8366 | case ARM::t2STC2L_OPTION: | |||
8367 | case ARM::t2STC2L_POST: | |||
8368 | case ARM::t2STC2L_PRE: | |||
8369 | case ARM::t2STC2_OFFSET: | |||
8370 | case ARM::t2STC2_OPTION: | |||
8371 | case ARM::t2STC2_POST: | |||
8372 | case ARM::t2STC2_PRE: | |||
8373 | case ARM::t2STCL_OFFSET: | |||
8374 | case ARM::t2STCL_OPTION: | |||
8375 | case ARM::t2STCL_POST: | |||
8376 | case ARM::t2STCL_PRE: | |||
8377 | case ARM::t2STC_OFFSET: | |||
8378 | case ARM::t2STC_OPTION: | |||
8379 | case ARM::t2STC_POST: | |||
8380 | case ARM::t2STC_PRE: { | |||
8381 | unsigned Opcode = Inst.getOpcode(); | |||
8382 | // Inst.getOperand indexes operands in the (oops ...) and (iops ...) dags, | |||
8383 | // CopInd is the index of the coprocessor operand. | |||
8384 | size_t CopInd = 0; | |||
8385 | if (Opcode == ARM::t2MRRC || Opcode == ARM::t2MRRC2) | |||
8386 | CopInd = 2; | |||
8387 | else if (Opcode == ARM::t2MRC || Opcode == ARM::t2MRC2) | |||
8388 | CopInd = 1; | |||
8389 | assert(Inst.getOperand(CopInd).isImm() &&(static_cast <bool> (Inst.getOperand(CopInd).isImm() && "Operand must be a coprocessor ID") ? void (0) : __assert_fail ("Inst.getOperand(CopInd).isImm() && \"Operand must be a coprocessor ID\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 8390, __extension__ __PRETTY_FUNCTION__)) | |||
8390 | "Operand must be a coprocessor ID")(static_cast <bool> (Inst.getOperand(CopInd).isImm() && "Operand must be a coprocessor ID") ? void (0) : __assert_fail ("Inst.getOperand(CopInd).isImm() && \"Operand must be a coprocessor ID\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 8390, __extension__ __PRETTY_FUNCTION__)); | |||
8391 | int64_t Coproc = Inst.getOperand(CopInd).getImm(); | |||
8392 | // Operands[2] is the coprocessor operand at syntactic level | |||
8393 | if (ARM::isCDECoproc(Coproc, *STI)) | |||
8394 | return Error(Operands[2]->getStartLoc(), | |||
8395 | "coprocessor must be configured as GCP"); | |||
8396 | break; | |||
8397 | } | |||
8398 | } | |||
8399 | ||||
8400 | return false; | |||
8401 | } | |||
8402 | ||||
/// Map an assembly-level VSTn pseudo opcode (the "...Asm..." aliases the
/// matcher produces for NEON element/structure stores with a register-list
/// operand) onto the real MCInst opcode.
///
/// \param Opc      Pseudo opcode selected by the asm matcher.
/// \param Spacing  [out] Increment between successive D registers in the
///                 register list: 1 for consecutive registers (d-register
///                 forms), 2 for every-other register (q-register forms).
///                 The caller (processInstruction) materializes the extra
///                 list operands as getReg() + Spacing, + Spacing * 2, etc.
/// \return The real (encodable) opcode; unreachable for any other input.
static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) {
  switch(Opc) {
  default: llvm_unreachable("unexpected opcode!");
  // VST1LN
  case ARM::VST1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
  case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
  case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
  case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
  case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdAsm_8:  Spacing = 1; return ARM::VST1LNd8;
  case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16;
  case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32;

  // VST2LN
  case ARM::VST2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
  case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
  case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
  case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
  case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;

  case ARM::VST2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
  case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
  case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
  case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
  case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;

  case ARM::VST2LNdAsm_8:  Spacing = 1; return ARM::VST2LNd8;
  case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16;
  case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32;
  case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16;
  case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32;

  // VST3LN
  case ARM::VST3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
  case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
  case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
  // NOTE(review): Spacing = 1 here disagrees with the q-form convention and
  // with VST3LNqWB_register_Asm_16 below (Spacing = 2) — confirm intentional.
  case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNq16_UPD;
  case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
  case ARM::VST3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
  case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
  case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
  case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
  case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
  case ARM::VST3LNdAsm_8:  Spacing = 1; return ARM::VST3LNd8;
  case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16;
  case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32;
  case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16;
  case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32;

  // VST3
  case ARM::VST3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
  case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
  case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
  case ARM::VST3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
  case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
  case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
  case ARM::VST3dWB_register_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
  case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
  case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
  case ARM::VST3qWB_register_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
  case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
  case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
  case ARM::VST3dAsm_8:  Spacing = 1; return ARM::VST3d8;
  case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16;
  case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32;
  case ARM::VST3qAsm_8:  Spacing = 2; return ARM::VST3q8;
  case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16;
  case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32;

  // VST4LN
  case ARM::VST4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
  case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
  case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
  // NOTE(review): Spacing = 1 here disagrees with VST4LNqWB_register_Asm_16
  // below (Spacing = 2) — confirm intentional.
  case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNq16_UPD;
  case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
  case ARM::VST4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
  case ARM::VST4LNdWB_register_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
  case ARM::VST4LNdWB_register_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
  case ARM::VST4LNqWB_register_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
  case ARM::VST4LNqWB_register_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
  case ARM::VST4LNdAsm_8:  Spacing = 1; return ARM::VST4LNd8;
  case ARM::VST4LNdAsm_16: Spacing = 1; return ARM::VST4LNd16;
  case ARM::VST4LNdAsm_32: Spacing = 1; return ARM::VST4LNd32;
  case ARM::VST4LNqAsm_16: Spacing = 2; return ARM::VST4LNq16;
  case ARM::VST4LNqAsm_32: Spacing = 2; return ARM::VST4LNq32;

  // VST4
  case ARM::VST4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
  case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
  case ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
  case ARM::VST4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
  case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
  case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
  case ARM::VST4dWB_register_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
  case ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
  case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
  case ARM::VST4qWB_register_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
  case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
  case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
  case ARM::VST4dAsm_8:  Spacing = 1; return ARM::VST4d8;
  case ARM::VST4dAsm_16: Spacing = 1; return ARM::VST4d16;
  case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32;
  case ARM::VST4qAsm_8:  Spacing = 2; return ARM::VST4q8;
  case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16;
  case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32;
  }
}
8511 | ||||
/// Map an assembly-level VLDn pseudo opcode (the "...Asm..." aliases the
/// matcher produces for NEON element/structure loads with a register-list
/// operand) onto the real MCInst opcode.  Counterpart of getRealVSTOpcode.
///
/// \param Opc      Pseudo opcode selected by the asm matcher.
/// \param Spacing  [out] Increment between successive D registers in the
///                 register list: 1 for consecutive registers (d-register
///                 forms), 2 for every-other register (q-register forms).
///                 The caller (processInstruction) materializes the extra
///                 list operands as getReg() + Spacing, + Spacing * 2, etc.
/// \return The real (encodable) opcode; unreachable for any other input.
static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
  switch(Opc) {
  default: llvm_unreachable("unexpected opcode!");
  // VLD1LN
  case ARM::VLD1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
  case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
  case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
  case ARM::VLD1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
  case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
  case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
  case ARM::VLD1LNdAsm_8:  Spacing = 1; return ARM::VLD1LNd8;
  case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16;
  case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32;

  // VLD2LN
  case ARM::VLD2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
  case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
  case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
  // NOTE(review): Spacing = 1 here disagrees with VLD2LNqWB_register_Asm_16
  // below (Spacing = 2) — confirm intentional.
  case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNq16_UPD;
  case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
  case ARM::VLD2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
  case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
  case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
  case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
  case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
  case ARM::VLD2LNdAsm_8:  Spacing = 1; return ARM::VLD2LNd8;
  case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16;
  case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32;
  case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16;
  case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32;

  // VLD3DUP
  case ARM::VLD3DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
  case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
  case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
  // NOTE(review): Spacing = 1 here disagrees with VLD3DUPqWB_register_Asm_8
  // below (Spacing = 2) — confirm intentional.
  case ARM::VLD3DUPqWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3DUPq8_UPD;
  case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
  case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
  case ARM::VLD3DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
  case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
  case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
  case ARM::VLD3DUPqWB_register_Asm_8:  Spacing = 2; return ARM::VLD3DUPq8_UPD;
  case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
  case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
  case ARM::VLD3DUPdAsm_8:  Spacing = 1; return ARM::VLD3DUPd8;
  case ARM::VLD3DUPdAsm_16: Spacing = 1; return ARM::VLD3DUPd16;
  case ARM::VLD3DUPdAsm_32: Spacing = 1; return ARM::VLD3DUPd32;
  case ARM::VLD3DUPqAsm_8:  Spacing = 2; return ARM::VLD3DUPq8;
  case ARM::VLD3DUPqAsm_16: Spacing = 2; return ARM::VLD3DUPq16;
  case ARM::VLD3DUPqAsm_32: Spacing = 2; return ARM::VLD3DUPq32;

  // VLD3LN
  case ARM::VLD3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
  case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
  case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
  // NOTE(review): Spacing = 1 here disagrees with VLD3LNqWB_register_Asm_16
  // below (Spacing = 2) — confirm intentional.
  case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNq16_UPD;
  case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
  case ARM::VLD3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
  case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
  case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
  case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
  case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
  case ARM::VLD3LNdAsm_8:  Spacing = 1; return ARM::VLD3LNd8;
  case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16;
  case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32;
  case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16;
  case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32;

  // VLD3
  case ARM::VLD3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
  case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
  case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
  case ARM::VLD3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
  case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
  case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
  case ARM::VLD3dWB_register_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
  case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
  case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
  case ARM::VLD3qWB_register_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
  case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
  case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
  case ARM::VLD3dAsm_8:  Spacing = 1; return ARM::VLD3d8;
  case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16;
  case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32;
  case ARM::VLD3qAsm_8:  Spacing = 2; return ARM::VLD3q8;
  case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16;
  case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32;

  // VLD4LN
  case ARM::VLD4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
  case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
  case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
  case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
  case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
  case ARM::VLD4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
  case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
  case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
  case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
  case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
  case ARM::VLD4LNdAsm_8:  Spacing = 1; return ARM::VLD4LNd8;
  case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16;
  case ARM::VLD4LNdAsm_32: Spacing = 1; return ARM::VLD4LNd32;
  case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16;
  case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32;

  // VLD4DUP
  case ARM::VLD4DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
  case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
  case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
  // NOTE(review): Spacing = 1 on the next two rows disagrees with the
  // matching _register variants below (Spacing = 2) — confirm intentional.
  case ARM::VLD4DUPqWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4DUPq8_UPD;
  case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPq16_UPD;
  case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
  case ARM::VLD4DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
  case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
  case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
  case ARM::VLD4DUPqWB_register_Asm_8:  Spacing = 2; return ARM::VLD4DUPq8_UPD;
  case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
  case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
  case ARM::VLD4DUPdAsm_8:  Spacing = 1; return ARM::VLD4DUPd8;
  case ARM::VLD4DUPdAsm_16: Spacing = 1; return ARM::VLD4DUPd16;
  case ARM::VLD4DUPdAsm_32: Spacing = 1; return ARM::VLD4DUPd32;
  case ARM::VLD4DUPqAsm_8:  Spacing = 2; return ARM::VLD4DUPq8;
  case ARM::VLD4DUPqAsm_16: Spacing = 2; return ARM::VLD4DUPq16;
  case ARM::VLD4DUPqAsm_32: Spacing = 2; return ARM::VLD4DUPq32;

  // VLD4
  case ARM::VLD4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
  case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
  case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
  case ARM::VLD4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
  case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
  case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
  case ARM::VLD4dWB_register_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
  case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
  case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
  case ARM::VLD4qWB_register_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
  case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
  case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
  case ARM::VLD4dAsm_8:  Spacing = 1; return ARM::VLD4d8;
  case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16;
  case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32;
  case ARM::VLD4qAsm_8:  Spacing = 2; return ARM::VLD4q8;
  case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16;
  case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32;
  }
}
8658 | ||||
8659 | bool ARMAsmParser::processInstruction(MCInst &Inst, | |||
8660 | const OperandVector &Operands, | |||
8661 | MCStreamer &Out) { | |||
8662 | // Check if we have the wide qualifier, because if it's present we | |||
8663 | // must avoid selecting a 16-bit thumb instruction. | |||
8664 | bool HasWideQualifier = false; | |||
8665 | for (auto &Op : Operands) { | |||
8666 | ARMOperand &ARMOp = static_cast<ARMOperand&>(*Op); | |||
8667 | if (ARMOp.isToken() && ARMOp.getToken() == ".w") { | |||
8668 | HasWideQualifier = true; | |||
8669 | break; | |||
8670 | } | |||
8671 | } | |||
8672 | ||||
8673 | switch (Inst.getOpcode()) { | |||
8674 | // Alias for alternate form of 'ldr{,b}t Rt, [Rn], #imm' instruction. | |||
8675 | case ARM::LDRT_POST: | |||
8676 | case ARM::LDRBT_POST: { | |||
8677 | const unsigned Opcode = | |||
8678 | (Inst.getOpcode() == ARM::LDRT_POST) ? ARM::LDRT_POST_IMM | |||
8679 | : ARM::LDRBT_POST_IMM; | |||
8680 | MCInst TmpInst; | |||
8681 | TmpInst.setOpcode(Opcode); | |||
8682 | TmpInst.addOperand(Inst.getOperand(0)); | |||
8683 | TmpInst.addOperand(Inst.getOperand(1)); | |||
8684 | TmpInst.addOperand(Inst.getOperand(1)); | |||
8685 | TmpInst.addOperand(MCOperand::createReg(0)); | |||
8686 | TmpInst.addOperand(MCOperand::createImm(0)); | |||
8687 | TmpInst.addOperand(Inst.getOperand(2)); | |||
8688 | TmpInst.addOperand(Inst.getOperand(3)); | |||
8689 | Inst = TmpInst; | |||
8690 | return true; | |||
8691 | } | |||
8692 | // Alias for 'ldr{sb,h,sh}t Rt, [Rn] {, #imm}' for ommitted immediate. | |||
8693 | case ARM::LDRSBTii: | |||
8694 | case ARM::LDRHTii: | |||
8695 | case ARM::LDRSHTii: { | |||
8696 | MCInst TmpInst; | |||
8697 | ||||
8698 | if (Inst.getOpcode() == ARM::LDRSBTii) | |||
8699 | TmpInst.setOpcode(ARM::LDRSBTi); | |||
8700 | else if (Inst.getOpcode() == ARM::LDRHTii) | |||
8701 | TmpInst.setOpcode(ARM::LDRHTi); | |||
8702 | else if (Inst.getOpcode() == ARM::LDRSHTii) | |||
8703 | TmpInst.setOpcode(ARM::LDRSHTi); | |||
8704 | TmpInst.addOperand(Inst.getOperand(0)); | |||
8705 | TmpInst.addOperand(Inst.getOperand(1)); | |||
8706 | TmpInst.addOperand(Inst.getOperand(1)); | |||
8707 | TmpInst.addOperand(MCOperand::createImm(256)); | |||
8708 | TmpInst.addOperand(Inst.getOperand(2)); | |||
8709 | Inst = TmpInst; | |||
8710 | return true; | |||
8711 | } | |||
8712 | // Alias for alternate form of 'str{,b}t Rt, [Rn], #imm' instruction. | |||
8713 | case ARM::STRT_POST: | |||
8714 | case ARM::STRBT_POST: { | |||
8715 | const unsigned Opcode = | |||
8716 | (Inst.getOpcode() == ARM::STRT_POST) ? ARM::STRT_POST_IMM | |||
8717 | : ARM::STRBT_POST_IMM; | |||
8718 | MCInst TmpInst; | |||
8719 | TmpInst.setOpcode(Opcode); | |||
8720 | TmpInst.addOperand(Inst.getOperand(1)); | |||
8721 | TmpInst.addOperand(Inst.getOperand(0)); | |||
8722 | TmpInst.addOperand(Inst.getOperand(1)); | |||
8723 | TmpInst.addOperand(MCOperand::createReg(0)); | |||
8724 | TmpInst.addOperand(MCOperand::createImm(0)); | |||
8725 | TmpInst.addOperand(Inst.getOperand(2)); | |||
8726 | TmpInst.addOperand(Inst.getOperand(3)); | |||
8727 | Inst = TmpInst; | |||
8728 | return true; | |||
8729 | } | |||
8730 | // Alias for alternate form of 'ADR Rd, #imm' instruction. | |||
8731 | case ARM::ADDri: { | |||
8732 | if (Inst.getOperand(1).getReg() != ARM::PC || | |||
8733 | Inst.getOperand(5).getReg() != 0 || | |||
8734 | !(Inst.getOperand(2).isExpr() || Inst.getOperand(2).isImm())) | |||
8735 | return false; | |||
8736 | MCInst TmpInst; | |||
8737 | TmpInst.setOpcode(ARM::ADR); | |||
8738 | TmpInst.addOperand(Inst.getOperand(0)); | |||
8739 | if (Inst.getOperand(2).isImm()) { | |||
8740 | // Immediate (mod_imm) will be in its encoded form, we must unencode it | |||
8741 | // before passing it to the ADR instruction. | |||
8742 | unsigned Enc = Inst.getOperand(2).getImm(); | |||
8743 | TmpInst.addOperand(MCOperand::createImm( | |||
8744 | ARM_AM::rotr32(Enc & 0xFF, (Enc & 0xF00) >> 7))); | |||
8745 | } else { | |||
8746 | // Turn PC-relative expression into absolute expression. | |||
8747 | // Reading PC provides the start of the current instruction + 8 and | |||
8748 | // the transform to adr is biased by that. | |||
8749 | MCSymbol *Dot = getContext().createTempSymbol(); | |||
8750 | Out.emitLabel(Dot); | |||
8751 | const MCExpr *OpExpr = Inst.getOperand(2).getExpr(); | |||
8752 | const MCExpr *InstPC = MCSymbolRefExpr::create(Dot, | |||
8753 | MCSymbolRefExpr::VK_None, | |||
8754 | getContext()); | |||
8755 | const MCExpr *Const8 = MCConstantExpr::create(8, getContext()); | |||
8756 | const MCExpr *ReadPC = MCBinaryExpr::createAdd(InstPC, Const8, | |||
8757 | getContext()); | |||
8758 | const MCExpr *FixupAddr = MCBinaryExpr::createAdd(ReadPC, OpExpr, | |||
8759 | getContext()); | |||
8760 | TmpInst.addOperand(MCOperand::createExpr(FixupAddr)); | |||
8761 | } | |||
8762 | TmpInst.addOperand(Inst.getOperand(3)); | |||
8763 | TmpInst.addOperand(Inst.getOperand(4)); | |||
8764 | Inst = TmpInst; | |||
8765 | return true; | |||
8766 | } | |||
8767 | // Aliases for imm syntax of LDR instructions. | |||
8768 | case ARM::t2LDR_PRE_imm: | |||
8769 | case ARM::t2LDR_POST_imm: { | |||
8770 | MCInst TmpInst; | |||
8771 | TmpInst.setOpcode(Inst.getOpcode() == ARM::t2LDR_PRE_imm ? ARM::t2LDR_PRE | |||
8772 | : ARM::t2LDR_POST); | |||
8773 | TmpInst.addOperand(Inst.getOperand(0)); // Rt | |||
8774 | TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb | |||
8775 | TmpInst.addOperand(Inst.getOperand(1)); // Rn | |||
8776 | TmpInst.addOperand(Inst.getOperand(2)); // imm | |||
8777 | TmpInst.addOperand(Inst.getOperand(3)); // CondCode | |||
8778 | Inst = TmpInst; | |||
8779 | return true; | |||
8780 | } | |||
8781 | // Aliases for imm syntax of STR instructions. | |||
8782 | case ARM::t2STR_PRE_imm: | |||
8783 | case ARM::t2STR_POST_imm: { | |||
8784 | MCInst TmpInst; | |||
8785 | TmpInst.setOpcode(Inst.getOpcode() == ARM::t2STR_PRE_imm ? ARM::t2STR_PRE | |||
8786 | : ARM::t2STR_POST); | |||
8787 | TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb | |||
8788 | TmpInst.addOperand(Inst.getOperand(0)); // Rt | |||
8789 | TmpInst.addOperand(Inst.getOperand(1)); // Rn | |||
8790 | TmpInst.addOperand(Inst.getOperand(2)); // imm | |||
8791 | TmpInst.addOperand(Inst.getOperand(3)); // CondCode | |||
8792 | Inst = TmpInst; | |||
8793 | return true; | |||
8794 | } | |||
8795 | // Aliases for alternate PC+imm syntax of LDR instructions. | |||
8796 | case ARM::t2LDRpcrel: | |||
8797 | // Select the narrow version if the immediate will fit. | |||
8798 | if (Inst.getOperand(1).getImm() > 0 && | |||
8799 | Inst.getOperand(1).getImm() <= 0xff && | |||
8800 | !HasWideQualifier) | |||
8801 | Inst.setOpcode(ARM::tLDRpci); | |||
8802 | else | |||
8803 | Inst.setOpcode(ARM::t2LDRpci); | |||
8804 | return true; | |||
8805 | case ARM::t2LDRBpcrel: | |||
8806 | Inst.setOpcode(ARM::t2LDRBpci); | |||
8807 | return true; | |||
8808 | case ARM::t2LDRHpcrel: | |||
8809 | Inst.setOpcode(ARM::t2LDRHpci); | |||
8810 | return true; | |||
8811 | case ARM::t2LDRSBpcrel: | |||
8812 | Inst.setOpcode(ARM::t2LDRSBpci); | |||
8813 | return true; | |||
8814 | case ARM::t2LDRSHpcrel: | |||
8815 | Inst.setOpcode(ARM::t2LDRSHpci); | |||
8816 | return true; | |||
8817 | case ARM::LDRConstPool: | |||
8818 | case ARM::tLDRConstPool: | |||
8819 | case ARM::t2LDRConstPool: { | |||
8820 | // Pseudo instruction ldr rt, =immediate is converted to a | |||
8821 | // MOV rt, immediate if immediate is known and representable | |||
8822 | // otherwise we create a constant pool entry that we load from. | |||
8823 | MCInst TmpInst; | |||
8824 | if (Inst.getOpcode() == ARM::LDRConstPool) | |||
8825 | TmpInst.setOpcode(ARM::LDRi12); | |||
8826 | else if (Inst.getOpcode() == ARM::tLDRConstPool) | |||
8827 | TmpInst.setOpcode(ARM::tLDRpci); | |||
8828 | else if (Inst.getOpcode() == ARM::t2LDRConstPool) | |||
8829 | TmpInst.setOpcode(ARM::t2LDRpci); | |||
8830 | const ARMOperand &PoolOperand = | |||
8831 | (HasWideQualifier ? | |||
8832 | static_cast<ARMOperand &>(*Operands[4]) : | |||
8833 | static_cast<ARMOperand &>(*Operands[3])); | |||
8834 | const MCExpr *SubExprVal = PoolOperand.getConstantPoolImm(); | |||
8835 | // If SubExprVal is a constant we may be able to use a MOV | |||
8836 | if (isa<MCConstantExpr>(SubExprVal) && | |||
8837 | Inst.getOperand(0).getReg() != ARM::PC && | |||
8838 | Inst.getOperand(0).getReg() != ARM::SP) { | |||
8839 | int64_t Value = | |||
8840 | (int64_t) (cast<MCConstantExpr>(SubExprVal))->getValue(); | |||
8841 | bool UseMov = true; | |||
8842 | bool MovHasS = true; | |||
8843 | if (Inst.getOpcode() == ARM::LDRConstPool) { | |||
8844 | // ARM Constant | |||
8845 | if (ARM_AM::getSOImmVal(Value) != -1) { | |||
8846 | Value = ARM_AM::getSOImmVal(Value); | |||
8847 | TmpInst.setOpcode(ARM::MOVi); | |||
8848 | } | |||
8849 | else if (ARM_AM::getSOImmVal(~Value) != -1) { | |||
8850 | Value = ARM_AM::getSOImmVal(~Value); | |||
8851 | TmpInst.setOpcode(ARM::MVNi); | |||
8852 | } | |||
8853 | else if (hasV6T2Ops() && | |||
8854 | Value >=0 && Value < 65536) { | |||
8855 | TmpInst.setOpcode(ARM::MOVi16); | |||
8856 | MovHasS = false; | |||
8857 | } | |||
8858 | else | |||
8859 | UseMov = false; | |||
8860 | } | |||
8861 | else { | |||
8862 | // Thumb/Thumb2 Constant | |||
8863 | if (hasThumb2() && | |||
8864 | ARM_AM::getT2SOImmVal(Value) != -1) | |||
8865 | TmpInst.setOpcode(ARM::t2MOVi); | |||
8866 | else if (hasThumb2() && | |||
8867 | ARM_AM::getT2SOImmVal(~Value) != -1) { | |||
8868 | TmpInst.setOpcode(ARM::t2MVNi); | |||
8869 | Value = ~Value; | |||
8870 | } | |||
8871 | else if (hasV8MBaseline() && | |||
8872 | Value >=0 && Value < 65536) { | |||
8873 | TmpInst.setOpcode(ARM::t2MOVi16); | |||
8874 | MovHasS = false; | |||
8875 | } | |||
8876 | else | |||
8877 | UseMov = false; | |||
8878 | } | |||
8879 | if (UseMov) { | |||
8880 | TmpInst.addOperand(Inst.getOperand(0)); // Rt | |||
8881 | TmpInst.addOperand(MCOperand::createImm(Value)); // Immediate | |||
8882 | TmpInst.addOperand(Inst.getOperand(2)); // CondCode | |||
8883 | TmpInst.addOperand(Inst.getOperand(3)); // CondCode | |||
8884 | if (MovHasS) | |||
8885 | TmpInst.addOperand(MCOperand::createReg(0)); // S | |||
8886 | Inst = TmpInst; | |||
8887 | return true; | |||
8888 | } | |||
8889 | } | |||
8890 | // No opportunity to use MOV/MVN create constant pool | |||
8891 | const MCExpr *CPLoc = | |||
8892 | getTargetStreamer().addConstantPoolEntry(SubExprVal, | |||
8893 | PoolOperand.getStartLoc()); | |||
8894 | TmpInst.addOperand(Inst.getOperand(0)); // Rt | |||
8895 | TmpInst.addOperand(MCOperand::createExpr(CPLoc)); // offset to constpool | |||
8896 | if (TmpInst.getOpcode() == ARM::LDRi12) | |||
8897 | TmpInst.addOperand(MCOperand::createImm(0)); // unused offset | |||
8898 | TmpInst.addOperand(Inst.getOperand(2)); // CondCode | |||
8899 | TmpInst.addOperand(Inst.getOperand(3)); // CondCode | |||
8900 | Inst = TmpInst; | |||
8901 | return true; | |||
8902 | } | |||
8903 | // Handle NEON VST complex aliases. | |||
8904 | case ARM::VST1LNdWB_register_Asm_8: | |||
8905 | case ARM::VST1LNdWB_register_Asm_16: | |||
8906 | case ARM::VST1LNdWB_register_Asm_32: { | |||
8907 | MCInst TmpInst; | |||
8908 | // Shuffle the operands around so the lane index operand is in the | |||
8909 | // right place. | |||
8910 | unsigned Spacing; | |||
8911 | TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); | |||
8912 | TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb | |||
8913 | TmpInst.addOperand(Inst.getOperand(2)); // Rn | |||
8914 | TmpInst.addOperand(Inst.getOperand(3)); // alignment | |||
8915 | TmpInst.addOperand(Inst.getOperand(4)); // Rm | |||
8916 | TmpInst.addOperand(Inst.getOperand(0)); // Vd | |||
8917 | TmpInst.addOperand(Inst.getOperand(1)); // lane | |||
8918 | TmpInst.addOperand(Inst.getOperand(5)); // CondCode | |||
8919 | TmpInst.addOperand(Inst.getOperand(6)); | |||
8920 | Inst = TmpInst; | |||
8921 | return true; | |||
8922 | } | |||
8923 | ||||
8924 | case ARM::VST2LNdWB_register_Asm_8: | |||
8925 | case ARM::VST2LNdWB_register_Asm_16: | |||
8926 | case ARM::VST2LNdWB_register_Asm_32: | |||
8927 | case ARM::VST2LNqWB_register_Asm_16: | |||
8928 | case ARM::VST2LNqWB_register_Asm_32: { | |||
8929 | MCInst TmpInst; | |||
8930 | // Shuffle the operands around so the lane index operand is in the | |||
8931 | // right place. | |||
8932 | unsigned Spacing; | |||
8933 | TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); | |||
8934 | TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb | |||
8935 | TmpInst.addOperand(Inst.getOperand(2)); // Rn | |||
8936 | TmpInst.addOperand(Inst.getOperand(3)); // alignment | |||
8937 | TmpInst.addOperand(Inst.getOperand(4)); // Rm | |||
8938 | TmpInst.addOperand(Inst.getOperand(0)); // Vd | |||
8939 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + | |||
8940 | Spacing)); | |||
8941 | TmpInst.addOperand(Inst.getOperand(1)); // lane | |||
8942 | TmpInst.addOperand(Inst.getOperand(5)); // CondCode | |||
8943 | TmpInst.addOperand(Inst.getOperand(6)); | |||
8944 | Inst = TmpInst; | |||
8945 | return true; | |||
8946 | } | |||
8947 | ||||
  case ARM::VST3LNdWB_register_Asm_8:
  case ARM::VST3LNdWB_register_Asm_16:
  case ARM::VST3LNdWB_register_Asm_32:
  case ARM::VST3LNqWB_register_Asm_16:
  case ARM::VST3LNqWB_register_Asm_32: {
    MCInst TmpInst;
    // Expand the VST3LN post-indexed (register offset) asm pseudo into the
    // real three-register instruction.  Parsed pseudo operand order is
    // 0=Vd, 1=lane, 2=Rn, 3=alignment, 4=Rm, 5/6=predicate; the real
    // instruction wants the lane index after the register list.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(4)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    // Remaining list registers are implicit in the alias; synthesize them as
    // Vd+Spacing*k (relies on consecutive D regs having consecutive numbers).
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
    TmpInst.addOperand(Inst.getOperand(6)); // CondCode reg
    Inst = TmpInst;
    return true;
  }
8973 | ||||
  case ARM::VST4LNdWB_register_Asm_8:
  case ARM::VST4LNdWB_register_Asm_16:
  case ARM::VST4LNdWB_register_Asm_32:
  case ARM::VST4LNqWB_register_Asm_16:
  case ARM::VST4LNqWB_register_Asm_32: {
    MCInst TmpInst;
    // Expand the VST4LN post-indexed (register offset) asm pseudo into the
    // real four-register instruction.  Parsed pseudo operand order is
    // 0=Vd, 1=lane, 2=Rn, 3=alignment, 4=Rm, 5/6=predicate; the real
    // instruction wants the lane index after the register list.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(4)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    // Remaining list registers are implicit in the alias; synthesize them as
    // Vd+Spacing*k (relies on consecutive D regs having consecutive numbers).
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
    TmpInst.addOperand(Inst.getOperand(6)); // CondCode reg
    Inst = TmpInst;
    return true;
  }
9001 | ||||
  case ARM::VST1LNdWB_fixed_Asm_8:
  case ARM::VST1LNdWB_fixed_Asm_16:
  case ARM::VST1LNdWB_fixed_Asm_32: {
    MCInst TmpInst;
    // Expand the VST1LN post-indexed (fixed increment) asm pseudo into the
    // real instruction.  Parsed pseudo operand order is 0=Vd, 1=lane, 2=Rn,
    // 3=alignment, 4/5=predicate; the lane index moves after the register.
    // (Spacing is set by getRealVSTOpcode but unused in the 1-reg form.)
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm (0 == no offset reg)
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode reg
    Inst = TmpInst;
    return true;
  }
9021 | ||||
  case ARM::VST2LNdWB_fixed_Asm_8:
  case ARM::VST2LNdWB_fixed_Asm_16:
  case ARM::VST2LNdWB_fixed_Asm_32:
  case ARM::VST2LNqWB_fixed_Asm_16:
  case ARM::VST2LNqWB_fixed_Asm_32: {
    MCInst TmpInst;
    // Expand the VST2LN post-indexed (fixed increment) asm pseudo into the
    // real two-register instruction.  Parsed pseudo operand order is
    // 0=Vd, 1=lane, 2=Rn, 3=alignment, 4/5=predicate; the real instruction
    // wants the lane index after the register list.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm (0 == no offset reg)
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    // The second list register is implicit in the alias; synthesize it as
    // Vd+Spacing (relies on consecutive D regs having consecutive numbers).
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode reg
    Inst = TmpInst;
    return true;
  }
9045 | ||||
  case ARM::VST3LNdWB_fixed_Asm_8:
  case ARM::VST3LNdWB_fixed_Asm_16:
  case ARM::VST3LNdWB_fixed_Asm_32:
  case ARM::VST3LNqWB_fixed_Asm_16:
  case ARM::VST3LNqWB_fixed_Asm_32: {
    MCInst TmpInst;
    // Expand the VST3LN post-indexed (fixed increment) asm pseudo into the
    // real three-register instruction.  Parsed pseudo operand order is
    // 0=Vd, 1=lane, 2=Rn, 3=alignment, 4/5=predicate; the real instruction
    // wants the lane index after the register list.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm (0 == no offset reg)
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    // Remaining list registers are implicit in the alias; synthesize them as
    // Vd+Spacing*k (relies on consecutive D regs having consecutive numbers).
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode reg
    Inst = TmpInst;
    return true;
  }
9071 | ||||
  case ARM::VST4LNdWB_fixed_Asm_8:
  case ARM::VST4LNdWB_fixed_Asm_16:
  case ARM::VST4LNdWB_fixed_Asm_32:
  case ARM::VST4LNqWB_fixed_Asm_16:
  case ARM::VST4LNqWB_fixed_Asm_32: {
    MCInst TmpInst;
    // Expand the VST4LN post-indexed (fixed increment) asm pseudo into the
    // real four-register instruction.  Parsed pseudo operand order is
    // 0=Vd, 1=lane, 2=Rn, 3=alignment, 4/5=predicate; the real instruction
    // wants the lane index after the register list.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm (0 == no offset reg)
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    // Remaining list registers are implicit in the alias; synthesize them as
    // Vd+Spacing*k (relies on consecutive D regs having consecutive numbers).
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode reg
    Inst = TmpInst;
    return true;
  }
9099 | ||||
  case ARM::VST1LNdAsm_8:
  case ARM::VST1LNdAsm_16:
  case ARM::VST1LNdAsm_32: {
    MCInst TmpInst;
    // Expand the VST1LN (no writeback) asm pseudo into the real instruction.
    // Parsed pseudo operand order is 0=Vd, 1=lane, 2=Rn, 3=alignment,
    // 4/5=predicate; the lane index moves after the register.
    // (Spacing is set by getRealVSTOpcode but unused in the 1-reg form.)
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode reg
    Inst = TmpInst;
    return true;
  }
9117 | ||||
  case ARM::VST2LNdAsm_8:
  case ARM::VST2LNdAsm_16:
  case ARM::VST2LNdAsm_32:
  case ARM::VST2LNqAsm_16:
  case ARM::VST2LNqAsm_32: {
    MCInst TmpInst;
    // Expand the VST2LN (no writeback) asm pseudo into the real two-register
    // instruction.  Parsed pseudo operand order is 0=Vd, 1=lane, 2=Rn,
    // 3=alignment, 4/5=predicate; the lane index moves after the reg list.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    // The second list register is implicit in the alias; synthesize it as
    // Vd+Spacing (relies on consecutive D regs having consecutive numbers).
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode reg
    Inst = TmpInst;
    return true;
  }
9139 | ||||
  case ARM::VST3LNdAsm_8:
  case ARM::VST3LNdAsm_16:
  case ARM::VST3LNdAsm_32:
  case ARM::VST3LNqAsm_16:
  case ARM::VST3LNqAsm_32: {
    MCInst TmpInst;
    // Expand the VST3LN (no writeback) asm pseudo into the real
    // three-register instruction.  Parsed pseudo operand order is 0=Vd,
    // 1=lane, 2=Rn, 3=alignment, 4/5=predicate; the lane index moves after
    // the register list.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    // Remaining list registers are implicit in the alias; synthesize them as
    // Vd+Spacing*k (relies on consecutive D regs having consecutive numbers).
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode reg
    Inst = TmpInst;
    return true;
  }
9163 | ||||
  case ARM::VST4LNdAsm_8:
  case ARM::VST4LNdAsm_16:
  case ARM::VST4LNdAsm_32:
  case ARM::VST4LNqAsm_16:
  case ARM::VST4LNqAsm_32: {
    MCInst TmpInst;
    // Expand the VST4LN (no writeback) asm pseudo into the real
    // four-register instruction.  Parsed pseudo operand order is 0=Vd,
    // 1=lane, 2=Rn, 3=alignment, 4/5=predicate; the lane index moves after
    // the register list.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    // Remaining list registers are implicit in the alias; synthesize them as
    // Vd+Spacing*k (relies on consecutive D regs having consecutive numbers).
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode reg
    Inst = TmpInst;
    return true;
  }
9189 | ||||
9190 | // Handle NEON VLD complex aliases. | |||
  case ARM::VLD1LNdWB_register_Asm_8:
  case ARM::VLD1LNdWB_register_Asm_16:
  case ARM::VLD1LNdWB_register_Asm_32: {
    MCInst TmpInst;
    // Expand the VLD1LN post-indexed (register offset) asm pseudo into the
    // real instruction.  Parsed pseudo operand order is 0=Vd, 1=lane, 2=Rn,
    // 3=alignment, 4=Rm, 5/6=predicate; loads additionally need the tied
    // source register (== Vd) for the lanes that are not written.
    // (Spacing is set by getRealVLDOpcode but unused in the 1-reg form.)
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(4)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
    TmpInst.addOperand(Inst.getOperand(6)); // CondCode reg
    Inst = TmpInst;
    return true;
  }
9211 | ||||
  case ARM::VLD2LNdWB_register_Asm_8:
  case ARM::VLD2LNdWB_register_Asm_16:
  case ARM::VLD2LNdWB_register_Asm_32:
  case ARM::VLD2LNqWB_register_Asm_16:
  case ARM::VLD2LNqWB_register_Asm_32: {
    MCInst TmpInst;
    // Expand the VLD2LN post-indexed (register offset) asm pseudo into the
    // real two-register instruction.  Parsed pseudo operand order is
    // 0=Vd, 1=lane, 2=Rn, 3=alignment, 4=Rm, 5/6=predicate; loads also need
    // the tied source registers.  Implicit list registers are synthesized as
    // Vd+Spacing (relies on consecutive D regs having consecutive numbers).
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(4)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
    TmpInst.addOperand(Inst.getOperand(6)); // CondCode reg
    Inst = TmpInst;
    return true;
  }
9238 | ||||
  case ARM::VLD3LNdWB_register_Asm_8:
  case ARM::VLD3LNdWB_register_Asm_16:
  case ARM::VLD3LNdWB_register_Asm_32:
  case ARM::VLD3LNqWB_register_Asm_16:
  case ARM::VLD3LNqWB_register_Asm_32: {
    MCInst TmpInst;
    // Expand the VLD3LN post-indexed (register offset) asm pseudo into the
    // real three-register instruction.  Parsed pseudo operand order is
    // 0=Vd, 1=lane, 2=Rn, 3=alignment, 4=Rm, 5/6=predicate; loads also need
    // the tied source registers.  Implicit list registers are synthesized as
    // Vd+Spacing*k (relies on consecutive D regs having consecutive numbers).
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(4)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
    TmpInst.addOperand(Inst.getOperand(6)); // CondCode reg
    Inst = TmpInst;
    return true;
  }
9269 | ||||
  case ARM::VLD4LNdWB_register_Asm_8:
  case ARM::VLD4LNdWB_register_Asm_16:
  case ARM::VLD4LNdWB_register_Asm_32:
  case ARM::VLD4LNqWB_register_Asm_16:
  case ARM::VLD4LNqWB_register_Asm_32: {
    MCInst TmpInst;
    // Expand the VLD4LN post-indexed (register offset) asm pseudo into the
    // real four-register instruction.  Parsed pseudo operand order is
    // 0=Vd, 1=lane, 2=Rn, 3=alignment, 4=Rm, 5/6=predicate; loads also need
    // the tied source registers.  Implicit list registers are synthesized as
    // Vd+Spacing*k (relies on consecutive D regs having consecutive numbers).
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(4)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
    TmpInst.addOperand(Inst.getOperand(6)); // CondCode reg
    Inst = TmpInst;
    return true;
  }
9304 | ||||
  case ARM::VLD1LNdWB_fixed_Asm_8:
  case ARM::VLD1LNdWB_fixed_Asm_16:
  case ARM::VLD1LNdWB_fixed_Asm_32: {
    MCInst TmpInst;
    // Expand the VLD1LN post-indexed (fixed increment) asm pseudo into the
    // real instruction.  Parsed pseudo operand order is 0=Vd, 1=lane, 2=Rn,
    // 3=alignment, 4/5=predicate; loads additionally need the tied source
    // register (== Vd) for the lanes that are not written.
    // (Spacing is set by getRealVLDOpcode but unused in the 1-reg form.)
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm (0 == no offset reg)
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode reg
    Inst = TmpInst;
    return true;
  }
9325 | ||||
  case ARM::VLD2LNdWB_fixed_Asm_8:
  case ARM::VLD2LNdWB_fixed_Asm_16:
  case ARM::VLD2LNdWB_fixed_Asm_32:
  case ARM::VLD2LNqWB_fixed_Asm_16:
  case ARM::VLD2LNqWB_fixed_Asm_32: {
    MCInst TmpInst;
    // Expand the VLD2LN post-indexed (fixed increment) asm pseudo into the
    // real two-register instruction.  Parsed pseudo operand order is
    // 0=Vd, 1=lane, 2=Rn, 3=alignment, 4/5=predicate; loads also need the
    // tied source registers.  Implicit list registers are synthesized as
    // Vd+Spacing (relies on consecutive D regs having consecutive numbers).
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm (0 == no offset reg)
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode reg
    Inst = TmpInst;
    return true;
  }
9352 | ||||
  case ARM::VLD3LNdWB_fixed_Asm_8:
  case ARM::VLD3LNdWB_fixed_Asm_16:
  case ARM::VLD3LNdWB_fixed_Asm_32:
  case ARM::VLD3LNqWB_fixed_Asm_16:
  case ARM::VLD3LNqWB_fixed_Asm_32: {
    MCInst TmpInst;
    // Expand the VLD3LN post-indexed (fixed increment) asm pseudo into the
    // real three-register instruction.  Parsed pseudo operand order is
    // 0=Vd, 1=lane, 2=Rn, 3=alignment, 4/5=predicate; loads also need the
    // tied source registers.  Implicit list registers are synthesized as
    // Vd+Spacing*k (relies on consecutive D regs having consecutive numbers).
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm (0 == no offset reg)
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode reg
    Inst = TmpInst;
    return true;
  }
9383 | ||||
  case ARM::VLD4LNdWB_fixed_Asm_8:
  case ARM::VLD4LNdWB_fixed_Asm_16:
  case ARM::VLD4LNdWB_fixed_Asm_32:
  case ARM::VLD4LNqWB_fixed_Asm_16:
  case ARM::VLD4LNqWB_fixed_Asm_32: {
    MCInst TmpInst;
    // Expand the VLD4LN post-indexed (fixed increment) asm pseudo into the
    // real four-register instruction.  Parsed pseudo operand order is
    // 0=Vd, 1=lane, 2=Rn, 3=alignment, 4/5=predicate; loads also need the
    // tied source registers.  Implicit list registers are synthesized as
    // Vd+Spacing*k (relies on consecutive D regs having consecutive numbers).
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm (0 == no offset reg)
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode reg
    Inst = TmpInst;
    return true;
  }
9418 | ||||
  case ARM::VLD1LNdAsm_8:
  case ARM::VLD1LNdAsm_16:
  case ARM::VLD1LNdAsm_32: {
    MCInst TmpInst;
    // Expand the VLD1LN (no writeback) asm pseudo into the real instruction.
    // Parsed pseudo operand order is 0=Vd, 1=lane, 2=Rn, 3=alignment,
    // 4/5=predicate; loads additionally need the tied source register
    // (== Vd) for the lanes that are not written.
    // (Spacing is set by getRealVLDOpcode but unused in the 1-reg form.)
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode reg
    Inst = TmpInst;
    return true;
  }
9437 | ||||
  case ARM::VLD2LNdAsm_8:
  case ARM::VLD2LNdAsm_16:
  case ARM::VLD2LNdAsm_32:
  case ARM::VLD2LNqAsm_16:
  case ARM::VLD2LNqAsm_32: {
    MCInst TmpInst;
    // Expand the VLD2LN (no writeback) asm pseudo into the real two-register
    // instruction.  Parsed pseudo operand order is 0=Vd, 1=lane, 2=Rn,
    // 3=alignment, 4/5=predicate; loads also need the tied source registers.
    // Implicit list registers are synthesized as Vd+Spacing (relies on
    // consecutive D regs having consecutive numbers).
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode reg
    Inst = TmpInst;
    return true;
  }
9462 | ||||
  case ARM::VLD3LNdAsm_8:
  case ARM::VLD3LNdAsm_16:
  case ARM::VLD3LNdAsm_32:
  case ARM::VLD3LNqAsm_16:
  case ARM::VLD3LNqAsm_32: {
    MCInst TmpInst;
    // Expand the VLD3LN (no writeback) asm pseudo into the real
    // three-register instruction.  Parsed pseudo operand order is 0=Vd,
    // 1=lane, 2=Rn, 3=alignment, 4/5=predicate; loads also need the tied
    // source registers.  Implicit list registers are synthesized as
    // Vd+Spacing*k (relies on consecutive D regs having consecutive numbers).
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode reg
    Inst = TmpInst;
    return true;
  }
9491 | ||||
  case ARM::VLD4LNdAsm_8:
  case ARM::VLD4LNdAsm_16:
  case ARM::VLD4LNdAsm_32:
  case ARM::VLD4LNqAsm_16:
  case ARM::VLD4LNqAsm_32: {
    MCInst TmpInst;
    // Expand the VLD4LN (no writeback) asm pseudo into the real
    // four-register instruction.  Parsed pseudo operand order is 0=Vd,
    // 1=lane, 2=Rn, 3=alignment, 4/5=predicate; loads also need the tied
    // source registers.  Implicit list registers are synthesized as
    // Vd+Spacing*k (relies on consecutive D regs having consecutive numbers).
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode reg
    Inst = TmpInst;
    return true;
  }
9524 | ||||
9525 | // VLD3DUP single 3-element structure to all lanes instructions. | |||
  case ARM::VLD3DUPdAsm_8:
  case ARM::VLD3DUPdAsm_16:
  case ARM::VLD3DUPdAsm_32:
  case ARM::VLD3DUPqAsm_8:
  case ARM::VLD3DUPqAsm_16:
  case ARM::VLD3DUPqAsm_32: {
    MCInst TmpInst;
    // Expand the VLD3DUP (no writeback) asm pseudo into the real
    // three-register instruction.  Parsed pseudo operand order is 0=Vd,
    // 1=Rn, 2=alignment, 3/4=predicate.  The second and third list registers
    // are implicit in the alias and synthesized as Vd+Spacing*k (relies on
    // consecutive D regs having consecutive numbers).
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode reg
    Inst = TmpInst;
    return true;
  }
9547 | ||||
  case ARM::VLD3DUPdWB_fixed_Asm_8:
  case ARM::VLD3DUPdWB_fixed_Asm_16:
  case ARM::VLD3DUPdWB_fixed_Asm_32:
  case ARM::VLD3DUPqWB_fixed_Asm_8:
  case ARM::VLD3DUPqWB_fixed_Asm_16:
  case ARM::VLD3DUPqWB_fixed_Asm_32: {
    MCInst TmpInst;
    // Expand the VLD3DUP post-indexed (fixed increment) asm pseudo into the
    // real three-register instruction.  Parsed pseudo operand order is 0=Vd,
    // 1=Rn, 2=alignment, 3/4=predicate.  Implicit list registers are
    // synthesized as Vd+Spacing*k (relies on consecutive D regs having
    // consecutive numbers).
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm (0 == no offset reg)
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode reg
    Inst = TmpInst;
    return true;
  }
9571 | ||||
  case ARM::VLD3DUPdWB_register_Asm_8:
  case ARM::VLD3DUPdWB_register_Asm_16:
  case ARM::VLD3DUPdWB_register_Asm_32:
  case ARM::VLD3DUPqWB_register_Asm_8:
  case ARM::VLD3DUPqWB_register_Asm_16:
  case ARM::VLD3DUPqWB_register_Asm_32: {
    MCInst TmpInst;
    // Expand the VLD3DUP post-indexed (register offset) asm pseudo into the
    // real three-register instruction.  Parsed pseudo operand order is 0=Vd,
    // 1=Rn, 2=alignment, 3=Rm, 4/5=predicate.  Implicit list registers are
    // synthesized as Vd+Spacing*k (relies on consecutive D regs having
    // consecutive numbers).
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // Rm
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode reg
    Inst = TmpInst;
    return true;
  }
9595 | ||||
9596 | // VLD3 multiple 3-element structure instructions. | |||
  case ARM::VLD3dAsm_8:
  case ARM::VLD3dAsm_16:
  case ARM::VLD3dAsm_32:
  case ARM::VLD3qAsm_8:
  case ARM::VLD3qAsm_16:
  case ARM::VLD3qAsm_32: {
    MCInst TmpInst;
    // Expand the VLD3 (multiple structures, no writeback) asm pseudo into
    // the real three-register instruction.  Parsed pseudo operand order is
    // 0=Vd, 1=Rn, 2=alignment, 3/4=predicate.  Implicit list registers are
    // synthesized as Vd+Spacing*k (relies on consecutive D regs having
    // consecutive numbers).
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode reg
    Inst = TmpInst;
    return true;
  }
9618 | ||||
9619 | case ARM::VLD3dWB_fixed_Asm_8: | |||
9620 | case ARM::VLD3dWB_fixed_Asm_16: | |||
9621 | case ARM::VLD3dWB_fixed_Asm_32: | |||
9622 | case ARM::VLD3qWB_fixed_Asm_8: | |||
9623 | case ARM::VLD3qWB_fixed_Asm_16: | |||
9624 | case ARM::VLD3qWB_fixed_Asm_32: { | |||
9625 | MCInst TmpInst; | |||
9626 | unsigned Spacing; | |||
9627 | TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); | |||
9628 | TmpInst.addOperand(Inst.getOperand(0)); // Vd | |||
9629 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + | |||
9630 | Spacing)); | |||
9631 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + | |||
9632 | Spacing * 2)); | |||
9633 | TmpInst.addOperand(Inst.getOperand(1)); // Rn | |||
9634 | TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn | |||
9635 | TmpInst.addOperand(Inst.getOperand(2)); // alignment | |||
9636 | TmpInst.addOperand(MCOperand::createReg(0)); // Rm | |||
9637 | TmpInst.addOperand(Inst.getOperand(3)); // CondCode | |||
9638 | TmpInst.addOperand(Inst.getOperand(4)); | |||
9639 | Inst = TmpInst; | |||
9640 | return true; | |||
9641 | } | |||
9642 | ||||
9643 | case ARM::VLD3dWB_register_Asm_8: | |||
9644 | case ARM::VLD3dWB_register_Asm_16: | |||
9645 | case ARM::VLD3dWB_register_Asm_32: | |||
9646 | case ARM::VLD3qWB_register_Asm_8: | |||
9647 | case ARM::VLD3qWB_register_Asm_16: | |||
9648 | case ARM::VLD3qWB_register_Asm_32: { | |||
9649 | MCInst TmpInst; | |||
9650 | unsigned Spacing; | |||
9651 | TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); | |||
9652 | TmpInst.addOperand(Inst.getOperand(0)); // Vd | |||
9653 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + | |||
9654 | Spacing)); | |||
9655 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + | |||
9656 | Spacing * 2)); | |||
9657 | TmpInst.addOperand(Inst.getOperand(1)); // Rn | |||
9658 | TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn | |||
9659 | TmpInst.addOperand(Inst.getOperand(2)); // alignment | |||
9660 | TmpInst.addOperand(Inst.getOperand(3)); // Rm | |||
9661 | TmpInst.addOperand(Inst.getOperand(4)); // CondCode | |||
9662 | TmpInst.addOperand(Inst.getOperand(5)); | |||
9663 | Inst = TmpInst; | |||
9664 | return true; | |||
9665 | } | |||
9666 | ||||
9667 | // VLD4DUP single 4-element structure to all lanes instructions. | |||
9668 | case ARM::VLD4DUPdAsm_8: | |||
9669 | case ARM::VLD4DUPdAsm_16: | |||
9670 | case ARM::VLD4DUPdAsm_32: | |||
9671 | case ARM::VLD4DUPqAsm_8: | |||
9672 | case ARM::VLD4DUPqAsm_16: | |||
9673 | case ARM::VLD4DUPqAsm_32: { | |||
9674 | MCInst TmpInst; | |||
9675 | unsigned Spacing; | |||
9676 | TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); | |||
9677 | TmpInst.addOperand(Inst.getOperand(0)); // Vd | |||
9678 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + | |||
9679 | Spacing)); | |||
9680 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + | |||
9681 | Spacing * 2)); | |||
9682 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + | |||
9683 | Spacing * 3)); | |||
9684 | TmpInst.addOperand(Inst.getOperand(1)); // Rn | |||
9685 | TmpInst.addOperand(Inst.getOperand(2)); // alignment | |||
9686 | TmpInst.addOperand(Inst.getOperand(3)); // CondCode | |||
9687 | TmpInst.addOperand(Inst.getOperand(4)); | |||
9688 | Inst = TmpInst; | |||
9689 | return true; | |||
9690 | } | |||
9691 | ||||
9692 | case ARM::VLD4DUPdWB_fixed_Asm_8: | |||
9693 | case ARM::VLD4DUPdWB_fixed_Asm_16: | |||
9694 | case ARM::VLD4DUPdWB_fixed_Asm_32: | |||
9695 | case ARM::VLD4DUPqWB_fixed_Asm_8: | |||
9696 | case ARM::VLD4DUPqWB_fixed_Asm_16: | |||
9697 | case ARM::VLD4DUPqWB_fixed_Asm_32: { | |||
9698 | MCInst TmpInst; | |||
9699 | unsigned Spacing; | |||
9700 | TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); | |||
9701 | TmpInst.addOperand(Inst.getOperand(0)); // Vd | |||
9702 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + | |||
9703 | Spacing)); | |||
9704 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + | |||
9705 | Spacing * 2)); | |||
9706 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + | |||
9707 | Spacing * 3)); | |||
9708 | TmpInst.addOperand(Inst.getOperand(1)); // Rn | |||
9709 | TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn | |||
9710 | TmpInst.addOperand(Inst.getOperand(2)); // alignment | |||
9711 | TmpInst.addOperand(MCOperand::createReg(0)); // Rm | |||
9712 | TmpInst.addOperand(Inst.getOperand(3)); // CondCode | |||
9713 | TmpInst.addOperand(Inst.getOperand(4)); | |||
9714 | Inst = TmpInst; | |||
9715 | return true; | |||
9716 | } | |||
9717 | ||||
9718 | case ARM::VLD4DUPdWB_register_Asm_8: | |||
9719 | case ARM::VLD4DUPdWB_register_Asm_16: | |||
9720 | case ARM::VLD4DUPdWB_register_Asm_32: | |||
9721 | case ARM::VLD4DUPqWB_register_Asm_8: | |||
9722 | case ARM::VLD4DUPqWB_register_Asm_16: | |||
9723 | case ARM::VLD4DUPqWB_register_Asm_32: { | |||
9724 | MCInst TmpInst; | |||
9725 | unsigned Spacing; | |||
9726 | TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); | |||
9727 | TmpInst.addOperand(Inst.getOperand(0)); // Vd | |||
9728 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + | |||
9729 | Spacing)); | |||
9730 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + | |||
9731 | Spacing * 2)); | |||
9732 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + | |||
9733 | Spacing * 3)); | |||
9734 | TmpInst.addOperand(Inst.getOperand(1)); // Rn | |||
9735 | TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn | |||
9736 | TmpInst.addOperand(Inst.getOperand(2)); // alignment | |||
9737 | TmpInst.addOperand(Inst.getOperand(3)); // Rm | |||
9738 | TmpInst.addOperand(Inst.getOperand(4)); // CondCode | |||
9739 | TmpInst.addOperand(Inst.getOperand(5)); | |||
9740 | Inst = TmpInst; | |||
9741 | return true; | |||
9742 | } | |||
9743 | ||||
9744 | // VLD4 multiple 4-element structure instructions. | |||
9745 | case ARM::VLD4dAsm_8: | |||
9746 | case ARM::VLD4dAsm_16: | |||
9747 | case ARM::VLD4dAsm_32: | |||
9748 | case ARM::VLD4qAsm_8: | |||
9749 | case ARM::VLD4qAsm_16: | |||
9750 | case ARM::VLD4qAsm_32: { | |||
9751 | MCInst TmpInst; | |||
9752 | unsigned Spacing; | |||
9753 | TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); | |||
9754 | TmpInst.addOperand(Inst.getOperand(0)); // Vd | |||
9755 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + | |||
9756 | Spacing)); | |||
9757 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + | |||
9758 | Spacing * 2)); | |||
9759 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + | |||
9760 | Spacing * 3)); | |||
9761 | TmpInst.addOperand(Inst.getOperand(1)); // Rn | |||
9762 | TmpInst.addOperand(Inst.getOperand(2)); // alignment | |||
9763 | TmpInst.addOperand(Inst.getOperand(3)); // CondCode | |||
9764 | TmpInst.addOperand(Inst.getOperand(4)); | |||
9765 | Inst = TmpInst; | |||
9766 | return true; | |||
9767 | } | |||
9768 | ||||
9769 | case ARM::VLD4dWB_fixed_Asm_8: | |||
9770 | case ARM::VLD4dWB_fixed_Asm_16: | |||
9771 | case ARM::VLD4dWB_fixed_Asm_32: | |||
9772 | case ARM::VLD4qWB_fixed_Asm_8: | |||
9773 | case ARM::VLD4qWB_fixed_Asm_16: | |||
9774 | case ARM::VLD4qWB_fixed_Asm_32: { | |||
9775 | MCInst TmpInst; | |||
9776 | unsigned Spacing; | |||
9777 | TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); | |||
9778 | TmpInst.addOperand(Inst.getOperand(0)); // Vd | |||
9779 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + | |||
9780 | Spacing)); | |||
9781 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + | |||
9782 | Spacing * 2)); | |||
9783 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + | |||
9784 | Spacing * 3)); | |||
9785 | TmpInst.addOperand(Inst.getOperand(1)); // Rn | |||
9786 | TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn | |||
9787 | TmpInst.addOperand(Inst.getOperand(2)); // alignment | |||
9788 | TmpInst.addOperand(MCOperand::createReg(0)); // Rm | |||
9789 | TmpInst.addOperand(Inst.getOperand(3)); // CondCode | |||
9790 | TmpInst.addOperand(Inst.getOperand(4)); | |||
9791 | Inst = TmpInst; | |||
9792 | return true; | |||
9793 | } | |||
9794 | ||||
9795 | case ARM::VLD4dWB_register_Asm_8: | |||
9796 | case ARM::VLD4dWB_register_Asm_16: | |||
9797 | case ARM::VLD4dWB_register_Asm_32: | |||
9798 | case ARM::VLD4qWB_register_Asm_8: | |||
9799 | case ARM::VLD4qWB_register_Asm_16: | |||
9800 | case ARM::VLD4qWB_register_Asm_32: { | |||
9801 | MCInst TmpInst; | |||
9802 | unsigned Spacing; | |||
9803 | TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); | |||
9804 | TmpInst.addOperand(Inst.getOperand(0)); // Vd | |||
9805 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + | |||
9806 | Spacing)); | |||
9807 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + | |||
9808 | Spacing * 2)); | |||
9809 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + | |||
9810 | Spacing * 3)); | |||
9811 | TmpInst.addOperand(Inst.getOperand(1)); // Rn | |||
9812 | TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn | |||
9813 | TmpInst.addOperand(Inst.getOperand(2)); // alignment | |||
9814 | TmpInst.addOperand(Inst.getOperand(3)); // Rm | |||
9815 | TmpInst.addOperand(Inst.getOperand(4)); // CondCode | |||
9816 | TmpInst.addOperand(Inst.getOperand(5)); | |||
9817 | Inst = TmpInst; | |||
9818 | return true; | |||
9819 | } | |||
9820 | ||||
9821 | // VST3 multiple 3-element structure instructions. | |||
9822 | case ARM::VST3dAsm_8: | |||
9823 | case ARM::VST3dAsm_16: | |||
9824 | case ARM::VST3dAsm_32: | |||
9825 | case ARM::VST3qAsm_8: | |||
9826 | case ARM::VST3qAsm_16: | |||
9827 | case ARM::VST3qAsm_32: { | |||
9828 | MCInst TmpInst; | |||
9829 | unsigned Spacing; | |||
9830 | TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); | |||
9831 | TmpInst.addOperand(Inst.getOperand(1)); // Rn | |||
9832 | TmpInst.addOperand(Inst.getOperand(2)); // alignment | |||
9833 | TmpInst.addOperand(Inst.getOperand(0)); // Vd | |||
9834 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + | |||
9835 | Spacing)); | |||
9836 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + | |||
9837 | Spacing * 2)); | |||
9838 | TmpInst.addOperand(Inst.getOperand(3)); // CondCode | |||
9839 | TmpInst.addOperand(Inst.getOperand(4)); | |||
9840 | Inst = TmpInst; | |||
9841 | return true; | |||
9842 | } | |||
9843 | ||||
9844 | case ARM::VST3dWB_fixed_Asm_8: | |||
9845 | case ARM::VST3dWB_fixed_Asm_16: | |||
9846 | case ARM::VST3dWB_fixed_Asm_32: | |||
9847 | case ARM::VST3qWB_fixed_Asm_8: | |||
9848 | case ARM::VST3qWB_fixed_Asm_16: | |||
9849 | case ARM::VST3qWB_fixed_Asm_32: { | |||
9850 | MCInst TmpInst; | |||
9851 | unsigned Spacing; | |||
9852 | TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); | |||
9853 | TmpInst.addOperand(Inst.getOperand(1)); // Rn | |||
9854 | TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn | |||
9855 | TmpInst.addOperand(Inst.getOperand(2)); // alignment | |||
9856 | TmpInst.addOperand(MCOperand::createReg(0)); // Rm | |||
9857 | TmpInst.addOperand(Inst.getOperand(0)); // Vd | |||
9858 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + | |||
9859 | Spacing)); | |||
9860 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + | |||
9861 | Spacing * 2)); | |||
9862 | TmpInst.addOperand(Inst.getOperand(3)); // CondCode | |||
9863 | TmpInst.addOperand(Inst.getOperand(4)); | |||
9864 | Inst = TmpInst; | |||
9865 | return true; | |||
9866 | } | |||
9867 | ||||
9868 | case ARM::VST3dWB_register_Asm_8: | |||
9869 | case ARM::VST3dWB_register_Asm_16: | |||
9870 | case ARM::VST3dWB_register_Asm_32: | |||
9871 | case ARM::VST3qWB_register_Asm_8: | |||
9872 | case ARM::VST3qWB_register_Asm_16: | |||
9873 | case ARM::VST3qWB_register_Asm_32: { | |||
9874 | MCInst TmpInst; | |||
9875 | unsigned Spacing; | |||
9876 | TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); | |||
9877 | TmpInst.addOperand(Inst.getOperand(1)); // Rn | |||
9878 | TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn | |||
9879 | TmpInst.addOperand(Inst.getOperand(2)); // alignment | |||
9880 | TmpInst.addOperand(Inst.getOperand(3)); // Rm | |||
9881 | TmpInst.addOperand(Inst.getOperand(0)); // Vd | |||
9882 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + | |||
9883 | Spacing)); | |||
9884 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + | |||
9885 | Spacing * 2)); | |||
9886 | TmpInst.addOperand(Inst.getOperand(4)); // CondCode | |||
9887 | TmpInst.addOperand(Inst.getOperand(5)); | |||
9888 | Inst = TmpInst; | |||
9889 | return true; | |||
9890 | } | |||
9891 | ||||
9892 | // VST4 multiple 4-element structure instructions. | |||
9893 | case ARM::VST4dAsm_8: | |||
9894 | case ARM::VST4dAsm_16: | |||
9895 | case ARM::VST4dAsm_32: | |||
9896 | case ARM::VST4qAsm_8: | |||
9897 | case ARM::VST4qAsm_16: | |||
9898 | case ARM::VST4qAsm_32: { | |||
9899 | MCInst TmpInst; | |||
9900 | unsigned Spacing; | |||
9901 | TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); | |||
9902 | TmpInst.addOperand(Inst.getOperand(1)); // Rn | |||
9903 | TmpInst.addOperand(Inst.getOperand(2)); // alignment | |||
9904 | TmpInst.addOperand(Inst.getOperand(0)); // Vd | |||
9905 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + | |||
9906 | Spacing)); | |||
9907 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + | |||
9908 | Spacing * 2)); | |||
9909 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + | |||
9910 | Spacing * 3)); | |||
9911 | TmpInst.addOperand(Inst.getOperand(3)); // CondCode | |||
9912 | TmpInst.addOperand(Inst.getOperand(4)); | |||
9913 | Inst = TmpInst; | |||
9914 | return true; | |||
9915 | } | |||
9916 | ||||
9917 | case ARM::VST4dWB_fixed_Asm_8: | |||
9918 | case ARM::VST4dWB_fixed_Asm_16: | |||
9919 | case ARM::VST4dWB_fixed_Asm_32: | |||
9920 | case ARM::VST4qWB_fixed_Asm_8: | |||
9921 | case ARM::VST4qWB_fixed_Asm_16: | |||
9922 | case ARM::VST4qWB_fixed_Asm_32: { | |||
9923 | MCInst TmpInst; | |||
9924 | unsigned Spacing; | |||
9925 | TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); | |||
9926 | TmpInst.addOperand(Inst.getOperand(1)); // Rn | |||
9927 | TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn | |||
9928 | TmpInst.addOperand(Inst.getOperand(2)); // alignment | |||
9929 | TmpInst.addOperand(MCOperand::createReg(0)); // Rm | |||
9930 | TmpInst.addOperand(Inst.getOperand(0)); // Vd | |||
9931 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + | |||
9932 | Spacing)); | |||
9933 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + | |||
9934 | Spacing * 2)); | |||
9935 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + | |||
9936 | Spacing * 3)); | |||
9937 | TmpInst.addOperand(Inst.getOperand(3)); // CondCode | |||
9938 | TmpInst.addOperand(Inst.getOperand(4)); | |||
9939 | Inst = TmpInst; | |||
9940 | return true; | |||
9941 | } | |||
9942 | ||||
9943 | case ARM::VST4dWB_register_Asm_8: | |||
9944 | case ARM::VST4dWB_register_Asm_16: | |||
9945 | case ARM::VST4dWB_register_Asm_32: | |||
9946 | case ARM::VST4qWB_register_Asm_8: | |||
9947 | case ARM::VST4qWB_register_Asm_16: | |||
9948 | case ARM::VST4qWB_register_Asm_32: { | |||
9949 | MCInst TmpInst; | |||
9950 | unsigned Spacing; | |||
9951 | TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); | |||
9952 | TmpInst.addOperand(Inst.getOperand(1)); // Rn | |||
9953 | TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn | |||
9954 | TmpInst.addOperand(Inst.getOperand(2)); // alignment | |||
9955 | TmpInst.addOperand(Inst.getOperand(3)); // Rm | |||
9956 | TmpInst.addOperand(Inst.getOperand(0)); // Vd | |||
9957 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + | |||
9958 | Spacing)); | |||
9959 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + | |||
9960 | Spacing * 2)); | |||
9961 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + | |||
9962 | Spacing * 3)); | |||
9963 | TmpInst.addOperand(Inst.getOperand(4)); // CondCode | |||
9964 | TmpInst.addOperand(Inst.getOperand(5)); | |||
9965 | Inst = TmpInst; | |||
9966 | return true; | |||
9967 | } | |||
9968 | ||||
9969 | // Handle encoding choice for the shift-immediate instructions. | |||
9970 | case ARM::t2LSLri: | |||
9971 | case ARM::t2LSRri: | |||
9972 | case ARM::t2ASRri: | |||
9973 | if (isARMLowRegister(Inst.getOperand(0).getReg()) && | |||
9974 | isARMLowRegister(Inst.getOperand(1).getReg()) && | |||
9975 | Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) && | |||
9976 | !HasWideQualifier) { | |||
9977 | unsigned NewOpc; | |||
9978 | switch (Inst.getOpcode()) { | |||
9979 | default: llvm_unreachable("unexpected opcode")::llvm::llvm_unreachable_internal("unexpected opcode", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 9979); | |||
9980 | case ARM::t2LSLri: NewOpc = ARM::tLSLri; break; | |||
9981 | case ARM::t2LSRri: NewOpc = ARM::tLSRri; break; | |||
9982 | case ARM::t2ASRri: NewOpc = ARM::tASRri; break; | |||
9983 | } | |||
9984 | // The Thumb1 operands aren't in the same order. Awesome, eh? | |||
9985 | MCInst TmpInst; | |||
9986 | TmpInst.setOpcode(NewOpc); | |||
9987 | TmpInst.addOperand(Inst.getOperand(0)); | |||
9988 | TmpInst.addOperand(Inst.getOperand(5)); | |||
9989 | TmpInst.addOperand(Inst.getOperand(1)); | |||
9990 | TmpInst.addOperand(Inst.getOperand(2)); | |||
9991 | TmpInst.addOperand(Inst.getOperand(3)); | |||
9992 | TmpInst.addOperand(Inst.getOperand(4)); | |||
9993 | Inst = TmpInst; | |||
9994 | return true; | |||
9995 | } | |||
9996 | return false; | |||
9997 | ||||
9998 | // Handle the Thumb2 mode MOV complex aliases. | |||
9999 | case ARM::t2MOVsr: | |||
10000 | case ARM::t2MOVSsr: { | |||
10001 | // Which instruction to expand to depends on the CCOut operand and | |||
10002 | // whether we're in an IT block if the register operands are low | |||
10003 | // registers. | |||
10004 | bool isNarrow = false; | |||
10005 | if (isARMLowRegister(Inst.getOperand(0).getReg()) && | |||
10006 | isARMLowRegister(Inst.getOperand(1).getReg()) && | |||
10007 | isARMLowRegister(Inst.getOperand(2).getReg()) && | |||
10008 | Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() && | |||
10009 | inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr) && | |||
10010 | !HasWideQualifier) | |||
10011 | isNarrow = true; | |||
10012 | MCInst TmpInst; | |||
10013 | unsigned newOpc; | |||
10014 | switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) { | |||
10015 | default: llvm_unreachable("unexpected opcode!")::llvm::llvm_unreachable_internal("unexpected opcode!", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 10015); | |||
10016 | case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break; | |||
10017 | case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break; | |||
10018 | case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break; | |||
10019 | case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR : ARM::t2RORrr; break; | |||
10020 | } | |||
10021 | TmpInst.setOpcode(newOpc); | |||
10022 | TmpInst.addOperand(Inst.getOperand(0)); // Rd | |||
10023 | if (isNarrow) | |||
10024 | TmpInst.addOperand(MCOperand::createReg( | |||
10025 | Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0)); | |||
10026 | TmpInst.addOperand(Inst.getOperand(1)); // Rn | |||
10027 | TmpInst.addOperand(Inst.getOperand(2)); // Rm | |||
10028 | TmpInst.addOperand(Inst.getOperand(4)); // CondCode | |||
10029 | TmpInst.addOperand(Inst.getOperand(5)); | |||
10030 | if (!isNarrow) | |||
10031 | TmpInst.addOperand(MCOperand::createReg( | |||
10032 | Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0)); | |||
10033 | Inst = TmpInst; | |||
10034 | return true; | |||
10035 | } | |||
10036 | case ARM::t2MOVsi: | |||
10037 | case ARM::t2MOVSsi: { | |||
10038 | // Which instruction to expand to depends on the CCOut operand and | |||
10039 | // whether we're in an IT block if the register operands are low | |||
10040 | // registers. | |||
10041 | bool isNarrow = false; | |||
10042 | if (isARMLowRegister(Inst.getOperand(0).getReg()) && | |||
10043 | isARMLowRegister(Inst.getOperand(1).getReg()) && | |||
10044 | inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi) && | |||
10045 | !HasWideQualifier) | |||
10046 | isNarrow = true; | |||
10047 | MCInst TmpInst; | |||
10048 | unsigned newOpc; | |||
10049 | unsigned Shift = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm()); | |||
10050 | unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()); | |||
10051 | bool isMov = false; | |||
10052 | // MOV rd, rm, LSL #0 is actually a MOV instruction | |||
10053 | if (Shift == ARM_AM::lsl && Amount == 0) { | |||
10054 | isMov = true; | |||
10055 | // The 16-bit encoding of MOV rd, rm, LSL #N is explicitly encoding T2 of | |||
10056 | // MOV (register) in the ARMv8-A and ARMv8-M manuals, and immediate 0 is | |||
10057 | // unpredictable in an IT block so the 32-bit encoding T3 has to be used | |||
10058 | // instead. | |||
10059 | if (inITBlock()) { | |||
10060 | isNarrow = false; | |||
10061 | } | |||
10062 | newOpc = isNarrow ? ARM::tMOVSr : ARM::t2MOVr; | |||
10063 | } else { | |||
10064 | switch(Shift) { | |||
10065 | default: llvm_unreachable("unexpected opcode!")::llvm::llvm_unreachable_internal("unexpected opcode!", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 10065); | |||
10066 | case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break; | |||
10067 | case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break; | |||
10068 | case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break; | |||
10069 | case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break; | |||
10070 | case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break; | |||
10071 | } | |||
10072 | } | |||
10073 | if (Amount == 32) Amount = 0; | |||
10074 | TmpInst.setOpcode(newOpc); | |||
10075 | TmpInst.addOperand(Inst.getOperand(0)); // Rd | |||
10076 | if (isNarrow && !isMov) | |||
10077 | TmpInst.addOperand(MCOperand::createReg( | |||
10078 | Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0)); | |||
10079 | TmpInst.addOperand(Inst.getOperand(1)); // Rn | |||
10080 | if (newOpc != ARM::t2RRX && !isMov) | |||
10081 | TmpInst.addOperand(MCOperand::createImm(Amount)); | |||
10082 | TmpInst.addOperand(Inst.getOperand(3)); // CondCode | |||
10083 | TmpInst.addOperand(Inst.getOperand(4)); | |||
10084 | if (!isNarrow) | |||
10085 | TmpInst.addOperand(MCOperand::createReg( | |||
10086 | Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0)); | |||
10087 | Inst = TmpInst; | |||
10088 | return true; | |||
10089 | } | |||
10090 | // Handle the ARM mode MOV complex aliases. | |||
10091 | case ARM::ASRr: | |||
10092 | case ARM::LSRr: | |||
10093 | case ARM::LSLr: | |||
10094 | case ARM::RORr: { | |||
10095 | ARM_AM::ShiftOpc ShiftTy; | |||
10096 | switch(Inst.getOpcode()) { | |||
10097 | default: llvm_unreachable("unexpected opcode!")::llvm::llvm_unreachable_internal("unexpected opcode!", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 10097); | |||
10098 | case ARM::ASRr: ShiftTy = ARM_AM::asr; break; | |||
10099 | case ARM::LSRr: ShiftTy = ARM_AM::lsr; break; | |||
10100 | case ARM::LSLr: ShiftTy = ARM_AM::lsl; break; | |||
10101 | case ARM::RORr: ShiftTy = ARM_AM::ror; break; | |||
10102 | } | |||
10103 | unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0); | |||
10104 | MCInst TmpInst; | |||
10105 | TmpInst.setOpcode(ARM::MOVsr); | |||
10106 | TmpInst.addOperand(Inst.getOperand(0)); // Rd | |||
10107 | TmpInst.addOperand(Inst.getOperand(1)); // Rn | |||
10108 | TmpInst.addOperand(Inst.getOperand(2)); // Rm | |||
10109 | TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty | |||
10110 | TmpInst.addOperand(Inst.getOperand(3)); // CondCode | |||
10111 | TmpInst.addOperand(Inst.getOperand(4)); | |||
10112 | TmpInst.addOperand(Inst.getOperand(5)); // cc_out | |||
10113 | Inst = TmpInst; | |||
10114 | return true; | |||
10115 | } | |||
10116 | case ARM::ASRi: | |||
10117 | case ARM::LSRi: | |||
10118 | case ARM::LSLi: | |||
10119 | case ARM::RORi: { | |||
10120 | ARM_AM::ShiftOpc ShiftTy; | |||
10121 | switch(Inst.getOpcode()) { | |||
10122 | default: llvm_unreachable("unexpected opcode!")::llvm::llvm_unreachable_internal("unexpected opcode!", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 10122); | |||
10123 | case ARM::ASRi: ShiftTy = ARM_AM::asr; break; | |||
10124 | case ARM::LSRi: ShiftTy = ARM_AM::lsr; break; | |||
10125 | case ARM::LSLi: ShiftTy = ARM_AM::lsl; break; | |||
10126 | case ARM::RORi: ShiftTy = ARM_AM::ror; break; | |||
10127 | } | |||
10128 | // A shift by zero is a plain MOVr, not a MOVsi. | |||
10129 | unsigned Amt = Inst.getOperand(2).getImm(); | |||
10130 | unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi; | |||
10131 | // A shift by 32 should be encoded as 0 when permitted | |||
10132 | if (Amt == 32 && (ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr)) | |||
10133 | Amt = 0; | |||
10134 | unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt); | |||
10135 | MCInst TmpInst; | |||
10136 | TmpInst.setOpcode(Opc); | |||
10137 | TmpInst.addOperand(Inst.getOperand(0)); // Rd | |||
10138 | TmpInst.addOperand(Inst.getOperand(1)); // Rn | |||
10139 | if (Opc == ARM::MOVsi) | |||
10140 | TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty | |||
10141 | TmpInst.addOperand(Inst.getOperand(3)); // CondCode | |||
10142 | TmpInst.addOperand(Inst.getOperand(4)); | |||
10143 | TmpInst.addOperand(Inst.getOperand(5)); // cc_out | |||
10144 | Inst = TmpInst; | |||
10145 | return true; | |||
10146 | } | |||
10147 | case ARM::RRXi: { | |||
10148 | unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0); | |||
10149 | MCInst TmpInst; | |||
10150 | TmpInst.setOpcode(ARM::MOVsi); | |||
10151 | TmpInst.addOperand(Inst.getOperand(0)); // Rd | |||
10152 | TmpInst.addOperand(Inst.getOperand(1)); // Rn | |||
10153 | TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty | |||
10154 | TmpInst.addOperand(Inst.getOperand(2)); // CondCode | |||
10155 | TmpInst.addOperand(Inst.getOperand(3)); | |||
10156 | TmpInst.addOperand(Inst.getOperand(4)); // cc_out | |||
10157 | Inst = TmpInst; | |||
10158 | return true; | |||
10159 | } | |||
10160 | case ARM::t2LDMIA_UPD: { | |||
10161 | // If this is a load of a single register, then we should use | |||
10162 | // a post-indexed LDR instruction instead, per the ARM ARM. | |||
10163 | if (Inst.getNumOperands() != 5) | |||
10164 | return false; | |||
10165 | MCInst TmpInst; | |||
10166 | TmpInst.setOpcode(ARM::t2LDR_POST); | |||
10167 | TmpInst.addOperand(Inst.getOperand(4)); // Rt | |||
10168 | TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb | |||
10169 | TmpInst.addOperand(Inst.getOperand(1)); // Rn | |||
10170 | TmpInst.addOperand(MCOperand::createImm(4)); | |||
10171 | TmpInst.addOperand(Inst.getOperand(2)); // CondCode | |||
10172 | TmpInst.addOperand(Inst.getOperand(3)); | |||
10173 | Inst = TmpInst; | |||
10174 | return true; | |||
10175 | } | |||
10176 | case ARM::t2STMDB_UPD: { | |||
10177 | // If this is a store of a single register, then we should use | |||
10178 | // a pre-indexed STR instruction instead, per the ARM ARM. | |||
10179 | if (Inst.getNumOperands() != 5) | |||
10180 | return false; | |||
10181 | MCInst TmpInst; | |||
10182 | TmpInst.setOpcode(ARM::t2STR_PRE); | |||
10183 | TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb | |||
10184 | TmpInst.addOperand(Inst.getOperand(4)); // Rt | |||
10185 | TmpInst.addOperand(Inst.getOperand(1)); // Rn | |||
10186 | TmpInst.addOperand(MCOperand::createImm(-4)); | |||
10187 | TmpInst.addOperand(Inst.getOperand(2)); // CondCode | |||
10188 | TmpInst.addOperand(Inst.getOperand(3)); | |||
10189 | Inst = TmpInst; | |||
10190 | return true; | |||
10191 | } | |||
10192 | case ARM::LDMIA_UPD: | |||
10193 | // If this is a load of a single register via a 'pop', then we should use | |||
10194 | // a post-indexed LDR instruction instead, per the ARM ARM. | |||
10195 | if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "pop" && | |||
10196 | Inst.getNumOperands() == 5) { | |||
10197 | MCInst TmpInst; | |||
10198 | TmpInst.setOpcode(ARM::LDR_POST_IMM); | |||
10199 | TmpInst.addOperand(Inst.getOperand(4)); // Rt | |||
10200 | TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb | |||
10201 | TmpInst.addOperand(Inst.getOperand(1)); // Rn | |||
10202 | TmpInst.addOperand(MCOperand::createReg(0)); // am2offset | |||
10203 | TmpInst.addOperand(MCOperand::createImm(4)); | |||
10204 | TmpInst.addOperand(Inst.getOperand(2)); // CondCode | |||
10205 | TmpInst.addOperand(Inst.getOperand(3)); | |||
10206 | Inst = TmpInst; | |||
10207 | return true; | |||
10208 | } | |||
10209 | break; | |||
10210 | case ARM::STMDB_UPD: | |||
10211 | // If this is a store of a single register via a 'push', then we should use | |||
10212 | // a pre-indexed STR instruction instead, per the ARM ARM. | |||
10213 | if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "push" && | |||
10214 | Inst.getNumOperands() == 5) { | |||
10215 | MCInst TmpInst; | |||
10216 | TmpInst.setOpcode(ARM::STR_PRE_IMM); | |||
10217 | TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb | |||
10218 | TmpInst.addOperand(Inst.getOperand(4)); // Rt | |||
10219 | TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12 | |||
10220 | TmpInst.addOperand(MCOperand::createImm(-4)); | |||
10221 | TmpInst.addOperand(Inst.getOperand(2)); // CondCode | |||
10222 | TmpInst.addOperand(Inst.getOperand(3)); | |||
10223 | Inst = TmpInst; | |||
10224 | } | |||
10225 | break; | |||
10226 | case ARM::t2ADDri12: | |||
10227 | case ARM::t2SUBri12: | |||
10228 | case ARM::t2ADDspImm12: | |||
10229 | case ARM::t2SUBspImm12: { | |||
10230 | // If the immediate fits for encoding T3 and the generic | |||
10231 | // mnemonic was used, encoding T3 is preferred. | |||
10232 | const StringRef Token = static_cast<ARMOperand &>(*Operands[0]).getToken(); | |||
10233 | if ((Token != "add" && Token != "sub") || | |||
10234 | ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1) | |||
10235 | break; | |||
10236 | switch (Inst.getOpcode()) { | |||
10237 | case ARM::t2ADDri12: | |||
10238 | Inst.setOpcode(ARM::t2ADDri); | |||
10239 | break; | |||
10240 | case ARM::t2SUBri12: | |||
10241 | Inst.setOpcode(ARM::t2SUBri); | |||
10242 | break; | |||
10243 | case ARM::t2ADDspImm12: | |||
10244 | Inst.setOpcode(ARM::t2ADDspImm); | |||
10245 | break; | |||
10246 | case ARM::t2SUBspImm12: | |||
10247 | Inst.setOpcode(ARM::t2SUBspImm); | |||
10248 | break; | |||
10249 | } | |||
10250 | ||||
10251 | Inst.addOperand(MCOperand::createReg(0)); // cc_out | |||
10252 | return true; | |||
10253 | } | |||
10254 | case ARM::tADDi8: | |||
10255 | // If the immediate is in the range 0-7, we want tADDi3 iff Rd was | |||
10256 | // explicitly specified. From the ARM ARM: "Encoding T1 is preferred | |||
10257 | // to encoding T2 if <Rd> is specified and encoding T2 is preferred | |||
10258 | // to encoding T1 if <Rd> is omitted." | |||
10259 | if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) { | |||
10260 | Inst.setOpcode(ARM::tADDi3); | |||
10261 | return true; | |||
10262 | } | |||
10263 | break; | |||
10264 | case ARM::tSUBi8: | |||
    // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
10266 | // explicitly specified. From the ARM ARM: "Encoding T1 is preferred | |||
10267 | // to encoding T2 if <Rd> is specified and encoding T2 is preferred | |||
10268 | // to encoding T1 if <Rd> is omitted." | |||
10269 | if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) { | |||
10270 | Inst.setOpcode(ARM::tSUBi3); | |||
10271 | return true; | |||
10272 | } | |||
10273 | break; | |||
10274 | case ARM::t2ADDri: | |||
10275 | case ARM::t2SUBri: { | |||
10276 | // If the destination and first source operand are the same, and | |||
10277 | // the flags are compatible with the current IT status, use encoding T2 | |||
10278 | // instead of T3. For compatibility with the system 'as'. Make sure the | |||
10279 | // wide encoding wasn't explicit. | |||
10280 | if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() || | |||
10281 | !isARMLowRegister(Inst.getOperand(0).getReg()) || | |||
10282 | (Inst.getOperand(2).isImm() && | |||
10283 | (unsigned)Inst.getOperand(2).getImm() > 255) || | |||
10284 | Inst.getOperand(5).getReg() != (inITBlock() ? 0 : ARM::CPSR) || | |||
10285 | HasWideQualifier) | |||
10286 | break; | |||
10287 | MCInst TmpInst; | |||
10288 | TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDri ? | |||
10289 | ARM::tADDi8 : ARM::tSUBi8); | |||
10290 | TmpInst.addOperand(Inst.getOperand(0)); | |||
10291 | TmpInst.addOperand(Inst.getOperand(5)); | |||
10292 | TmpInst.addOperand(Inst.getOperand(0)); | |||
10293 | TmpInst.addOperand(Inst.getOperand(2)); | |||
10294 | TmpInst.addOperand(Inst.getOperand(3)); | |||
10295 | TmpInst.addOperand(Inst.getOperand(4)); | |||
10296 | Inst = TmpInst; | |||
10297 | return true; | |||
10298 | } | |||
10299 | case ARM::t2ADDspImm: | |||
10300 | case ARM::t2SUBspImm: { | |||
10301 | // Prefer T1 encoding if possible | |||
10302 | if (Inst.getOperand(5).getReg() != 0 || HasWideQualifier) | |||
10303 | break; | |||
10304 | unsigned V = Inst.getOperand(2).getImm(); | |||
10305 | if (V & 3 || V > ((1 << 7) - 1) << 2) | |||
10306 | break; | |||
10307 | MCInst TmpInst; | |||
10308 | TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDspImm ? ARM::tADDspi | |||
10309 | : ARM::tSUBspi); | |||
10310 | TmpInst.addOperand(MCOperand::createReg(ARM::SP)); // destination reg | |||
10311 | TmpInst.addOperand(MCOperand::createReg(ARM::SP)); // source reg | |||
10312 | TmpInst.addOperand(MCOperand::createImm(V / 4)); // immediate | |||
10313 | TmpInst.addOperand(Inst.getOperand(3)); // pred | |||
10314 | TmpInst.addOperand(Inst.getOperand(4)); | |||
10315 | Inst = TmpInst; | |||
10316 | return true; | |||
10317 | } | |||
10318 | case ARM::t2ADDrr: { | |||
10319 | // If the destination and first source operand are the same, and | |||
10320 | // there's no setting of the flags, use encoding T2 instead of T3. | |||
10321 | // Note that this is only for ADD, not SUB. This mirrors the system | |||
10322 | // 'as' behaviour. Also take advantage of ADD being commutative. | |||
10323 | // Make sure the wide encoding wasn't explicit. | |||
10324 | bool Swap = false; | |||
10325 | auto DestReg = Inst.getOperand(0).getReg(); | |||
10326 | bool Transform = DestReg == Inst.getOperand(1).getReg(); | |||
10327 | if (!Transform && DestReg == Inst.getOperand(2).getReg()) { | |||
10328 | Transform = true; | |||
10329 | Swap = true; | |||
10330 | } | |||
10331 | if (!Transform || | |||
10332 | Inst.getOperand(5).getReg() != 0 || | |||
10333 | HasWideQualifier) | |||
10334 | break; | |||
10335 | MCInst TmpInst; | |||
10336 | TmpInst.setOpcode(ARM::tADDhirr); | |||
10337 | TmpInst.addOperand(Inst.getOperand(0)); | |||
10338 | TmpInst.addOperand(Inst.getOperand(0)); | |||
10339 | TmpInst.addOperand(Inst.getOperand(Swap ? 1 : 2)); | |||
10340 | TmpInst.addOperand(Inst.getOperand(3)); | |||
10341 | TmpInst.addOperand(Inst.getOperand(4)); | |||
10342 | Inst = TmpInst; | |||
10343 | return true; | |||
10344 | } | |||
10345 | case ARM::tADDrSP: | |||
10346 | // If the non-SP source operand and the destination operand are not the | |||
10347 | // same, we need to use the 32-bit encoding if it's available. | |||
10348 | if (Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) { | |||
10349 | Inst.setOpcode(ARM::t2ADDrr); | |||
10350 | Inst.addOperand(MCOperand::createReg(0)); // cc_out | |||
10351 | return true; | |||
10352 | } | |||
10353 | break; | |||
10354 | case ARM::tB: | |||
10355 | // A Thumb conditional branch outside of an IT block is a tBcc. | |||
10356 | if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) { | |||
10357 | Inst.setOpcode(ARM::tBcc); | |||
10358 | return true; | |||
10359 | } | |||
10360 | break; | |||
10361 | case ARM::t2B: | |||
10362 | // A Thumb2 conditional branch outside of an IT block is a t2Bcc. | |||
10363 | if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){ | |||
10364 | Inst.setOpcode(ARM::t2Bcc); | |||
10365 | return true; | |||
10366 | } | |||
10367 | break; | |||
10368 | case ARM::t2Bcc: | |||
10369 | // If the conditional is AL or we're in an IT block, we really want t2B. | |||
10370 | if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) { | |||
10371 | Inst.setOpcode(ARM::t2B); | |||
10372 | return true; | |||
10373 | } | |||
10374 | break; | |||
10375 | case ARM::tBcc: | |||
10376 | // If the conditional is AL, we really want tB. | |||
10377 | if (Inst.getOperand(1).getImm() == ARMCC::AL) { | |||
10378 | Inst.setOpcode(ARM::tB); | |||
10379 | return true; | |||
10380 | } | |||
10381 | break; | |||
10382 | case ARM::tLDMIA: { | |||
10383 | // If the register list contains any high registers, or if the writeback | |||
10384 | // doesn't match what tLDMIA can do, we need to use the 32-bit encoding | |||
10385 | // instead if we're in Thumb2. Otherwise, this should have generated | |||
10386 | // an error in validateInstruction(). | |||
10387 | unsigned Rn = Inst.getOperand(0).getReg(); | |||
10388 | bool hasWritebackToken = | |||
10389 | (static_cast<ARMOperand &>(*Operands[3]).isToken() && | |||
10390 | static_cast<ARMOperand &>(*Operands[3]).getToken() == "!"); | |||
10391 | bool listContainsBase; | |||
10392 | if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) || | |||
10393 | (!listContainsBase && !hasWritebackToken) || | |||
10394 | (listContainsBase && hasWritebackToken)) { | |||
10395 | // 16-bit encoding isn't sufficient. Switch to the 32-bit version. | |||
10396 | assert(isThumbTwo())(static_cast <bool> (isThumbTwo()) ? void (0) : __assert_fail ("isThumbTwo()", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 10396, __extension__ __PRETTY_FUNCTION__)); | |||
10397 | Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA); | |||
10398 | // If we're switching to the updating version, we need to insert | |||
10399 | // the writeback tied operand. | |||
10400 | if (hasWritebackToken) | |||
10401 | Inst.insert(Inst.begin(), | |||
10402 | MCOperand::createReg(Inst.getOperand(0).getReg())); | |||
10403 | return true; | |||
10404 | } | |||
10405 | break; | |||
10406 | } | |||
10407 | case ARM::tSTMIA_UPD: { | |||
10408 | // If the register list contains any high registers, we need to use | |||
10409 | // the 32-bit encoding instead if we're in Thumb2. Otherwise, this | |||
10410 | // should have generated an error in validateInstruction(). | |||
10411 | unsigned Rn = Inst.getOperand(0).getReg(); | |||
10412 | bool listContainsBase; | |||
10413 | if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) { | |||
10414 | // 16-bit encoding isn't sufficient. Switch to the 32-bit version. | |||
10415 | assert(isThumbTwo())(static_cast <bool> (isThumbTwo()) ? void (0) : __assert_fail ("isThumbTwo()", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 10415, __extension__ __PRETTY_FUNCTION__)); | |||
10416 | Inst.setOpcode(ARM::t2STMIA_UPD); | |||
10417 | return true; | |||
10418 | } | |||
10419 | break; | |||
10420 | } | |||
10421 | case ARM::tPOP: { | |||
10422 | bool listContainsBase; | |||
10423 | // If the register list contains any high registers, we need to use | |||
10424 | // the 32-bit encoding instead if we're in Thumb2. Otherwise, this | |||
10425 | // should have generated an error in validateInstruction(). | |||
10426 | if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase)) | |||
10427 | return false; | |||
10428 | assert(isThumbTwo())(static_cast <bool> (isThumbTwo()) ? void (0) : __assert_fail ("isThumbTwo()", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 10428, __extension__ __PRETTY_FUNCTION__)); | |||
10429 | Inst.setOpcode(ARM::t2LDMIA_UPD); | |||
10430 | // Add the base register and writeback operands. | |||
10431 | Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP)); | |||
10432 | Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP)); | |||
10433 | return true; | |||
10434 | } | |||
10435 | case ARM::tPUSH: { | |||
10436 | bool listContainsBase; | |||
10437 | if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase)) | |||
10438 | return false; | |||
10439 | assert(isThumbTwo())(static_cast <bool> (isThumbTwo()) ? void (0) : __assert_fail ("isThumbTwo()", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 10439, __extension__ __PRETTY_FUNCTION__)); | |||
10440 | Inst.setOpcode(ARM::t2STMDB_UPD); | |||
10441 | // Add the base register and writeback operands. | |||
10442 | Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP)); | |||
10443 | Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP)); | |||
10444 | return true; | |||
10445 | } | |||
10446 | case ARM::t2MOVi: | |||
10447 | // If we can use the 16-bit encoding and the user didn't explicitly | |||
10448 | // request the 32-bit variant, transform it here. | |||
10449 | if (isARMLowRegister(Inst.getOperand(0).getReg()) && | |||
10450 | (Inst.getOperand(1).isImm() && | |||
10451 | (unsigned)Inst.getOperand(1).getImm() <= 255) && | |||
10452 | Inst.getOperand(4).getReg() == (inITBlock() ? 0 : ARM::CPSR) && | |||
10453 | !HasWideQualifier) { | |||
10454 | // The operands aren't in the same order for tMOVi8... | |||
10455 | MCInst TmpInst; | |||
10456 | TmpInst.setOpcode(ARM::tMOVi8); | |||
10457 | TmpInst.addOperand(Inst.getOperand(0)); | |||
10458 | TmpInst.addOperand(Inst.getOperand(4)); | |||
10459 | TmpInst.addOperand(Inst.getOperand(1)); | |||
10460 | TmpInst.addOperand(Inst.getOperand(2)); | |||
10461 | TmpInst.addOperand(Inst.getOperand(3)); | |||
10462 | Inst = TmpInst; | |||
10463 | return true; | |||
10464 | } | |||
10465 | break; | |||
10466 | ||||
10467 | case ARM::t2MOVr: | |||
10468 | // If we can use the 16-bit encoding and the user didn't explicitly | |||
10469 | // request the 32-bit variant, transform it here. | |||
10470 | if (isARMLowRegister(Inst.getOperand(0).getReg()) && | |||
10471 | isARMLowRegister(Inst.getOperand(1).getReg()) && | |||
10472 | Inst.getOperand(2).getImm() == ARMCC::AL && | |||
10473 | Inst.getOperand(4).getReg() == ARM::CPSR && | |||
10474 | !HasWideQualifier) { | |||
10475 | // The operands aren't the same for tMOV[S]r... (no cc_out) | |||
10476 | MCInst TmpInst; | |||
10477 | unsigned Op = Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr; | |||
10478 | TmpInst.setOpcode(Op); | |||
10479 | TmpInst.addOperand(Inst.getOperand(0)); | |||
10480 | TmpInst.addOperand(Inst.getOperand(1)); | |||
10481 | if (Op == ARM::tMOVr) { | |||
10482 | TmpInst.addOperand(Inst.getOperand(2)); | |||
10483 | TmpInst.addOperand(Inst.getOperand(3)); | |||
10484 | } | |||
10485 | Inst = TmpInst; | |||
10486 | return true; | |||
10487 | } | |||
10488 | break; | |||
10489 | ||||
10490 | case ARM::t2SXTH: | |||
10491 | case ARM::t2SXTB: | |||
10492 | case ARM::t2UXTH: | |||
10493 | case ARM::t2UXTB: | |||
10494 | // If we can use the 16-bit encoding and the user didn't explicitly | |||
10495 | // request the 32-bit variant, transform it here. | |||
10496 | if (isARMLowRegister(Inst.getOperand(0).getReg()) && | |||
10497 | isARMLowRegister(Inst.getOperand(1).getReg()) && | |||
10498 | Inst.getOperand(2).getImm() == 0 && | |||
10499 | !HasWideQualifier) { | |||
10500 | unsigned NewOpc; | |||
10501 | switch (Inst.getOpcode()) { | |||
10502 | default: llvm_unreachable("Illegal opcode!")::llvm::llvm_unreachable_internal("Illegal opcode!", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 10502); | |||
10503 | case ARM::t2SXTH: NewOpc = ARM::tSXTH; break; | |||
10504 | case ARM::t2SXTB: NewOpc = ARM::tSXTB; break; | |||
10505 | case ARM::t2UXTH: NewOpc = ARM::tUXTH; break; | |||
10506 | case ARM::t2UXTB: NewOpc = ARM::tUXTB; break; | |||
10507 | } | |||
10508 | // The operands aren't the same for thumb1 (no rotate operand). | |||
10509 | MCInst TmpInst; | |||
10510 | TmpInst.setOpcode(NewOpc); | |||
10511 | TmpInst.addOperand(Inst.getOperand(0)); | |||
10512 | TmpInst.addOperand(Inst.getOperand(1)); | |||
10513 | TmpInst.addOperand(Inst.getOperand(3)); | |||
10514 | TmpInst.addOperand(Inst.getOperand(4)); | |||
10515 | Inst = TmpInst; | |||
10516 | return true; | |||
10517 | } | |||
10518 | break; | |||
10519 | ||||
10520 | case ARM::MOVsi: { | |||
10521 | ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm()); | |||
10522 | // rrx shifts and asr/lsr of #32 is encoded as 0 | |||
10523 | if (SOpc == ARM_AM::rrx || SOpc == ARM_AM::asr || SOpc == ARM_AM::lsr) | |||
10524 | return false; | |||
10525 | if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) { | |||
10526 | // Shifting by zero is accepted as a vanilla 'MOVr' | |||
10527 | MCInst TmpInst; | |||
10528 | TmpInst.setOpcode(ARM::MOVr); | |||
10529 | TmpInst.addOperand(Inst.getOperand(0)); | |||
10530 | TmpInst.addOperand(Inst.getOperand(1)); | |||
10531 | TmpInst.addOperand(Inst.getOperand(3)); | |||
10532 | TmpInst.addOperand(Inst.getOperand(4)); | |||
10533 | TmpInst.addOperand(Inst.getOperand(5)); | |||
10534 | Inst = TmpInst; | |||
10535 | return true; | |||
10536 | } | |||
10537 | return false; | |||
10538 | } | |||
10539 | case ARM::ANDrsi: | |||
10540 | case ARM::ORRrsi: | |||
10541 | case ARM::EORrsi: | |||
10542 | case ARM::BICrsi: | |||
10543 | case ARM::SUBrsi: | |||
10544 | case ARM::ADDrsi: { | |||
10545 | unsigned newOpc; | |||
10546 | ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm()); | |||
10547 | if (SOpc == ARM_AM::rrx) return false; | |||
10548 | switch (Inst.getOpcode()) { | |||
10549 | default: llvm_unreachable("unexpected opcode!")::llvm::llvm_unreachable_internal("unexpected opcode!", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 10549); | |||
10550 | case ARM::ANDrsi: newOpc = ARM::ANDrr; break; | |||
10551 | case ARM::ORRrsi: newOpc = ARM::ORRrr; break; | |||
10552 | case ARM::EORrsi: newOpc = ARM::EORrr; break; | |||
10553 | case ARM::BICrsi: newOpc = ARM::BICrr; break; | |||
10554 | case ARM::SUBrsi: newOpc = ARM::SUBrr; break; | |||
10555 | case ARM::ADDrsi: newOpc = ARM::ADDrr; break; | |||
10556 | } | |||
10557 | // If the shift is by zero, use the non-shifted instruction definition. | |||
10558 | // The exception is for right shifts, where 0 == 32 | |||
10559 | if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0 && | |||
10560 | !(SOpc == ARM_AM::lsr || SOpc == ARM_AM::asr)) { | |||
10561 | MCInst TmpInst; | |||
10562 | TmpInst.setOpcode(newOpc); | |||
10563 | TmpInst.addOperand(Inst.getOperand(0)); | |||
10564 | TmpInst.addOperand(Inst.getOperand(1)); | |||
10565 | TmpInst.addOperand(Inst.getOperand(2)); | |||
10566 | TmpInst.addOperand(Inst.getOperand(4)); | |||
10567 | TmpInst.addOperand(Inst.getOperand(5)); | |||
10568 | TmpInst.addOperand(Inst.getOperand(6)); | |||
10569 | Inst = TmpInst; | |||
10570 | return true; | |||
10571 | } | |||
10572 | return false; | |||
10573 | } | |||
10574 | case ARM::ITasm: | |||
10575 | case ARM::t2IT: { | |||
10576 | // Set up the IT block state according to the IT instruction we just | |||
10577 | // matched. | |||
10578 | assert(!inITBlock() && "nested IT blocks?!")(static_cast <bool> (!inITBlock() && "nested IT blocks?!" ) ? void (0) : __assert_fail ("!inITBlock() && \"nested IT blocks?!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 10578, __extension__ __PRETTY_FUNCTION__)); | |||
10579 | startExplicitITBlock(ARMCC::CondCodes(Inst.getOperand(0).getImm()), | |||
10580 | Inst.getOperand(1).getImm()); | |||
10581 | break; | |||
10582 | } | |||
10583 | case ARM::t2LSLrr: | |||
10584 | case ARM::t2LSRrr: | |||
10585 | case ARM::t2ASRrr: | |||
10586 | case ARM::t2SBCrr: | |||
10587 | case ARM::t2RORrr: | |||
10588 | case ARM::t2BICrr: | |||
10589 | // Assemblers should use the narrow encodings of these instructions when permissible. | |||
10590 | if ((isARMLowRegister(Inst.getOperand(1).getReg()) && | |||
10591 | isARMLowRegister(Inst.getOperand(2).getReg())) && | |||
10592 | Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() && | |||
10593 | Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) && | |||
10594 | !HasWideQualifier) { | |||
10595 | unsigned NewOpc; | |||
10596 | switch (Inst.getOpcode()) { | |||
10597 | default: llvm_unreachable("unexpected opcode")::llvm::llvm_unreachable_internal("unexpected opcode", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 10597); | |||
10598 | case ARM::t2LSLrr: NewOpc = ARM::tLSLrr; break; | |||
10599 | case ARM::t2LSRrr: NewOpc = ARM::tLSRrr; break; | |||
10600 | case ARM::t2ASRrr: NewOpc = ARM::tASRrr; break; | |||
10601 | case ARM::t2SBCrr: NewOpc = ARM::tSBC; break; | |||
10602 | case ARM::t2RORrr: NewOpc = ARM::tROR; break; | |||
10603 | case ARM::t2BICrr: NewOpc = ARM::tBIC; break; | |||
10604 | } | |||
10605 | MCInst TmpInst; | |||
10606 | TmpInst.setOpcode(NewOpc); | |||
10607 | TmpInst.addOperand(Inst.getOperand(0)); | |||
10608 | TmpInst.addOperand(Inst.getOperand(5)); | |||
10609 | TmpInst.addOperand(Inst.getOperand(1)); | |||
10610 | TmpInst.addOperand(Inst.getOperand(2)); | |||
10611 | TmpInst.addOperand(Inst.getOperand(3)); | |||
10612 | TmpInst.addOperand(Inst.getOperand(4)); | |||
10613 | Inst = TmpInst; | |||
10614 | return true; | |||
10615 | } | |||
10616 | return false; | |||
10617 | ||||
10618 | case ARM::t2ANDrr: | |||
10619 | case ARM::t2EORrr: | |||
10620 | case ARM::t2ADCrr: | |||
10621 | case ARM::t2ORRrr: | |||
10622 | // Assemblers should use the narrow encodings of these instructions when permissible. | |||
10623 | // These instructions are special in that they are commutable, so shorter encodings | |||
10624 | // are available more often. | |||
10625 | if ((isARMLowRegister(Inst.getOperand(1).getReg()) && | |||
10626 | isARMLowRegister(Inst.getOperand(2).getReg())) && | |||
10627 | (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() || | |||
10628 | Inst.getOperand(0).getReg() == Inst.getOperand(2).getReg()) && | |||
10629 | Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) && | |||
10630 | !HasWideQualifier) { | |||
10631 | unsigned NewOpc; | |||
10632 | switch (Inst.getOpcode()) { | |||
10633 | default: llvm_unreachable("unexpected opcode")::llvm::llvm_unreachable_internal("unexpected opcode", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 10633); | |||
10634 | case ARM::t2ADCrr: NewOpc = ARM::tADC; break; | |||
10635 | case ARM::t2ANDrr: NewOpc = ARM::tAND; break; | |||
10636 | case ARM::t2EORrr: NewOpc = ARM::tEOR; break; | |||
10637 | case ARM::t2ORRrr: NewOpc = ARM::tORR; break; | |||
10638 | } | |||
10639 | MCInst TmpInst; | |||
10640 | TmpInst.setOpcode(NewOpc); | |||
10641 | TmpInst.addOperand(Inst.getOperand(0)); | |||
10642 | TmpInst.addOperand(Inst.getOperand(5)); | |||
10643 | if (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) { | |||
10644 | TmpInst.addOperand(Inst.getOperand(1)); | |||
10645 | TmpInst.addOperand(Inst.getOperand(2)); | |||
10646 | } else { | |||
10647 | TmpInst.addOperand(Inst.getOperand(2)); | |||
10648 | TmpInst.addOperand(Inst.getOperand(1)); | |||
10649 | } | |||
10650 | TmpInst.addOperand(Inst.getOperand(3)); | |||
10651 | TmpInst.addOperand(Inst.getOperand(4)); | |||
10652 | Inst = TmpInst; | |||
10653 | return true; | |||
10654 | } | |||
10655 | return false; | |||
10656 | case ARM::MVE_VPST: | |||
10657 | case ARM::MVE_VPTv16i8: | |||
10658 | case ARM::MVE_VPTv8i16: | |||
10659 | case ARM::MVE_VPTv4i32: | |||
10660 | case ARM::MVE_VPTv16u8: | |||
10661 | case ARM::MVE_VPTv8u16: | |||
10662 | case ARM::MVE_VPTv4u32: | |||
10663 | case ARM::MVE_VPTv16s8: | |||
10664 | case ARM::MVE_VPTv8s16: | |||
10665 | case ARM::MVE_VPTv4s32: | |||
10666 | case ARM::MVE_VPTv4f32: | |||
10667 | case ARM::MVE_VPTv8f16: | |||
10668 | case ARM::MVE_VPTv16i8r: | |||
10669 | case ARM::MVE_VPTv8i16r: | |||
10670 | case ARM::MVE_VPTv4i32r: | |||
10671 | case ARM::MVE_VPTv16u8r: | |||
10672 | case ARM::MVE_VPTv8u16r: | |||
10673 | case ARM::MVE_VPTv4u32r: | |||
10674 | case ARM::MVE_VPTv16s8r: | |||
10675 | case ARM::MVE_VPTv8s16r: | |||
10676 | case ARM::MVE_VPTv4s32r: | |||
10677 | case ARM::MVE_VPTv4f32r: | |||
10678 | case ARM::MVE_VPTv8f16r: { | |||
10679 | assert(!inVPTBlock() && "Nested VPT blocks are not allowed")(static_cast <bool> (!inVPTBlock() && "Nested VPT blocks are not allowed" ) ? void (0) : __assert_fail ("!inVPTBlock() && \"Nested VPT blocks are not allowed\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 10679, __extension__ __PRETTY_FUNCTION__)); | |||
10680 | MCOperand &MO = Inst.getOperand(0); | |||
10681 | VPTState.Mask = MO.getImm(); | |||
10682 | VPTState.CurPosition = 0; | |||
10683 | break; | |||
10684 | } | |||
10685 | } | |||
10686 | return false; | |||
10687 | } | |||
10688 | ||||
10689 | unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) { | |||
10690 | // 16-bit thumb arithmetic instructions either require or preclude the 'S' | |||
10691 | // suffix depending on whether they're in an IT block or not. | |||
10692 | unsigned Opc = Inst.getOpcode(); | |||
10693 | const MCInstrDesc &MCID = MII.get(Opc); | |||
10694 | if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) { | |||
10695 | assert(MCID.hasOptionalDef() &&(static_cast <bool> (MCID.hasOptionalDef() && "optionally flag setting instruction missing optional def operand" ) ? void (0) : __assert_fail ("MCID.hasOptionalDef() && \"optionally flag setting instruction missing optional def operand\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 10696, __extension__ __PRETTY_FUNCTION__)) | |||
10696 | "optionally flag setting instruction missing optional def operand")(static_cast <bool> (MCID.hasOptionalDef() && "optionally flag setting instruction missing optional def operand" ) ? void (0) : __assert_fail ("MCID.hasOptionalDef() && \"optionally flag setting instruction missing optional def operand\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 10696, __extension__ __PRETTY_FUNCTION__)); | |||
10697 | assert(MCID.NumOperands == Inst.getNumOperands() &&(static_cast <bool> (MCID.NumOperands == Inst.getNumOperands () && "operand count mismatch!") ? void (0) : __assert_fail ("MCID.NumOperands == Inst.getNumOperands() && \"operand count mismatch!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 10698, __extension__ __PRETTY_FUNCTION__)) | |||
10698 | "operand count mismatch!")(static_cast <bool> (MCID.NumOperands == Inst.getNumOperands () && "operand count mismatch!") ? void (0) : __assert_fail ("MCID.NumOperands == Inst.getNumOperands() && \"operand count mismatch!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp" , 10698, __extension__ __PRETTY_FUNCTION__)); | |||
10699 | // Find the optional-def operand (cc_out). | |||
10700 | unsigned OpNo; | |||
10701 | for (OpNo = 0; | |||
10702 | !MCID.OpInfo[OpNo].isOptionalDef() && OpNo < MCID.NumOperands; | |||
10703 | ++OpNo) | |||
10704 | ; | |||
10705 | // If we're parsing Thumb1, reject it completely. | |||
10706 | if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR) | |||
10707 | return Match_RequiresFlagSetting; | |||
10708 | // If we're parsing Thumb2, which form is legal depends on whether we're | |||
10709 | // in an IT block. | |||
10710 | if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR && | |||
10711 | !inITBlock()) | |||
10712 | return Match_RequiresITBlock; | |||
10713 | if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR && | |||
10714 | inITBlock()) | |||
10715 | return Match_RequiresNotITBlock; | |||
10716 | // LSL with zero immediate is not allowed in an IT block | |||
10717 | if (Opc == ARM::tLSLri && Inst.getOperand(3).getImm() == 0 && inITBlock()) | |||
10718 | return Match_RequiresNotITBlock; | |||
10719 | } else if (isThumbOne()) { | |||
10720 | // Some high-register supporting Thumb1 encodings only allow both registers | |||
10721 | // to be from r0-r7 when in Thumb2. | |||
10722 | if (Opc == ARM::tADDhirr && !hasV6MOps() && | |||
10723 | isARMLowRegister(Inst.getOperand(1).getReg()) && | |||
10724 | isARMLowRegister(Inst.getOperand(2).getReg())) | |||
10725 | return Match_RequiresThumb2; | |||
10726 | // Others only require ARMv6 or later. | |||
10727 | else if (Opc == ARM::tMOVr && !hasV6Ops() && | |||
10728 | isARMLowRegister(Inst.getOperand(0).getReg()) && | |||
10729 | isARMLowRegister(Inst.getOperand(1).getReg())) | |||
10730 | return Match_RequiresV6; | |||
10731 | } | |||
10732 | ||||
10733 | // Before ARMv8 the rules for when SP is allowed in t2MOVr are more complex | |||
10734 | // than the loop below can handle, so it uses the GPRnopc register class and | |||
10735 | // we do SP handling here. | |||
10736 | if (Opc == ARM::t2MOVr && !hasV8Ops()) | |||
10737 | { | |||
10738 | // SP as both source and destination is not allowed | |||
10739 | if (Inst.getOperand(0).getReg() == ARM::SP && | |||
10740 | Inst.getOperand(1).getReg() == ARM::SP) | |||
10741 | return Match_RequiresV8; | |||
10742 | // When flags-setting SP as either source or destination is not allowed | |||
10743 | if (Inst.getOperand(4).getReg() == ARM::CPSR && | |||
10744 | (Inst.getOperand(0).getReg() == ARM::SP || | |||
10745 | Inst.getOperand(1).getReg() == ARM::SP)) | |||
10746 | return Match_RequiresV8; | |||
10747 | } | |||
10748 | ||||
10749 | switch (Inst.getOpcode()) { | |||
10750 | case ARM::VMRS: | |||
10751 | case ARM::VMSR: | |||
10752 | case ARM::VMRS_FPCXTS: | |||
10753 | case ARM::VMRS_FPCXTNS: | |||
10754 | case ARM::VMSR_FPCXTS: | |||
10755 | case ARM::VMSR_FPCXTNS: | |||
10756 | case ARM::VMRS_FPSCR_NZCVQC: | |||
10757 | case ARM::VMSR_FPSCR_NZCVQC: | |||
10758 | case ARM::FMSTAT: | |||
10759 | case ARM::VMRS_VPR: | |||
10760 | case ARM::VMRS_P0: | |||
10761 | case ARM::VMSR_VPR: | |||
10762 | case ARM::VMSR_P0: | |||
10763 | // Use of SP for VMRS/VMSR is only allowed in ARM mode with the exception of | |||
10764 | // ARMv8-A. | |||
10765 | if (Inst.getOperand(0).isReg() && Inst.getOperand(0).getReg() == ARM::SP && | |||
10766 | (isThumb() && !hasV8Ops())) | |||
10767 | return Match_InvalidOperand; | |||
10768 | break; | |||
10769 | case ARM::t2TBB: | |||
10770 | case ARM::t2TBH: | |||
10771 | // Rn = sp is only allowed with ARMv8-A | |||
10772 | if (!hasV8Ops() && (Inst.getOperand(0).getReg() == ARM::SP)) | |||
10773 | return Match_RequiresV8; | |||
10774 | break; | |||
10775 | default: | |||
10776 | break; | |||
10777 | } | |||
10778 | ||||
10779 | for (unsigned I = 0; I < MCID.NumOperands; ++I) | |||
10780 | if (MCID.OpInfo[I].RegClass == ARM::rGPRRegClassID) { | |||
10781 | // rGPRRegClass excludes PC, and also excluded SP before ARMv8 | |||
10782 | const auto &Op = Inst.getOperand(I); | |||
10783 | if (!Op.isReg()) { | |||
10784 | // This can happen in awkward cases with tied operands, e.g. a | |||
10785 | // writeback load/store with a complex addressing mode in | |||
10786 | // which there's an output operand corresponding to the | |||
10787 | // updated written-back base register: the Tablegen-generated | |||
10788 | // AsmMatcher will have written a placeholder operand to that | |||
10789 | // slot in the form of an immediate 0, because it can't | |||
10790 | // generate the register part of the complex addressing-mode | |||
10791 | // operand ahead of time. | |||
10792 | continue; | |||
10793 | } | |||
10794 | ||||
10795 | unsigned Reg = Op.getReg(); | |||
10796 | if ((Reg == ARM::SP) && !hasV8Ops()) | |||
10797 | return Match_RequiresV8; | |||
10798 | else if (Reg == ARM::PC) | |||
10799 | return Match_InvalidOperand; | |||
10800 | } | |||
10801 | ||||
10802 | return Match_Success; | |||
10803 | } | |||
10804 | ||||
10805 | namespace llvm { | |||
10806 | ||||
10807 | template <> inline bool IsCPSRDead<MCInst>(const MCInst *Instr) { | |||
10808 | return true; // In an assembly source, no need to second-guess | |||
10809 | } | |||
10810 | ||||
10811 | } // end namespace llvm | |||
10812 | ||||
10813 | // Returns true if Inst is unpredictable if it is in and IT block, but is not | |||
10814 | // the last instruction in the block. | |||
10815 | bool ARMAsmParser::isITBlockTerminator(MCInst &Inst) const { | |||
10816 | const MCInstrDesc &MCID = MII.get(Inst.getOpcode()); | |||
10817 | ||||
10818 | // All branch & call instructions terminate IT blocks with the exception of | |||
10819 | // SVC. | |||
10820 | if (MCID.isTerminator() || (MCID.isCall() && Inst.getOpcode() != ARM::tSVC) || | |||
10821 | MCID.isReturn() || MCID.isBranch() || MCID.isIndirectBranch()) | |||
10822 | return true; | |||
10823 | ||||
10824 | // Any arithmetic instruction which writes to the PC also terminates the IT | |||
10825 | // block. | |||
10826 | if (MCID.hasDefOfPhysReg(Inst, ARM::PC, *MRI)) | |||
10827 | return true; | |||
10828 | ||||
10829 | return false; | |||
10830 | } | |||
10831 | ||||
unsigned ARMAsmParser::MatchInstruction(OperandVector &Operands, MCInst &Inst,
                                        SmallVectorImpl<NearMissInfo> &NearMisses,
                                        bool MatchingInlineAsm,
                                        bool &EmitInITBlock,
                                        MCStreamer &Out) {
  // Match one instruction, trying in order:
  //   1. as an extension of the current implicit IT block (possibly inverting
  //      the block's current condition),
  //   2. outside any IT block,
  //   3. as the first instruction of a new implicit IT block.
  // On success, EmitInITBlock tells the caller whether the instruction must be
  // queued into the pending implicit IT block rather than emitted directly.

  // If we can't use an implicit IT block here, just match as normal.
  if (inExplicitITBlock() || !isThumbTwo() || !useImplicitITThumb())
    return MatchInstructionImpl(Operands, Inst, &NearMisses, MatchingInlineAsm);

  // Try to match the instruction in an extension of the current IT block (if
  // there is one).
  if (inImplicitITBlock()) {
    extendImplicitITBlock(ITState.Cond);
    if (MatchInstructionImpl(Operands, Inst, nullptr, MatchingInlineAsm) ==
        Match_Success) {
      // The match succeded, but we still have to check that the instruction is
      // valid in this implicit IT block.
      const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
      if (MCID.isPredicable()) {
        ARMCC::CondCodes InstCond =
            (ARMCC::CondCodes)Inst.getOperand(MCID.findFirstPredOperandIdx())
                .getImm();
        ARMCC::CondCodes ITCond = currentITCond();
        if (InstCond == ITCond) {
          // Same condition as the block: emit inside it as-is.
          EmitInITBlock = true;
          return Match_Success;
        } else if (InstCond == ARMCC::getOppositeCondition(ITCond)) {
          // Opposite condition: flip the block's remaining slots instead.
          invertCurrentITCondition();
          EmitInITBlock = true;
          return Match_Success;
        }
      }
    }
    // The extension didn't work out; undo it before trying other strategies.
    rewindImplicitITPosition();
  }

  // Finish the current IT block, and try to match outside any IT block.
  flushPendingInstructions(Out);
  unsigned PlainMatchResult =
      MatchInstructionImpl(Operands, Inst, &NearMisses, MatchingInlineAsm);
  if (PlainMatchResult == Match_Success) {
    const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
    if (MCID.isPredicable()) {
      ARMCC::CondCodes InstCond =
          (ARMCC::CondCodes)Inst.getOperand(MCID.findFirstPredOperandIdx())
              .getImm();
      // Some forms of the branch instruction have their own condition code
      // fields, so can be conditionally executed without an IT block.
      if (Inst.getOpcode() == ARM::tBcc || Inst.getOpcode() == ARM::t2Bcc) {
        EmitInITBlock = false;
        return Match_Success;
      }
      if (InstCond == ARMCC::AL) {
        // Unconditional: no IT block needed.
        EmitInITBlock = false;
        return Match_Success;
      }
    } else {
      EmitInITBlock = false;
      return Match_Success;
    }
  }

  // Try to match in a new IT block. The matcher doesn't check the actual
  // condition, so we create an IT block with a dummy condition, and fix it up
  // once we know the actual condition.
  startImplicitITBlock();
  if (MatchInstructionImpl(Operands, Inst, nullptr, MatchingInlineAsm) ==
      Match_Success) {
    const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
    if (MCID.isPredicable()) {
      // Fix up the dummy condition with the instruction's real predicate.
      ITState.Cond =
          (ARMCC::CondCodes)Inst.getOperand(MCID.findFirstPredOperandIdx())
              .getImm();
      EmitInITBlock = true;
      return Match_Success;
    }
  }
  discardImplicitITBlock();

  // If none of these succeed, return the error we got when trying to match
  // outside any IT blocks.
  EmitInITBlock = false;
  return PlainMatchResult;
}
10916 | ||||
10917 | static std::string ARMMnemonicSpellCheck(StringRef S, const FeatureBitset &FBS, | |||
10918 | unsigned VariantID = 0); | |||
10919 | ||||
10920 | static const char *getSubtargetFeatureName(uint64_t Val); | |||
bool ARMAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                           OperandVector &Operands,
                                           MCStreamer &Out, uint64_t &ErrorInfo,
                                           bool MatchingInlineAsm) {
  MCInst Inst;
  unsigned MatchResult;
  // Set by MatchInstruction when the instruction must be queued into the
  // pending implicit IT block instead of being emitted immediately.
  bool PendConditionalInstruction = false;

  SmallVector<NearMissInfo, 4> NearMisses;
  MatchResult = MatchInstruction(Operands, Inst, NearMisses, MatchingInlineAsm,
                                 PendConditionalInstruction, Out);

  switch (MatchResult) {
  case Match_Success:
    LLVM_DEBUG(dbgs() << "Parsed as: ";
               Inst.dump_pretty(dbgs(), MII.getName(Inst.getOpcode()));
               dbgs() << "\n");

    // Context sensitive operand constraints aren't handled by the matcher,
    // so check them here.
    if (validateInstruction(Inst, Operands)) {
      // Still progress the IT block, otherwise one wrong condition causes
      // nasty cascading errors.
      forwardITPosition();
      forwardVPTPosition();
      return true;
    }

    { // processInstruction() updates inITBlock state, we need to save it away
      bool wasInITBlock = inITBlock();

      // Some instructions need post-processing to, for example, tweak which
      // encoding is selected. Loop on it while changes happen so the
      // individual transformations can chain off each other. E.g.,
      // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8)
      while (processInstruction(Inst, Operands, Out))
        LLVM_DEBUG(dbgs() << "Changed to: ";
                   Inst.dump_pretty(dbgs(), MII.getName(Inst.getOpcode()));
                   dbgs() << "\n");

      // Only after the instruction is fully processed, we can validate it
      if (wasInITBlock && hasV8Ops() && isThumb() &&
          !isV8EligibleForIT(&Inst)) {
        Warning(IDLoc, "deprecated instruction in IT block");
      }
    }

    // Only move forward at the very end so that everything in validate
    // and process gets a consistent answer about whether we're in an IT
    // block.
    forwardITPosition();
    forwardVPTPosition();

    // ITasm is an ARM mode pseudo-instruction that just sets the ITblock and
    // doesn't actually encode.
    if (Inst.getOpcode() == ARM::ITasm)
      return false;

    Inst.setLoc(IDLoc);
    if (PendConditionalInstruction) {
      // Defer emission: the instruction belongs to an implicit IT block that
      // may still grow. Flush once the block is full or must terminate.
      PendingConditionalInsts.push_back(Inst);
      if (isITBlockFull() || isITBlockTerminator(Inst))
        flushPendingInstructions(Out);
    } else {
      Out.emitInstruction(Inst, getSTI());
    }
    return false;
  case Match_NearMisses:
    ReportNearMisses(NearMisses, IDLoc, Operands);
    return true;
  case Match_MnemonicFail: {
    // Unknown mnemonic: suggest the closest valid one, if any.
    FeatureBitset FBS = ComputeAvailableFeatures(getSTI().getFeatureBits());
    std::string Suggestion = ARMMnemonicSpellCheck(
      ((ARMOperand &)*Operands[0]).getToken(), FBS);
    return Error(IDLoc, "invalid instruction" + Suggestion,
                 ((ARMOperand &)*Operands[0]).getLocRange());
  }
  }

  llvm_unreachable("Implement any new match types added!");
}
11002 | ||||
11003 | /// parseDirective parses the arm specific directives | |||
11004 | bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) { | |||
11005 | const MCContext::Environment Format = getContext().getObjectFileType(); | |||
11006 | bool IsMachO = Format == MCContext::IsMachO; | |||
11007 | bool IsCOFF = Format == MCContext::IsCOFF; | |||
11008 | ||||
11009 | std::string IDVal = DirectiveID.getIdentifier().lower(); | |||
11010 | if (IDVal == ".word") | |||
11011 | parseLiteralValues(4, DirectiveID.getLoc()); | |||
11012 | else if (IDVal == ".short" || IDVal == ".hword") | |||
11013 | parseLiteralValues(2, DirectiveID.getLoc()); | |||
11014 | else if (IDVal == ".thumb") | |||
11015 | parseDirectiveThumb(DirectiveID.getLoc()); | |||
11016 | else if (IDVal == ".arm") | |||
11017 | parseDirectiveARM(DirectiveID.getLoc()); | |||
11018 | else if (IDVal == ".thumb_func") | |||
11019 | parseDirectiveThumbFunc(DirectiveID.getLoc()); | |||
11020 | else if (IDVal == ".code") | |||
11021 | parseDirectiveCode(DirectiveID.getLoc()); | |||
11022 | else if (IDVal == ".syntax") | |||
11023 | parseDirectiveSyntax(DirectiveID.getLoc()); | |||
11024 | else if (IDVal == ".unreq") | |||
11025 | parseDirectiveUnreq(DirectiveID.getLoc()); | |||
11026 | else if (IDVal == ".fnend") | |||
11027 | parseDirectiveFnEnd(DirectiveID.getLoc()); | |||
11028 | else if (IDVal == ".cantunwind") | |||
11029 | parseDirectiveCantUnwind(DirectiveID.getLoc()); | |||
11030 | else if (IDVal == ".personality") | |||
11031 | parseDirectivePersonality(DirectiveID.getLoc()); | |||
11032 | else if (IDVal == ".handlerdata") | |||
11033 | parseDirectiveHandlerData(DirectiveID.getLoc()); | |||
11034 | else if (IDVal == ".setfp") | |||
11035 | parseDirectiveSetFP(DirectiveID.getLoc()); | |||
11036 | else if (IDVal == ".pad") | |||
11037 | parseDirectivePad(DirectiveID.getLoc()); | |||
11038 | else if (IDVal == ".save") | |||
11039 | parseDirectiveRegSave(DirectiveID.getLoc(), false); | |||
11040 | else if (IDVal == ".vsave") | |||
11041 | parseDirectiveRegSave(DirectiveID.getLoc(), true); | |||
11042 | else if (IDVal == ".ltorg" || IDVal == ".pool") | |||
11043 | parseDirectiveLtorg(DirectiveID.getLoc()); | |||
11044 | else if (IDVal == ".even") | |||
11045 | parseDirectiveEven(DirectiveID.getLoc()); | |||
11046 | else if (IDVal == ".personalityindex") | |||
11047 | parseDirectivePersonalityIndex(DirectiveID.getLoc()); | |||
11048 | else if (IDVal == ".unwind_raw") | |||
11049 | parseDirectiveUnwindRaw(DirectiveID.getLoc()); | |||
11050 | else if (IDVal == ".movsp") | |||
11051 | parseDirectiveMovSP(DirectiveID.getLoc()); | |||
11052 | else if (IDVal == ".arch_extension") | |||
11053 | parseDirectiveArchExtension(DirectiveID.getLoc()); | |||
11054 | else if (IDVal == ".align") | |||
11055 | return parseDirectiveAlign(DirectiveID.getLoc()); // Use Generic on failure. | |||
11056 | else if (IDVal == ".thumb_set") | |||
11057 | parseDirectiveThumbSet(DirectiveID.getLoc()); | |||
11058 | else if (IDVal == ".inst") | |||
11059 | parseDirectiveInst(DirectiveID.getLoc()); | |||
11060 | else if (IDVal == ".inst.n") | |||
11061 | parseDirectiveInst(DirectiveID.getLoc(), 'n'); | |||
11062 | else if (IDVal == ".inst.w") | |||
11063 | parseDirectiveInst(DirectiveID.getLoc(), 'w'); | |||
11064 | else if (!IsMachO && !IsCOFF) { | |||
11065 | if (IDVal == ".arch") | |||
11066 | parseDirectiveArch(DirectiveID.getLoc()); | |||
11067 | else if (IDVal == ".cpu") | |||
11068 | parseDirectiveCPU(DirectiveID.getLoc()); | |||
11069 | else if (IDVal == ".eabi_attribute") | |||
11070 | parseDirectiveEabiAttr(DirectiveID.getLoc()); | |||
11071 | else if (IDVal == ".fpu") | |||
11072 | parseDirectiveFPU(DirectiveID.getLoc()); | |||
11073 | else if (IDVal == ".fnstart") | |||
11074 | parseDirectiveFnStart(DirectiveID.getLoc()); | |||
11075 | else if (IDVal == ".object_arch") | |||
11076 | parseDirectiveObjectArch(DirectiveID.getLoc()); | |||
11077 | else if (IDVal == ".tlsdescseq") | |||
11078 | parseDirectiveTLSDescSeq(DirectiveID.getLoc()); | |||
11079 | else | |||
11080 | return true; | |||
11081 | } else | |||
11082 | return true; | |||
11083 | return false; | |||
11084 | } | |||
11085 | ||||
11086 | /// parseLiteralValues | |||
11087 | /// ::= .hword expression [, expression]* | |||
11088 | /// ::= .short expression [, expression]* | |||
11089 | /// ::= .word expression [, expression]* | |||
11090 | bool ARMAsmParser::parseLiteralValues(unsigned Size, SMLoc L) { | |||
11091 | auto parseOne = [&]() -> bool { | |||
11092 | const MCExpr *Value; | |||
11093 | if (getParser().parseExpression(Value)) | |||
11094 | return true; | |||
11095 | getParser().getStreamer().emitValue(Value, Size, L); | |||
11096 | return false; | |||
11097 | }; | |||
11098 | return (parseMany(parseOne)); | |||
11099 | } | |||
11100 | ||||
11101 | /// parseDirectiveThumb | |||
11102 | /// ::= .thumb | |||
11103 | bool ARMAsmParser::parseDirectiveThumb(SMLoc L) { | |||
11104 | if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive") || | |||
11105 | check(!hasThumb(), L, "target does not support Thumb mode")) | |||
11106 | return true; | |||
11107 | ||||
11108 | if (!isThumb()) | |||
11109 | SwitchMode(); | |||
11110 | ||||
11111 | getParser().getStreamer().emitAssemblerFlag(MCAF_Code16); | |||
11112 | return false; | |||
11113 | } | |||
11114 | ||||
11115 | /// parseDirectiveARM | |||
11116 | /// ::= .arm | |||
11117 | bool ARMAsmParser::parseDirectiveARM(SMLoc L) { | |||
11118 | if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive") || | |||
11119 | check(!hasARM(), L, "target does not support ARM mode")) | |||
11120 | return true; | |||
11121 | ||||
11122 | if (isThumb()) | |||
11123 | SwitchMode(); | |||
11124 | getParser().getStreamer().emitAssemblerFlag(MCAF_Code32); | |||
11125 | return false; | |||
11126 | } | |||
11127 | ||||
void ARMAsmParser::doBeforeLabelEmit(MCSymbol *Symbol) {
  // We need to flush the current implicit IT block on a label, because it is
  // not legal to branch into an IT block. The label itself (Symbol) is not
  // consulted; any label forces the flush.
  flushPendingInstructions(getStreamer());
}
11133 | ||||
11134 | void ARMAsmParser::onLabelParsed(MCSymbol *Symbol) { | |||
11135 | if (NextSymbolIsThumb) { | |||
11136 | getParser().getStreamer().emitThumbFunc(Symbol); | |||
11137 | NextSymbolIsThumb = false; | |||
11138 | } | |||
11139 | } | |||
11140 | ||||
11141 | /// parseDirectiveThumbFunc | |||
11142 | /// ::= .thumbfunc symbol_name | |||
11143 | bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) { | |||
11144 | MCAsmParser &Parser = getParser(); | |||
11145 | const auto Format = getContext().getObjectFileType(); | |||
11146 | bool IsMachO = Format == MCContext::IsMachO; | |||
11147 | ||||
11148 | // Darwin asm has (optionally) function name after .thumb_func direction | |||
11149 | // ELF doesn't | |||
11150 | ||||
11151 | if (IsMachO) { | |||
11152 | if (Parser.getTok().is(AsmToken::Identifier) || | |||
11153 | Parser.getTok().is(AsmToken::String)) { | |||
11154 | MCSymbol *Func = getParser().getContext().getOrCreateSymbol( | |||
11155 | Parser.getTok().getIdentifier()); | |||
11156 | getParser().getStreamer().emitThumbFunc(Func); | |||
11157 | Parser.Lex(); | |||
11158 | if (parseToken(AsmToken::EndOfStatement, | |||
11159 | "unexpected token in '.thumb_func' directive")) | |||
11160 | return true; | |||
11161 | return false; | |||
11162 | } | |||
11163 | } | |||
11164 | ||||
11165 | if (parseToken(AsmToken::EndOfStatement, | |||
11166 | "unexpected token in '.thumb_func' directive")) | |||
11167 | return true; | |||
11168 | ||||
11169 | // .thumb_func implies .thumb | |||
11170 | if (!isThumb()) | |||
11171 | SwitchMode(); | |||
11172 | ||||
11173 | getParser().getStreamer().emitAssemblerFlag(MCAF_Code16); | |||
11174 | ||||
11175 | NextSymbolIsThumb = true; | |||
11176 | return false; | |||
11177 | } | |||
11178 | ||||
11179 | /// parseDirectiveSyntax | |||
11180 | /// ::= .syntax unified | divided | |||
11181 | bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) { | |||
11182 | MCAsmParser &Parser = getParser(); | |||
11183 | const AsmToken &Tok = Parser.getTok(); | |||
11184 | if (Tok.isNot(AsmToken::Identifier)) { | |||
11185 | Error(L, "unexpected token in .syntax directive"); | |||
11186 | return false; | |||
11187 | } | |||
11188 | ||||
11189 | StringRef Mode = Tok.getString(); | |||
11190 | Parser.Lex(); | |||
11191 | if (check(Mode == "divided" || Mode == "DIVIDED", L, | |||
11192 | "'.syntax divided' arm assembly not supported") || | |||
11193 | check(Mode != "unified" && Mode != "UNIFIED", L, | |||
11194 | "unrecognized syntax mode in .syntax directive") || | |||
11195 | parseToken(AsmToken::EndOfStatement, "unexpected token in directive")) | |||
11196 | return true; | |||
11197 | ||||
11198 | // TODO tell the MC streamer the mode | |||
11199 | // getParser().getStreamer().Emit???(); | |||
11200 | return false; | |||
11201 | } | |||
11202 | ||||
11203 | /// parseDirectiveCode | |||
11204 | /// ::= .code 16 | 32 | |||
11205 | bool ARMAsmParser::parseDirectiveCode(SMLoc L) { | |||
11206 | MCAsmParser &Parser = getParser(); | |||
11207 | const AsmToken &Tok = Parser.getTok(); | |||
11208 | if (Tok.isNot(AsmToken::Integer)) | |||
11209 | return Error(L, "unexpected token in .code directive"); | |||
11210 | int64_t Val = Parser.getTok().getIntVal(); | |||
11211 | if (Val != 16 && Val != 32) { | |||
11212 | Error(L, "invalid operand to .code directive"); | |||
11213 | return false; | |||
11214 | } | |||
11215 | Parser.Lex(); | |||
11216 | ||||
11217 | if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive")) | |||
11218 | return true; | |||
11219 | ||||
11220 | if (Val == 16) { | |||
11221 | if (!hasThumb()) | |||
11222 | return Error(L, "target does not support Thumb mode"); | |||
11223 | ||||
11224 | if (!isThumb()) | |||
11225 | SwitchMode(); | |||
11226 | getParser().getStreamer().emitAssemblerFlag(MCAF_Code16); | |||
11227 | } else { | |||
11228 | if (!hasARM()) | |||
11229 | return Error(L, "target does not support ARM mode"); | |||
11230 | ||||
11231 | if (isThumb()) | |||
11232 | SwitchMode(); | |||
11233 | getParser().getStreamer().emitAssemblerFlag(MCAF_Code32); | |||
11234 | } | |||
11235 | ||||
11236 | return false; | |||
11237 | } | |||
11238 | ||||
11239 | /// parseDirectiveReq | |||
11240 | /// ::= name .req registername | |||
11241 | bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) { | |||
11242 | MCAsmParser &Parser = getParser(); | |||
11243 | Parser.Lex(); // Eat the '.req' token. | |||
11244 | unsigned Reg; | |||
11245 | SMLoc SRegLoc, ERegLoc; | |||
11246 | if (check(ParseRegister(Reg, SRegLoc, ERegLoc), SRegLoc, | |||
11247 | "register name expected") || | |||
11248 | parseToken(AsmToken::EndOfStatement, | |||
11249 | "unexpected input in .req directive.")) | |||
11250 | return true; | |||
11251 | ||||
11252 | if (RegisterReqs.insert(std::make_pair(Name, Reg)).first->second != Reg) | |||
11253 | return Error(SRegLoc, | |||
11254 | "redefinition of '" + Name + "' does not match original."); | |||
11255 | ||||
11256 | return false; | |||
11257 | } | |||
11258 | ||||
11259 | /// parseDirectiveUneq | |||
11260 | /// ::= .unreq registername | |||
11261 | bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) { | |||
11262 | MCAsmParser &Parser = getParser(); | |||
11263 | if (Parser.getTok().isNot(AsmToken::Identifier)) | |||
11264 | return Error(L, "unexpected input in .unreq directive."); | |||
11265 | RegisterReqs.erase(Parser.getTok().getIdentifier().lower()); | |||
11266 | Parser.Lex(); // Eat the identifier. | |||
11267 | if (parseToken(AsmToken::EndOfStatement, | |||
11268 | "unexpected input in '.unreq' directive")) | |||
11269 | return true; | |||
11270 | return false; | |||
11271 | } | |||
11272 | ||||
11273 | // After changing arch/CPU, try to put the ARM/Thumb mode back to what it was | |||
11274 | // before, if supported by the new target, or emit mapping symbols for the mode | |||
11275 | // switch. | |||
11276 | void ARMAsmParser::FixModeAfterArchChange(bool WasThumb, SMLoc Loc) { | |||
11277 | if (WasThumb != isThumb()) { | |||
11278 | if (WasThumb && hasThumb()) { | |||
11279 | // Stay in Thumb mode | |||
11280 | SwitchMode(); | |||
11281 | } else if (!WasThumb && hasARM()) { | |||
11282 | // Stay in ARM mode | |||
11283 | SwitchMode(); | |||
11284 | } else { | |||
11285 | // Mode switch forced, because the new arch doesn't support the old mode. | |||
11286 | getParser().getStreamer().emitAssemblerFlag(isThumb() ? MCAF_Code16 | |||
11287 | : MCAF_Code32); | |||
11288 | // Warn about the implcit mode switch. GAS does not switch modes here, | |||
11289 | // but instead stays in the old mode, reporting an error on any following | |||
11290 | // instructions as the mode does not exist on the target. | |||
11291 | Warning(Loc, Twine("new target does not support ") + | |||
11292 | (WasThumb ? "thumb" : "arm") + " mode, switching to " + | |||
11293 | (!WasThumb ? "thumb" : "arm") + " mode"); | |||
11294 | } | |||
11295 | } | |||
11296 | } | |||
11297 | ||||
11298 | /// parseDirectiveArch | |||
11299 | /// ::= .arch token | |||
11300 | bool ARMAsmParser::parseDirectiveArch(SMLoc L) { | |||
11301 | StringRef Arch = getParser().parseStringToEndOfStatement().trim(); | |||
11302 | ARM::ArchKind ID = ARM::parseArch(Arch); | |||
11303 | ||||
11304 | if (ID == ARM::ArchKind::INVALID) | |||
11305 | return Error(L, "Unknown arch name"); | |||
11306 | ||||
11307 | bool WasThumb = isThumb(); | |||
11308 | Triple T; | |||
11309 | MCSubtargetInfo &STI = copySTI(); | |||
11310 | STI.setDefaultFeatures("", /*TuneCPU*/ "", | |||
11311 | ("+" + ARM::getArchName(ID)).str()); | |||
11312 | setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits())); | |||
11313 | FixModeAfterArchChange(WasThumb, L); | |||
11314 | ||||
11315 | getTargetStreamer().emitArch(ID); | |||
11316 | return false; | |||
11317 | } | |||
11318 | ||||
/// parseDirectiveEabiAttr
///  ::= .eabi_attribute int, int [, "str"]
///  ::= .eabi_attribute Tag_name, int [, "str"]
bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
  MCAsmParser &Parser = getParser();
  int64_t Tag;
  SMLoc TagLoc;
  TagLoc = Parser.getTok().getLoc();
  // The tag may be given symbolically (e.g. Tag_CPU_name) or as an integer
  // expression.
  if (Parser.getTok().is(AsmToken::Identifier)) {
    StringRef Name = Parser.getTok().getIdentifier();
    Optional<unsigned> Ret = ELFAttrs::attrTypeFromString(
        Name, ARMBuildAttrs::getARMAttributeTags());
    if (!Ret.hasValue()) {
      // Reports the error but returns false so parsing continues.
      Error(TagLoc, "attribute name not recognised: " + Name);
      return false;
    }
    Tag = Ret.getValue();
    Parser.Lex();
  } else {
    const MCExpr *AttrExpr;

    TagLoc = Parser.getTok().getLoc();
    if (Parser.parseExpression(AttrExpr))
      return true;

    // The tag must fold to a compile-time constant.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(AttrExpr);
    if (check(!CE, TagLoc, "expected numeric constant"))
      return true;

    Tag = CE->getValue();
  }

  if (Parser.parseToken(AsmToken::Comma, "comma expected"))
    return true;

  StringRef StringValue = "";
  bool IsStringValue = false;

  int64_t IntegerValue = 0;
  bool IsIntegerValue = false;

  // Decide what value forms this tag takes: CPU_raw_name/CPU_name are
  // string-valued, Tag_compatibility takes both an integer and a string, and
  // otherwise tags < 32 or even-numbered tags are integer-valued while the
  // remaining (odd, >= 32) tags are string-valued.
  if (Tag == ARMBuildAttrs::CPU_raw_name || Tag == ARMBuildAttrs::CPU_name)
    IsStringValue = true;
  else if (Tag == ARMBuildAttrs::compatibility) {
    IsStringValue = true;
    IsIntegerValue = true;
  } else if (Tag < 32 || Tag % 2 == 0)
    IsIntegerValue = true;
  else if (Tag % 2 == 1)
    IsStringValue = true;
  else
    llvm_unreachable("invalid tag type");

  if (IsIntegerValue) {
    const MCExpr *ValueExpr;
    SMLoc ValueExprLoc = Parser.getTok().getLoc();
    if (Parser.parseExpression(ValueExpr))
      return true;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ValueExpr);
    if (!CE)
      return Error(ValueExprLoc, "expected numeric constant");
    IntegerValue = CE->getValue();
  }

  // Tag_compatibility is "int, string": consume the comma between the two.
  if (Tag == ARMBuildAttrs::compatibility) {
    if (Parser.parseToken(AsmToken::Comma, "comma expected"))
      return true;
  }

  if (IsStringValue) {
    if (Parser.getTok().isNot(AsmToken::String))
      return Error(Parser.getTok().getLoc(), "bad string constant");

    StringValue = Parser.getTok().getStringContents();
    Parser.Lex();
  }

  if (Parser.parseToken(AsmToken::EndOfStatement,
                        "unexpected token in '.eabi_attribute' directive"))
    return true;

  // Emit through the target streamer in whichever form(s) the tag requires.
  if (IsIntegerValue && IsStringValue) {
    assert(Tag == ARMBuildAttrs::compatibility);
    getTargetStreamer().emitIntTextAttribute(Tag, IntegerValue, StringValue);
  } else if (IsIntegerValue)
    getTargetStreamer().emitAttribute(Tag, IntegerValue);
  else if (IsStringValue)
    getTargetStreamer().emitTextAttribute(Tag, StringValue);
  return false;
}
11410 | ||||
11411 | /// parseDirectiveCPU | |||
11412 | /// ::= .cpu str | |||
11413 | bool ARMAsmParser::parseDirectiveCPU(SMLoc L) { | |||
11414 | StringRef CPU = getParser().parseStringToEndOfStatement().trim(); | |||
11415 | getTargetStreamer().emitTextAttribute(ARMBuildAttrs::CPU_name, CPU); | |||
11416 | ||||
11417 | // FIXME: This is using table-gen data, but should be moved to | |||
11418 | // ARMTargetParser once that is table-gen'd. | |||
11419 | if (!getSTI().isCPUStringValid(CPU)) | |||
11420 | return Error(L, "Unknown CPU name"); | |||
11421 | ||||
11422 | bool WasThumb = isThumb(); | |||
11423 | MCSubtargetInfo &STI = copySTI(); | |||
11424 | STI.setDefaultFeatures(CPU, /*TuneCPU*/ CPU, ""); | |||
11425 | setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits())); | |||
11426 | FixModeAfterArchChange(WasThumb, L); | |||
11427 | ||||
11428 | return false; | |||
11429 | } | |||
11430 | ||||
11431 | /// parseDirectiveFPU | |||
11432 | /// ::= .fpu str | |||
11433 | bool ARMAsmParser::parseDirectiveFPU(SMLoc L) { | |||
11434 | SMLoc FPUNameLoc = getTok().getLoc(); | |||
11435 | StringRef FPU = getParser().parseStringToEndOfStatement().trim(); | |||
11436 | ||||
11437 | unsigned ID = ARM::parseFPU(FPU); | |||
11438 | std::vector<StringRef> Features; | |||
11439 | if (!ARM::getFPUFeatures(ID, Features)) | |||
11440 | return Error(FPUNameLoc, "Unknown FPU name"); | |||
11441 | ||||
11442 | MCSubtargetInfo &STI = copySTI(); | |||
11443 | for (auto Feature : Features) | |||
11444 | STI.ApplyFeatureFlag(Feature); | |||
11445 | setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits())); | |||
11446 | ||||
11447 | getTargetStreamer().emitFPU(ID); | |||
11448 | return false; | |||
11449 | } | |||
11450 | ||||
11451 | /// parseDirectiveFnStart | |||
11452 | /// ::= .fnstart | |||
11453 | bool ARMAsmParser::parseDirectiveFnStart(SMLoc L) { | |||
11454 | if (parseToken(AsmToken::EndOfStatement, | |||
11455 | "unexpected token in '.fnstart' directive")) | |||
11456 | return true; | |||
11457 | ||||
11458 | if (UC.hasFnStart()) { | |||
11459 | Error(L, ".fnstart starts before the end of previous one"); | |||
11460 | UC.emitFnStartLocNotes(); | |||
11461 | return true; | |||
11462 | } | |||
11463 | ||||
11464 | // Reset the unwind directives parser state | |||
11465 | UC.reset(); | |||
11466 | ||||
11467 | getTargetStreamer().emitFnStart(); | |||
11468 | ||||
11469 | UC.recordFnStart(L); | |||
11470 | return false; | |||
11471 | } | |||
11472 | ||||
11473 | /// parseDirectiveFnEnd | |||
11474 | /// ::= .fnend | |||
11475 | bool ARMAsmParser::parseDirectiveFnEnd(SMLoc L) { | |||
11476 | if (parseToken(AsmToken::EndOfStatement, | |||
11477 | "unexpected token in '.fnend' directive")) | |||
11478 | return true; | |||
11479 | // Check the ordering of unwind directives | |||
11480 | if (!UC.hasFnStart()) | |||
11481 | return Error(L, ".fnstart must precede .fnend directive"); | |||
11482 | ||||
11483 | // Reset the unwind directives parser state | |||
11484 | getTargetStreamer().emitFnEnd(); | |||
11485 | ||||
11486 | UC.reset(); | |||
11487 | return false; | |||
11488 | } | |||
11489 | ||||
11490 | /// parseDirectiveCantUnwind | |||
11491 | /// ::= .cantunwind | |||
11492 | bool ARMAsmParser::parseDirectiveCantUnwind(SMLoc L) { | |||
11493 | if (parseToken(AsmToken::EndOfStatement, | |||
11494 | "unexpected token in '.cantunwind' directive")) | |||
11495 | return true; | |||
11496 | ||||
11497 | UC.recordCantUnwind(L); | |||
11498 | // Check the ordering of unwind directives | |||
11499 | if (check(!UC.hasFnStart(), L, ".fnstart must precede .cantunwind directive")) | |||
11500 | return true; | |||
11501 | ||||
11502 | if (UC.hasHandlerData()) { | |||
11503 | Error(L, ".cantunwind can't be used with .handlerdata directive"); | |||
11504 | UC.emitHandlerDataLocNotes(); | |||
11505 | return true; | |||
11506 | } | |||
11507 | if (UC.hasPersonality()) { | |||
11508 | Error(L, ".cantunwind can't be used with .personality directive"); | |||
11509 | UC.emitPersonalityLocNotes(); | |||
11510 | return true; | |||
11511 | } | |||
11512 | ||||
11513 | getTargetStreamer().emitCantUnwind(); | |||
11514 | return false; | |||
11515 | } | |||
11516 | ||||
/// parseDirectivePersonality
///  ::= .personality name
bool ARMAsmParser::parseDirectivePersonality(SMLoc L) {
  MCAsmParser &Parser = getParser();
  // Remember whether a personality routine was already active *before* this
  // directive is recorded, so a duplicate can be diagnosed below.
  bool HasExistingPersonality = UC.hasPersonality();

  // Parse the name of the personality routine
  if (Parser.getTok().isNot(AsmToken::Identifier))
    return Error(L, "unexpected input in .personality directive.");
  StringRef Name(Parser.getTok().getIdentifier());
  Parser.Lex();

  if (parseToken(AsmToken::EndOfStatement,
                 "unexpected token in '.personality' directive"))
    return true;

  // Record the directive's location before the ordering checks so that
  // later diagnostics can emit a note pointing back here.
  UC.recordPersonality(L);

  // Check the ordering of unwind directives
  if (!UC.hasFnStart())
    return Error(L, ".fnstart must precede .personality directive");
  if (UC.cantUnwind()) {
    Error(L, ".personality can't be used with .cantunwind directive");
    UC.emitCantUnwindLocNotes();
    return true;
  }
  if (UC.hasHandlerData()) {
    Error(L, ".personality must precede .handlerdata directive");
    UC.emitHandlerDataLocNotes();
    return true;
  }
  if (HasExistingPersonality) {
    Error(L, "multiple personality directives");
    UC.emitPersonalityLocNotes();
    return true;
  }

  // Emit a reference to the named personality routine via the target
  // streamer (creating the symbol if it does not exist yet).
  MCSymbol *PR = getParser().getContext().getOrCreateSymbol(Name);
  getTargetStreamer().emitPersonality(PR);
  return false;
}
11558 | ||||
11559 | /// parseDirectiveHandlerData | |||
11560 | /// ::= .handlerdata | |||
11561 | bool ARMAsmParser::parseDirectiveHandlerData(SMLoc L) { | |||
11562 | if (parseToken(AsmToken::EndOfStatement, | |||
11563 | "unexpected token in '.handlerdata' directive")) | |||
11564 | return true; | |||
11565 | ||||
11566 | UC.recordHandlerData(L); | |||
11567 | // Check the ordering of unwind directives | |||
11568 | if (!UC.hasFnStart()) | |||
11569 | return Error(L, ".fnstart must precede .personality directive"); | |||
11570 | if (UC.cantUnwind()) { | |||
11571 | Error(L, ".handlerdata can't be used with .cantunwind directive"); | |||
11572 | UC.emitCantUnwindLocNotes(); | |||
11573 | return true; | |||
11574 | } | |||
11575 | ||||
11576 | getTargetStreamer().emitHandlerData(); | |||
11577 | return false; | |||
11578 | } | |||
11579 | ||||
/// parseDirectiveSetFP
///  ::= .setfp fpreg, spreg [, offset]
bool ARMAsmParser::parseDirectiveSetFP(SMLoc L) {
  MCAsmParser &Parser = getParser();
  // Check the ordering of unwind directives
  if (check(!UC.hasFnStart(), L, ".fnstart must precede .setfp directive") ||
      check(UC.hasHandlerData(), L,
            ".setfp must precede .handlerdata directive"))
    return true;

  // Parse fpreg
  SMLoc FPRegLoc = Parser.getTok().getLoc();
  int FPReg = tryParseRegister(); // -1 on failure

  if (check(FPReg == -1, FPRegLoc, "frame pointer register expected") ||
      Parser.parseToken(AsmToken::Comma, "comma expected"))
    return true;

  // Parse spreg. It must be either sp itself or the frame-pointer register
  // established by a previous .setfp/.movsp.
  SMLoc SPRegLoc = Parser.getTok().getLoc();
  int SPReg = tryParseRegister();
  if (check(SPReg == -1, SPRegLoc, "stack pointer register expected") ||
      check(SPReg != ARM::SP && SPReg != UC.getFPReg(), SPRegLoc,
            "register should be either $sp or the latest fp register"))
    return true;

  // Update the frame pointer register
  UC.saveFPReg(FPReg);

  // Parse the optional immediate offset, introduced by '#' or '$'.
  int64_t Offset = 0;
  if (Parser.parseOptionalToken(AsmToken::Comma)) {
    if (Parser.getTok().isNot(AsmToken::Hash) &&
        Parser.getTok().isNot(AsmToken::Dollar))
      return Error(Parser.getTok().getLoc(), "'#' expected");
    Parser.Lex(); // skip hash token.

    const MCExpr *OffsetExpr;
    SMLoc ExLoc = Parser.getTok().getLoc();
    SMLoc EndLoc;
    if (getParser().parseExpression(OffsetExpr, EndLoc))
      return Error(ExLoc, "malformed setfp offset");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
    if (check(!CE, ExLoc, "setfp offset must be an immediate"))
      return true;
    Offset = CE->getValue();
  }

  if (Parser.parseToken(AsmToken::EndOfStatement))
    return true;

  getTargetStreamer().emitSetFP(static_cast<unsigned>(FPReg),
                                static_cast<unsigned>(SPReg), Offset);
  return false;
}
11635 | ||||
/// parseDirectivePad
///  ::= .pad offset
bool ARMAsmParser::parseDirectivePad(SMLoc L) {
  MCAsmParser &Parser = getParser();
  // Check the ordering of unwind directives
  if (!UC.hasFnStart())
    return Error(L, ".fnstart must precede .pad directive");
  if (UC.hasHandlerData())
    return Error(L, ".pad must precede .handlerdata directive");

  // Parse the offset; it must be written as an immediate with a '#' (or
  // '$') prefix.
  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar))
    return Error(Parser.getTok().getLoc(), "'#' expected");
  Parser.Lex(); // skip hash token.

  const MCExpr *OffsetExpr;
  SMLoc ExLoc = Parser.getTok().getLoc();
  SMLoc EndLoc;
  if (getParser().parseExpression(OffsetExpr, EndLoc))
    return Error(ExLoc, "malformed pad offset");
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
  if (!CE)
    return Error(ExLoc, "pad offset must be an immediate");

  if (parseToken(AsmToken::EndOfStatement,
                 "unexpected token in '.pad' directive"))
    return true;

  // Record the stack-pointer adjustment in the unwind opcodes.
  getTargetStreamer().emitPad(CE->getValue());
  return false;
}
11668 | ||||
/// parseDirectiveRegSave
///  ::= .save  { registers }
///  ::= .vsave { registers }
bool ARMAsmParser::parseDirectiveRegSave(SMLoc L, bool IsVector) {
  // Check the ordering of unwind directives
  if (!UC.hasFnStart())
    return Error(L, ".fnstart must precede .save or .vsave directives");
  if (UC.hasHandlerData())
    return Error(L, ".save or .vsave must precede .handlerdata directive");

  // RAII object to make sure parsed operands are deleted.
  SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> Operands;

  // Parse the register list
  if (parseRegisterList(Operands) ||
      parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
    return true;
  ARMOperand &Op = (ARMOperand &)*Operands[0];
  // .save takes a GPR list, .vsave a DPR (VFP double-precision) list.
  if (!IsVector && !Op.isRegList())
    return Error(L, ".save expects GPR registers");
  if (IsVector && !Op.isDPRRegList())
    return Error(L, ".vsave expects DPR registers");

  getTargetStreamer().emitRegSave(Op.getRegList(), IsVector);
  return false;
}
11695 | ||||
/// parseDirectiveInst
///  ::= .inst opcode [, ...]
///  ::= .inst.n opcode [, ...]
///  ::= .inst.w opcode [, ...]
bool ARMAsmParser::parseDirectiveInst(SMLoc Loc, char Suffix) {
  // Encoding width in bytes: 4 for ARM and .inst.w, 2 for .inst.n, and 0
  // meaning "guess per operand" for an unsuffixed Thumb .inst.
  int Width = 4;

  if (isThumb()) {
    switch (Suffix) {
    case 'n':
      Width = 2;
      break;
    case 'w':
      break;
    default:
      Width = 0;
      break;
    }
  } else {
    // ARM instructions are always 4 bytes, so width suffixes are rejected.
    if (Suffix)
      return Error(Loc, "width suffixes are invalid in ARM mode");
  }

  // Parse, range-check and emit a single constant operand.
  auto parseOne = [&]() -> bool {
    const MCExpr *Expr;
    if (getParser().parseExpression(Expr))
      return true;
    const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
    if (!Value) {
      return Error(Loc, "expected constant expression");
    }

    char CurSuffix = Suffix;
    switch (Width) {
    case 2:
      if (Value->getValue() > 0xffff)
        return Error(Loc, "inst.n operand is too big, use inst.w instead");
      break;
    case 4:
      if (Value->getValue() > 0xffffffff)
        return Error(Loc, StringRef(Suffix ? "inst.w" : "inst") +
                              " operand is too big");
      break;
    case 0:
      // Thumb mode, no width indicated. Guess from the opcode, if possible.
      // Values below 0xe800 can only be 16-bit encodings; values whose top
      // halfword is >= 0xe800 must be 32-bit encodings; anything in between
      // is ambiguous and the user must pick a suffix.
      if (Value->getValue() < 0xe800)
        CurSuffix = 'n';
      else if (Value->getValue() >= 0xe8000000)
        CurSuffix = 'w';
      else
        return Error(Loc, "cannot determine Thumb instruction size, "
                          "use inst.n/inst.w instead");
      break;
    default:
      llvm_unreachable("only supported widths are 2 and 4");
    }

    getTargetStreamer().emitInst(Value->getValue(), CurSuffix);
    return false;
  };

  // At least one operand is required.
  if (parseOptionalToken(AsmToken::EndOfStatement))
    return Error(Loc, "expected expression following directive");
  if (parseMany(parseOne))
    return true;
  return false;
}
11763 | ||||
11764 | /// parseDirectiveLtorg | |||
11765 | /// ::= .ltorg | .pool | |||
11766 | bool ARMAsmParser::parseDirectiveLtorg(SMLoc L) { | |||
11767 | if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive")) | |||
11768 | return true; | |||
11769 | getTargetStreamer().emitCurrentConstantPool(); | |||
11770 | return false; | |||
11771 | } | |||
11772 | ||||
/// parseDirectiveEven
///  ::= .even
/// Aligns the current location to a 2-byte boundary.
bool ARMAsmParser::parseDirectiveEven(SMLoc L) {
  const MCSection *Section = getStreamer().getCurrentSectionOnly();

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
    return true;

  // If no section has been entered yet, switch to the default sections so
  // there is somewhere to emit the alignment.
  if (!Section) {
    getStreamer().InitSections(false);
    Section = getStreamer().getCurrentSectionOnly();
  }

  assert(Section && "must have section to emit alignment");
  // Code sections are padded with NOP-like code alignment; other sections
  // with plain value padding.
  if (Section->UseCodeAlign())
    getStreamer().emitCodeAlignment(2);
  else
    getStreamer().emitValueToAlignment(2);

  return false;
}
11792 | ||||
/// parseDirectivePersonalityIndex
///   ::= .personalityindex index
bool ARMAsmParser::parseDirectivePersonalityIndex(SMLoc L) {
  MCAsmParser &Parser = getParser();
  // Capture whether a personality was already present before this directive
  // is recorded, so duplicates can be diagnosed below.
  bool HasExistingPersonality = UC.hasPersonality();

  const MCExpr *IndexExpression;
  SMLoc IndexLoc = Parser.getTok().getLoc();
  if (Parser.parseExpression(IndexExpression) ||
      parseToken(AsmToken::EndOfStatement,
                 "unexpected token in '.personalityindex' directive")) {
    return true;
  }

  // Record first so later diagnostics can emit a note at this location.
  UC.recordPersonalityIndex(L);

  // Check the ordering of unwind directives
  if (!UC.hasFnStart()) {
    return Error(L, ".fnstart must precede .personalityindex directive");
  }
  if (UC.cantUnwind()) {
    Error(L, ".personalityindex cannot be used with .cantunwind");
    UC.emitCantUnwindLocNotes();
    return true;
  }
  if (UC.hasHandlerData()) {
    Error(L, ".personalityindex must precede .handlerdata directive");
    UC.emitHandlerDataLocNotes();
    return true;
  }
  if (HasExistingPersonality) {
    Error(L, "multiple personality directives");
    UC.emitPersonalityLocNotes();
    return true;
  }

  // The index must be a constant in [0, NUM_PERSONALITY_INDEX), selecting
  // one of the standard EHABI personality routines.
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(IndexExpression);
  if (!CE)
    return Error(IndexLoc, "index must be a constant number");
  if (CE->getValue() < 0 || CE->getValue() >= ARM::EHABI::NUM_PERSONALITY_INDEX)
    return Error(IndexLoc,
                 "personality routine index should be in range [0-3]");

  getTargetStreamer().emitPersonalityIndex(CE->getValue());
  return false;
}
11838 | ||||
/// parseDirectiveUnwindRaw
///   ::= .unwind_raw offset, opcode [, opcode...]
bool ARMAsmParser::parseDirectiveUnwindRaw(SMLoc L) {
  MCAsmParser &Parser = getParser();
  int64_t StackOffset;
  const MCExpr *OffsetExpr;
  SMLoc OffsetLoc = getLexer().getLoc();

  if (!UC.hasFnStart())
    return Error(L, ".fnstart must precede .unwind_raw directives");
  if (getParser().parseExpression(OffsetExpr))
    return Error(OffsetLoc, "expected expression");

  // The stack offset must be a compile-time constant.
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
  if (!CE)
    return Error(OffsetLoc, "offset must be a constant");

  StackOffset = CE->getValue();

  if (Parser.parseToken(AsmToken::Comma, "expected comma"))
    return true;

  // Raw EHABI unwind opcodes, one byte each.
  SmallVector<uint8_t, 16> Opcodes;

  // Parse a single opcode: a constant expression that fits in one byte.
  auto parseOne = [&]() -> bool {
    const MCExpr *OE = nullptr;
    SMLoc OpcodeLoc = getLexer().getLoc();
    if (check(getLexer().is(AsmToken::EndOfStatement) ||
                  Parser.parseExpression(OE),
              OpcodeLoc, "expected opcode expression"))
      return true;
    const MCConstantExpr *OC = dyn_cast<MCConstantExpr>(OE);
    if (!OC)
      return Error(OpcodeLoc, "opcode value must be a constant");
    const int64_t Opcode = OC->getValue();
    // Reject anything with bits outside the low byte.
    if (Opcode & ~0xff)
      return Error(OpcodeLoc, "invalid opcode");
    Opcodes.push_back(uint8_t(Opcode));
    return false;
  };

  // Must have at least 1 element
  SMLoc OpcodeLoc = getLexer().getLoc();
  if (parseOptionalToken(AsmToken::EndOfStatement))
    return Error(OpcodeLoc, "expected opcode expression");
  if (parseMany(parseOne))
    return true;

  getTargetStreamer().emitUnwindRaw(StackOffset, Opcodes);
  return false;
}
11890 | ||||
/// parseDirectiveTLSDescSeq
///   ::= .tlsdescseq tls-variable
bool ARMAsmParser::parseDirectiveTLSDescSeq(SMLoc L) {
  MCAsmParser &Parser = getParser();

  if (getLexer().isNot(AsmToken::Identifier))
    return TokError("expected variable after '.tlsdescseq' directive");

  // Build a TLSDESCSEQ-flavoured symbol reference to the named TLS variable.
  const MCSymbolRefExpr *SRE =
    MCSymbolRefExpr::create(Parser.getTok().getIdentifier(),
                            MCSymbolRefExpr::VK_ARM_TLSDESCSEQ, getContext());
  Lex();

  if (parseToken(AsmToken::EndOfStatement,
                 "unexpected token in '.tlsdescseq' directive"))
    return true;

  // Mark the stream so the TLS descriptor sequence can be recognised (and
  // potentially relaxed) later.
  getTargetStreamer().AnnotateTLSDescriptorSequence(SRE);
  return false;
}
11911 | ||||
/// parseDirectiveMovSP
///  ::= .movsp reg [, #offset]
bool ARMAsmParser::parseDirectiveMovSP(SMLoc L) {
  MCAsmParser &Parser = getParser();
  // .movsp records that 'reg' now holds the value of sp (plus an optional
  // offset); it is only valid after .fnstart and while sp is still the
  // frame pointer (i.e. before any .setfp/.movsp).
  if (!UC.hasFnStart())
    return Error(L, ".fnstart must precede .movsp directives");
  if (UC.getFPReg() != ARM::SP)
    return Error(L, "unexpected .movsp directive");

  SMLoc SPRegLoc = Parser.getTok().getLoc();
  int SPReg = tryParseRegister(); // -1 on failure
  if (SPReg == -1)
    return Error(SPRegLoc, "register expected");
  if (SPReg == ARM::SP || SPReg == ARM::PC)
    return Error(SPRegLoc, "sp and pc are not permitted in .movsp directive");

  // Optional immediate offset, introduced by '#'.
  int64_t Offset = 0;
  if (Parser.parseOptionalToken(AsmToken::Comma)) {
    if (Parser.parseToken(AsmToken::Hash, "expected #constant"))
      return true;

    const MCExpr *OffsetExpr;
    SMLoc OffsetLoc = Parser.getTok().getLoc();

    if (Parser.parseExpression(OffsetExpr))
      return Error(OffsetLoc, "malformed offset expression");

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
    if (!CE)
      return Error(OffsetLoc, "offset must be an immediate constant");

    Offset = CE->getValue();
  }

  if (parseToken(AsmToken::EndOfStatement,
                 "unexpected token in '.movsp' directive"))
    return true;

  getTargetStreamer().emitMovSP(SPReg, Offset);
  // From here on, SPReg acts as the frame pointer for unwinding purposes.
  UC.saveFPReg(SPReg);

  return false;
}
11955 | ||||
/// parseDirectiveObjectArch
///   ::= .object_arch name
bool ARMAsmParser::parseDirectiveObjectArch(SMLoc L) {
  MCAsmParser &Parser = getParser();
  if (getLexer().isNot(AsmToken::Identifier))
    return Error(getLexer().getLoc(), "unexpected token");

  StringRef Arch = Parser.getTok().getString();
  SMLoc ArchLoc = Parser.getTok().getLoc();
  Lex();

  // Map the architecture name to its canonical ArchKind enumerator.
  ARM::ArchKind ID = ARM::parseArch(Arch);

  if (ID == ARM::ArchKind::INVALID)
    return Error(ArchLoc, "unknown architecture '" + Arch + "'");
  if (parseToken(AsmToken::EndOfStatement))
    return true;

  // Record the architecture in the object attributes without changing the
  // architecture used for assembling subsequent code.
  getTargetStreamer().emitObjectArch(ID);
  return false;
}
11977 | ||||
/// parseDirectiveAlign
///   ::= .align
bool ARMAsmParser::parseDirectiveAlign(SMLoc L) {
  // NOTE: if this is not the end of the statement, fall back to the target
  // agnostic handling for this directive which will correctly handle this.
  if (parseOptionalToken(AsmToken::EndOfStatement)) {
    // '.align' is target specifically handled to mean 2**2 byte alignment.
    const MCSection *Section = getStreamer().getCurrentSectionOnly();
    assert(Section && "must have section to emit alignment");
    // Code sections get NOP-style code alignment; others get zero fill.
    if (Section->UseCodeAlign())
      getStreamer().emitCodeAlignment(4, 0);
    else
      getStreamer().emitValueToAlignment(4, 0, 1, 0);
    return false;
  }
  // Returning true here defers to the generic .align handling.
  return true;
}
11995 | ||||
/// parseDirectiveThumbSet
///  ::= .thumb_set name, value
bool ARMAsmParser::parseDirectiveThumbSet(SMLoc L) {
  MCAsmParser &Parser = getParser();

  StringRef Name;
  if (check(Parser.parseIdentifier(Name),
            "expected identifier after '.thumb_set'") ||
      parseToken(AsmToken::Comma, "expected comma after name '" + Name + "'"))
    return true;

  // Parse the "name = value" assignment; redefinition is allowed, as with
  // the generic .set directive.
  MCSymbol *Sym;
  const MCExpr *Value;
  if (MCParserUtils::parseAssignmentExpression(Name, /* allow_redef */ true,
                                               Parser, Sym, Value))
    return true;

  // Like .set, but the target streamer additionally marks the symbol as a
  // Thumb function entry point.
  getTargetStreamer().emitThumbSet(Sym, Value);
  return false;
}
12016 | ||||
12017 | /// Force static initialization. | |||
12018 | extern "C" LLVM_EXTERNAL_VISIBILITY__attribute__ ((visibility("default"))) void LLVMInitializeARMAsmParser() { | |||
12019 | RegisterMCAsmParser<ARMAsmParser> X(getTheARMLETarget()); | |||
12020 | RegisterMCAsmParser<ARMAsmParser> Y(getTheARMBETarget()); | |||
12021 | RegisterMCAsmParser<ARMAsmParser> A(getTheThumbLETarget()); | |||
12022 | RegisterMCAsmParser<ARMAsmParser> B(getTheThumbBETarget()); | |||
12023 | } | |||
12024 | ||||
12025 | #define GET_REGISTER_MATCHER | |||
12026 | #define GET_SUBTARGET_FEATURE_NAME | |||
12027 | #define GET_MATCHER_IMPLEMENTATION | |||
12028 | #define GET_MNEMONIC_SPELL_CHECKER | |||
12029 | #include "ARMGenAsmMatcher.inc" | |||
12030 | ||||
12031 | // Some diagnostics need to vary with subtarget features, so they are handled | |||
12032 | // here. For example, the DPR class has either 16 or 32 registers, depending | |||
12033 | // on the FPU available. | |||
12034 | const char * | |||
12035 | ARMAsmParser::getCustomOperandDiag(ARMMatchResultTy MatchError) { | |||
12036 | switch (MatchError) { | |||
12037 | // rGPR contains sp starting with ARMv8. | |||
12038 | case Match_rGPR: | |||
12039 | return hasV8Ops() ? "operand must be a register in range [r0, r14]" | |||
12040 | : "operand must be a register in range [r0, r12] or r14"; | |||
12041 | // DPR contains 16 registers for some FPUs, and 32 for others. | |||
12042 | case Match_DPR: | |||
12043 | return hasD32() ? "operand must be a register in range [d0, d31]" | |||
12044 | : "operand must be a register in range [d0, d15]"; | |||
12045 | case Match_DPR_RegList: | |||
12046 | return hasD32() ? "operand must be a list of registers in range [d0, d31]" | |||
12047 | : "operand must be a list of registers in range [d0, d15]"; | |||
12048 | ||||
12049 | // For all other diags, use the static string from tablegen. | |||
12050 | default: | |||
12051 | return getMatchKindDiag(MatchError); | |||
12052 | } | |||
12053 | } | |||
12054 | ||||
// Process the list of near-misses, throwing away ones we don't want to report
// to the user, and converting the rest to a source location and string that
// should be reported.
void
ARMAsmParser::FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
                               SmallVectorImpl<NearMissMessage> &NearMissesOut,
                               SMLoc IDLoc, OperandVector &Operands) {
  // TODO: If operand didn't match, sub in a dummy one and run target
  // predicate, so that we can avoid reporting near-misses that are invalid?
  // TODO: Many operand types dont have SuperClasses set, so we report
  // redundant ones.
  // TODO: Some operands are superclasses of registers (e.g.
  // MCK_RegShiftedImm), we don't have any way to represent that currently.
  // TODO: This is not all ARM-specific, can some of it be factored out?

  // Record some information about near-misses that we have already seen, so
  // that we can avoid reporting redundant ones. For example, if there are
  // variants of an instruction that take 8- and 16-bit immediates, we want
  // to only report the widest one.
  std::multimap<unsigned, unsigned> OperandMissesSeen;
  SmallSet<FeatureBitset, 4> FeatureMissesSeen;
  bool ReportedTooFewOperands = false;

  // Process the near-misses in reverse order, so that we see more general ones
  // first, and so can avoid emitting more specific ones.
  for (NearMissInfo &I : reverse(NearMissesIn)) {
    switch (I.getKind()) {
    case NearMissInfo::NearMissOperand: {
      // A single operand failed to match; report it at that operand's
      // location, with a feature-aware message if one exists for its class.
      SMLoc OperandLoc =
          ((ARMOperand &)*Operands[I.getOperandIndex()]).getStartLoc();
      const char *OperandDiag =
          getCustomOperandDiag((ARMMatchResultTy)I.getOperandError());

      // If we have already emitted a message for a superclass, don't also report
      // the sub-class. We consider all operand classes that we don't have a
      // specialised diagnostic for to be equal for the propose of this check,
      // so that we don't report the generic error multiple times on the same
      // operand.
      unsigned DupCheckMatchClass = OperandDiag ? I.getOperandClass() : ~0U;
      auto PrevReports = OperandMissesSeen.equal_range(I.getOperandIndex());
      if (std::any_of(PrevReports.first, PrevReports.second,
                      [DupCheckMatchClass](
                          const std::pair<unsigned, unsigned> Pair) {
            if (DupCheckMatchClass == ~0U || Pair.second == ~0U)
              return Pair.second == DupCheckMatchClass;
            else
              return isSubclass((MatchClassKind)DupCheckMatchClass,
                                (MatchClassKind)Pair.second);
          }))
        break;
      OperandMissesSeen.insert(
          std::make_pair(I.getOperandIndex(), DupCheckMatchClass));

      NearMissMessage Message;
      Message.Loc = OperandLoc;
      if (OperandDiag) {
        Message.Message = OperandDiag;
      } else if (I.getOperandClass() == InvalidMatchClass) {
        Message.Message = "too many operands for instruction";
      } else {
        Message.Message = "invalid operand for instruction";
        LLVM_DEBUG(
            dbgs() << "Missing diagnostic string for operand class "
                   << getMatchClassName((MatchClassKind)I.getOperandClass())
                   << I.getOperandClass() << ", error " << I.getOperandError()
                   << ", opcode " << MII.getName(I.getOpcode()) << "\n");
      }
      NearMissesOut.emplace_back(Message);
      break;
    }
    case NearMissInfo::NearMissFeature: {
      const FeatureBitset &MissingFeatures = I.getFeatures();
      // Don't report the same set of features twice.
      if (FeatureMissesSeen.count(MissingFeatures))
        break;
      FeatureMissesSeen.insert(MissingFeatures);

      // Special case: don't report a feature set which includes arm-mode for
      // targets that don't have ARM mode.
      if (MissingFeatures.test(Feature_IsARMBit) && !hasARM())
        break;
      // Don't report any near-misses that both require switching instruction
      // set, and adding other subtarget features.
      if (isThumb() && MissingFeatures.test(Feature_IsARMBit) &&
          MissingFeatures.count() > 1)
        break;
      if (!isThumb() && MissingFeatures.test(Feature_IsThumbBit) &&
          MissingFeatures.count() > 1)
        break;
      if (!isThumb() && MissingFeatures.test(Feature_IsThumb2Bit) &&
          (MissingFeatures & ~FeatureBitset({Feature_IsThumb2Bit,
                                             Feature_IsThumbBit})).any())
        break;
      if (isMClass() && MissingFeatures.test(Feature_HasNEONBit))
        break;

      // Build an "instruction requires: ..." message naming each missing
      // subtarget feature.
      NearMissMessage Message;
      Message.Loc = IDLoc;
      raw_svector_ostream OS(Message.Message);

      OS << "instruction requires:";
      for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i)
        if (MissingFeatures.test(i))
          OS << ' ' << getSubtargetFeatureName(i);

      NearMissesOut.emplace_back(Message);

      break;
    }
    case NearMissInfo::NearMissPredicate: {
      // The encoding matched but a target predicate rejected it; map the
      // predicate error to a fixed diagnostic string.
      NearMissMessage Message;
      Message.Loc = IDLoc;
      switch (I.getPredicateError()) {
      case Match_RequiresNotITBlock:
        Message.Message = "flag setting instruction only valid outside IT block";
        break;
      case Match_RequiresITBlock:
        Message.Message = "instruction only valid inside IT block";
        break;
      case Match_RequiresV6:
        Message.Message = "instruction variant requires ARMv6 or later";
        break;
      case Match_RequiresThumb2:
        Message.Message = "instruction variant requires Thumb2";
        break;
      case Match_RequiresV8:
        Message.Message = "instruction variant requires ARMv8 or later";
        break;
      case Match_RequiresFlagSetting:
        Message.Message = "no flag-preserving variant of this instruction available";
        break;
      case Match_InvalidOperand:
        Message.Message = "invalid operand for instruction";
        break;
      default:
        llvm_unreachable("Unhandled target predicate error");
        break;
      }
      NearMissesOut.emplace_back(Message);
      break;
    }
    case NearMissInfo::NearMissTooFewOperands: {
      // Report "too few operands" at most once, located after the last
      // operand that was provided.
      if (!ReportedTooFewOperands) {
        SMLoc EndLoc = ((ARMOperand &)*Operands.back()).getEndLoc();
        NearMissesOut.emplace_back(NearMissMessage{
            EndLoc, StringRef("too few operands for instruction")});
        ReportedTooFewOperands = true;
      }
      break;
    }
    case NearMissInfo::NoNearMiss:
      // This should never leave the matcher.
      llvm_unreachable("not a near-miss");
      break;
    }
  }
}
12212 | ||||
12213 | void ARMAsmParser::ReportNearMisses(SmallVectorImpl<NearMissInfo> &NearMisses, | |||
12214 | SMLoc IDLoc, OperandVector &Operands) { | |||
12215 | SmallVector<NearMissMessage, 4> Messages; | |||
12216 | FilterNearMisses(NearMisses, Messages, IDLoc, Operands); | |||
12217 | ||||
12218 | if (Messages.size() == 0) { | |||
12219 | // No near-misses were found, so the best we can do is "invalid | |||
12220 | // instruction". | |||
12221 | Error(IDLoc, "invalid instruction"); | |||
12222 | } else if (Messages.size() == 1) { | |||
12223 | // One near miss was found, report it as the sole error. | |||
12224 | Error(Messages[0].Loc, Messages[0].Message); | |||
12225 | } else { | |||
12226 | // More than one near miss, so report a generic "invalid instruction" | |||
12227 | // error, followed by notes for each of the near-misses. | |||
12228 | Error(IDLoc, "invalid instruction, any one of the following would fix this:"); | |||
12229 | for (auto &M : Messages) { | |||
12230 | Note(M.Loc, M.Message); | |||
12231 | } | |||
12232 | } | |||
12233 | } | |||
12234 | ||||
/// Enable or (with a leading "no") disable the architectural extension
/// \p Name on the current subtarget, as used by ".arch_extension".
///
/// Return-value protocol (note Error() itself returns true):
///   - true:  the feature bits were updated, OR a diagnostic was already
///            emitted here for a known-but-unusable extension.
///   - false: the extension parsed but has no entry in the table below;
///            the caller is expected to emit its own diagnostic.
bool ARMAsmParser::enableArchExtFeature(StringRef Name, SMLoc &ExtLoc) {
  // FIXME: This structure should be moved inside ARMTargetParser
  // when we start to table-generate them, and we can use the ARM
  // flags below, that were generated by table-gen.
  // Each entry maps an extension kind to the base-architecture features it
  // requires (ArchCheck) and the subtarget features it toggles (Features).
  // An empty Features set marks an extension we recognise but do not support.
  static const struct {
    const uint64_t Kind;
    const FeatureBitset ArchCheck;
    const FeatureBitset Features;
  } Extensions[] = {
      {ARM::AEK_CRC, {Feature_HasV8Bit}, {ARM::FeatureCRC}},
      {ARM::AEK_AES,
       {Feature_HasV8Bit},
       {ARM::FeatureAES, ARM::FeatureNEON, ARM::FeatureFPARMv8}},
      {ARM::AEK_SHA2,
       {Feature_HasV8Bit},
       {ARM::FeatureSHA2, ARM::FeatureNEON, ARM::FeatureFPARMv8}},
      {ARM::AEK_CRYPTO,
       {Feature_HasV8Bit},
       {ARM::FeatureCrypto, ARM::FeatureNEON, ARM::FeatureFPARMv8}},
      {ARM::AEK_FP,
       {Feature_HasV8Bit},
       {ARM::FeatureVFP2_SP, ARM::FeatureFPARMv8}},
      {(ARM::AEK_HWDIVTHUMB | ARM::AEK_HWDIVARM),
       {Feature_HasV7Bit, Feature_IsNotMClassBit},
       {ARM::FeatureHWDivThumb, ARM::FeatureHWDivARM}},
      {ARM::AEK_MP,
       {Feature_HasV7Bit, Feature_IsNotMClassBit},
       {ARM::FeatureMP}},
      {ARM::AEK_SIMD,
       {Feature_HasV8Bit},
       {ARM::FeatureNEON, ARM::FeatureVFP2_SP, ARM::FeatureFPARMv8}},
      {ARM::AEK_SEC, {Feature_HasV6KBit}, {ARM::FeatureTrustZone}},
      // FIXME: Only available in A-class, isel not predicated
      {ARM::AEK_VIRT, {Feature_HasV7Bit}, {ARM::FeatureVirtualization}},
      {ARM::AEK_FP16,
       {Feature_HasV8_2aBit},
       {ARM::FeatureFPARMv8, ARM::FeatureFullFP16}},
      {ARM::AEK_RAS, {Feature_HasV8Bit}, {ARM::FeatureRAS}},
      {ARM::AEK_LOB, {Feature_HasV8_1MMainlineBit}, {ARM::FeatureLOB}},
      // FIXME: Unsupported extensions.
      {ARM::AEK_OS, {}, {}},
      {ARM::AEK_IWMMXT, {}, {}},
      {ARM::AEK_IWMMXT2, {}, {}},
      {ARM::AEK_MAVERICK, {}, {}},
      {ARM::AEK_XSCALE, {}, {}},
  };
  // A case-insensitive "no" prefix requests disabling rather than enabling.
  bool EnableFeature = true;
  if (Name.startswith_insensitive("no")) {
    EnableFeature = false;
    Name = Name.substr(2);
  }
  uint64_t FeatureKind = ARM::parseArchExt(Name);
  if (FeatureKind == ARM::AEK_INVALID)
    return Error(ExtLoc, "unknown architectural extension: " + Name);

  for (const auto &Extension : Extensions) {
    if (Extension.Kind != FeatureKind)
      continue;

    // Recognised but unsupported extension (empty feature set in the table).
    if (Extension.Features.none())
      return Error(ExtLoc, "unsupported architectural extension: " + Name);

    // The extension is only valid on top of certain base architectures.
    if ((getAvailableFeatures() & Extension.ArchCheck) != Extension.ArchCheck)
      return Error(ExtLoc, "architectural extension '" + Name +
                               "' is not "
                               "allowed for the current base architecture");

    // Toggle the feature bits on a private copy of the subtarget info, then
    // recompute the assembler's available-feature mask from the new bits.
    MCSubtargetInfo &STI = copySTI();
    if (EnableFeature) {
      STI.SetFeatureBitsTransitively(Extension.Features);
    } else {
      STI.ClearFeatureBitsTransitively(Extension.Features);
    }
    FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
    setAvailableFeatures(Features);
    return true;
  }
  // Kind parsed but not present in the table: let the caller diagnose it.
  return false;
}
12314 | ||||
12315 | /// parseDirectiveArchExtension | |||
12316 | /// ::= .arch_extension [no]feature | |||
12317 | bool ARMAsmParser::parseDirectiveArchExtension(SMLoc L) { | |||
12318 | ||||
12319 | MCAsmParser &Parser = getParser(); | |||
12320 | ||||
12321 | if (getLexer().isNot(AsmToken::Identifier)) | |||
12322 | return Error(getLexer().getLoc(), "expected architecture extension name"); | |||
12323 | ||||
12324 | StringRef Name = Parser.getTok().getString(); | |||
12325 | SMLoc ExtLoc = Parser.getTok().getLoc(); | |||
12326 | Lex(); | |||
12327 | ||||
12328 | if (parseToken(AsmToken::EndOfStatement, | |||
12329 | "unexpected token in '.arch_extension' directive")) | |||
12330 | return true; | |||
12331 | ||||
12332 | if (Name == "nocrypto") { | |||
12333 | enableArchExtFeature("nosha2", ExtLoc); | |||
12334 | enableArchExtFeature("noaes", ExtLoc); | |||
12335 | } | |||
12336 | ||||
12337 | if (enableArchExtFeature(Name, ExtLoc)) | |||
12338 | return false; | |||
12339 | ||||
12340 | return Error(ExtLoc, "unknown architectural extension: " + Name); | |||
12341 | } | |||
12342 | ||||
// Define this matcher function after the auto-generated include so we
// have the match class enum definitions.
/// Target hook for the auto-generated matcher: decide whether the parsed
/// operand \p AsmOp satisfies match class \p Kind in cases the tablegen'd
/// predicates cannot express.  Returns Match_Success, a specific near-miss
/// code, or Match_InvalidOperand.
unsigned ARMAsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
                                                  unsigned Kind) {
  ARMOperand &Op = static_cast<ARMOperand &>(AsmOp);
  // If the kind is a token for a literal immediate, check if our asm
  // operand matches. This is for InstAliases which have a fixed-value
  // immediate in the syntax.
  switch (Kind) {
  default: break;
  case MCK__HASH_0:
    // Alias requires the literal immediate "#0".
    if (Op.isImm())
      if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()))
        if (CE->getValue() == 0)
          return Match_Success;
    break;
  case MCK__HASH_8:
    // Alias requires the literal immediate "#8".
    if (Op.isImm())
      if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()))
        if (CE->getValue() == 8)
          return Match_Success;
    break;
  case MCK__HASH_16:
    // Alias requires the literal immediate "#16".
    if (Op.isImm())
      if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()))
        if (CE->getValue() == 16)
          return Match_Success;
    break;
  case MCK_ModImm:
    if (Op.isImm()) {
      const MCExpr *SOExpr = Op.getImm();
      int64_t Value;
      // An expression that cannot be folded yet (e.g. uses a symbol) is
      // accepted here and validated later, once it can be evaluated.
      if (!SOExpr->evaluateAsAbsolute(Value))
        return Match_Success;
      assert((Value >= std::numeric_limits<int32_t>::min() &&
              Value <= std::numeric_limits<uint32_t>::max()) &&
             "expression value must be representable in 32 bits");
    }
    break;
  case MCK_rGPR:
    // From ARMv8 onwards SP is permitted where rGPR is expected; otherwise
    // report the rGPR-specific near-miss code.
    if (hasV8Ops() && Op.isReg() && Op.getReg() == ARM::SP)
      return Match_Success;
    return Match_rGPR;
  case MCK_GPRPair:
    // Accept any GPR here; pairing constraints are checked elsewhere.
    if (Op.isReg() &&
        MRI->getRegClass(ARM::GPRRegClassID).contains(Op.getReg()))
      return Match_Success;
    break;
  }
  return Match_InvalidOperand;
}
12394 | ||||
12395 | bool ARMAsmParser::isMnemonicVPTPredicable(StringRef Mnemonic, | |||
12396 | StringRef ExtraToken) { | |||
12397 | if (!hasMVE()) | |||
12398 | return false; | |||
12399 | ||||
12400 | return Mnemonic.startswith("vabav") || Mnemonic.startswith("vaddv") || | |||
12401 | Mnemonic.startswith("vaddlv") || Mnemonic.startswith("vminnmv") || | |||
12402 | Mnemonic.startswith("vminnmav") || Mnemonic.startswith("vminv") || | |||
12403 | Mnemonic.startswith("vminav") || Mnemonic.startswith("vmaxnmv") || | |||
12404 | Mnemonic.startswith("vmaxnmav") || Mnemonic.startswith("vmaxv") || | |||
12405 | Mnemonic.startswith("vmaxav") || Mnemonic.startswith("vmladav") || | |||
12406 | Mnemonic.startswith("vrmlaldavh") || Mnemonic.startswith("vrmlalvh") || | |||
12407 | Mnemonic.startswith("vmlsdav") || Mnemonic.startswith("vmlav") || | |||
12408 | Mnemonic.startswith("vmlaldav") || Mnemonic.startswith("vmlalv") || | |||
12409 | Mnemonic.startswith("vmaxnm") || Mnemonic.startswith("vminnm") || | |||
12410 | Mnemonic.startswith("vmax") || Mnemonic.startswith("vmin") || | |||
12411 | Mnemonic.startswith("vshlc") || Mnemonic.startswith("vmovlt") || | |||
12412 | Mnemonic.startswith("vmovlb") || Mnemonic.startswith("vshll") || | |||
12413 | Mnemonic.startswith("vrshrn") || Mnemonic.startswith("vshrn") || | |||
12414 | Mnemonic.startswith("vqrshrun") || Mnemonic.startswith("vqshrun") || | |||
12415 | Mnemonic.startswith("vqrshrn") || Mnemonic.startswith("vqshrn") || | |||
12416 | Mnemonic.startswith("vbic") || Mnemonic.startswith("vrev64") || | |||
12417 | Mnemonic.startswith("vrev32") || Mnemonic.startswith("vrev16") || | |||
12418 | Mnemonic.startswith("vmvn") || Mnemonic.startswith("veor") || | |||
12419 | Mnemonic.startswith("vorn") || Mnemonic.startswith("vorr") || | |||
12420 | Mnemonic.startswith("vand") || Mnemonic.startswith("vmul") || | |||
12421 | Mnemonic.startswith("vqrdmulh") || Mnemonic.startswith("vqdmulh") || | |||
12422 | Mnemonic.startswith("vsub") || Mnemonic.startswith("vadd") || | |||
12423 | Mnemonic.startswith("vqsub") || Mnemonic.startswith("vqadd") || | |||
12424 | Mnemonic.startswith("vabd") || Mnemonic.startswith("vrhadd") || | |||
12425 | Mnemonic.startswith("vhsub") || Mnemonic.startswith("vhadd") || | |||
12426 | Mnemonic.startswith("vdup") || Mnemonic.startswith("vcls") || | |||
12427 | Mnemonic.startswith("vclz") || Mnemonic.startswith("vneg") || | |||
12428 | Mnemonic.startswith("vabs") || Mnemonic.startswith("vqneg") || | |||
12429 | Mnemonic.startswith("vqabs") || | |||
12430 | (Mnemonic.startswith("vrint") && Mnemonic != "vrintr") || | |||
12431 | Mnemonic.startswith("vcmla") || Mnemonic.startswith("vfma") || | |||
12432 | Mnemonic.startswith("vfms") || Mnemonic.startswith("vcadd") || | |||
12433 | Mnemonic.startswith("vadd") || Mnemonic.startswith("vsub") || | |||
12434 | Mnemonic.startswith("vshl") || Mnemonic.startswith("vqshl") || | |||
12435 | Mnemonic.startswith("vqrshl") || Mnemonic.startswith("vrshl") || | |||
12436 | Mnemonic.startswith("vsri") || Mnemonic.startswith("vsli") || | |||
12437 | Mnemonic.startswith("vrshr") || Mnemonic.startswith("vshr") || | |||
12438 | Mnemonic.startswith("vpsel") || Mnemonic.startswith("vcmp") || | |||
12439 | Mnemonic.startswith("vqdmladh") || Mnemonic.startswith("vqrdmladh") || | |||
12440 | Mnemonic.startswith("vqdmlsdh") || Mnemonic.startswith("vqrdmlsdh") || | |||
12441 | Mnemonic.startswith("vcmul") || Mnemonic.startswith("vrmulh") || | |||
12442 | Mnemonic.startswith("vqmovn") || Mnemonic.startswith("vqmovun") || | |||
12443 | Mnemonic.startswith("vmovnt") || Mnemonic.startswith("vmovnb") || | |||
12444 | Mnemonic.startswith("vmaxa") || Mnemonic.startswith("vmaxnma") || | |||
12445 | Mnemonic.startswith("vhcadd") || Mnemonic.startswith("vadc") || | |||
12446 | Mnemonic.startswith("vsbc") || Mnemonic.startswith("vrshr") || | |||
12447 | Mnemonic.startswith("vshr") || Mnemonic.startswith("vstrb") || | |||
12448 | Mnemonic.startswith("vldrb") || | |||
12449 | (Mnemonic.startswith("vstrh") && Mnemonic != "vstrhi") || | |||
12450 | (Mnemonic.startswith("vldrh") && Mnemonic != "vldrhi") || | |||
12451 | Mnemonic.startswith("vstrw") || Mnemonic.startswith("vldrw") || | |||
12452 | Mnemonic.startswith("vldrd") || Mnemonic.startswith("vstrd") || | |||
12453 | Mnemonic.startswith("vqdmull") || Mnemonic.startswith("vbrsr") || | |||
12454 | Mnemonic.startswith("vfmas") || Mnemonic.startswith("vmlas") || | |||
12455 | Mnemonic.startswith("vmla") || Mnemonic.startswith("vqdmlash") || | |||
12456 | Mnemonic.startswith("vqdmlah") || Mnemonic.startswith("vqrdmlash") || | |||
12457 | Mnemonic.startswith("vqrdmlah") || Mnemonic.startswith("viwdup") || | |||
12458 | Mnemonic.startswith("vdwdup") || Mnemonic.startswith("vidup") || | |||
12459 | Mnemonic.startswith("vddup") || Mnemonic.startswith("vctp") || | |||
12460 | Mnemonic.startswith("vpnot") || Mnemonic.startswith("vbic") || | |||
12461 | Mnemonic.startswith("vrmlsldavh") || Mnemonic.startswith("vmlsldav") || | |||
12462 | Mnemonic.startswith("vcvt") || | |||
12463 | MS.isVPTPredicableCDEInstr(Mnemonic) || | |||
12464 | (Mnemonic.startswith("vmov") && | |||
12465 | !(ExtraToken == ".f16" || ExtraToken == ".32" || | |||
12466 | ExtraToken == ".16" || ExtraToken == ".8")); | |||
12467 | } |
1 | //===-- llvm/Support/MathExtras.h - Useful math functions -------*- C++ -*-===// | ||||
2 | // | ||||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | ||||
4 | // See https://llvm.org/LICENSE.txt for license information. | ||||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | ||||
6 | // | ||||
7 | //===----------------------------------------------------------------------===// | ||||
8 | // | ||||
9 | // This file contains some functions that are useful for math stuff. | ||||
10 | // | ||||
11 | //===----------------------------------------------------------------------===// | ||||
12 | |||||
13 | #ifndef LLVM_SUPPORT_MATHEXTRAS_H | ||||
14 | #define LLVM_SUPPORT_MATHEXTRAS_H | ||||
15 | |||||
16 | #include "llvm/Support/Compiler.h" | ||||
17 | #include <cassert> | ||||
18 | #include <climits> | ||||
19 | #include <cmath> | ||||
20 | #include <cstdint> | ||||
21 | #include <cstring> | ||||
22 | #include <limits> | ||||
23 | #include <type_traits> | ||||
24 | |||||
25 | #ifdef __ANDROID_NDK__ | ||||
26 | #include <android/api-level.h> | ||||
27 | #endif | ||||
28 | |||||
#ifdef _MSC_VER
// Declare these intrinsics manually rather including intrin.h. It's very
// expensive, and MathExtras.h is popular.
// #include <intrin.h>
extern "C" {
// MSVC bit-scan intrinsics: write the index of the lowest (Forward) or
// highest (Reverse) set bit of _Mask to *_Index; the return value is
// nonzero iff _Mask had any bit set.
unsigned char _BitScanForward(unsigned long *_Index, unsigned long _Mask);
unsigned char _BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask);
unsigned char _BitScanReverse(unsigned long *_Index, unsigned long _Mask);
unsigned char _BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask);
}
#endif
40 | |||||
41 | namespace llvm { | ||||
42 | |||||
/// The behavior an operation has on an input of 0.
/// Passed to the bit-counting helpers below (countTrailingZeros,
/// countLeadingZeros, findFirstSet, findLastSet); see each function for
/// which values it accepts.
enum ZeroBehavior {
  /// The returned value is undefined.
  ZB_Undefined,
  /// The returned value is numeric_limits<T>::max()
  ZB_Max,
  /// The returned value is numeric_limits<T>::digits
  ZB_Width
};
52 | |||||
/// Mathematical constants.
namespace numbers {
// TODO: Track C++20 std::numbers.
// TODO: Favor using the hexadecimal FP constants (requires C++17).
// Double-precision values; each comment gives the nearest hex float and,
// where available, the OEIS sequence for the decimal expansion.
constexpr double e          = 2.7182818284590452354, // (0x1.5bf0a8b145749P+1) https://oeis.org/A001113
                 egamma     = .57721566490153286061, // (0x1.2788cfc6fb619P-1) https://oeis.org/A001620
                 ln2        = .69314718055994530942, // (0x1.62e42fefa39efP-1) https://oeis.org/A002162
                 ln10       = 2.3025850929940456840, // (0x1.24bb1bbb55516P+1) https://oeis.org/A002392
                 log2e      = 1.4426950408889634074, // (0x1.71547652b82feP+0)
                 log10e     = .43429448190325182765, // (0x1.bcb7b1526e50eP-2)
                 pi         = 3.1415926535897932385, // (0x1.921fb54442d18P+1) https://oeis.org/A000796
                 inv_pi     = .31830988618379067154, // (0x1.45f306bc9c883P-2) https://oeis.org/A049541
                 sqrtpi     = 1.7724538509055160273, // (0x1.c5bf891b4ef6bP+0) https://oeis.org/A002161
                 inv_sqrtpi = .56418958354775628695, // (0x1.20dd750429b6dP-1) https://oeis.org/A087197
                 sqrt2      = 1.4142135623730950488, // (0x1.6a09e667f3bcdP+0) https://oeis.org/A002193
                 inv_sqrt2  = .70710678118654752440, // (0x1.6a09e667f3bcdP-1)
                 sqrt3      = 1.7320508075688772935, // (0x1.bb67ae8584caaP+0) https://oeis.org/A002194
                 inv_sqrt3  = .57735026918962576451, // (0x1.279a74590331cP-1)
                 phi        = 1.6180339887498948482; // (0x1.9e3779b97f4a8P+0) https://oeis.org/A001622
// Single-precision counterparts of the constants above.
constexpr float ef          = 2.71828183F, // (0x1.5bf0a8P+1) https://oeis.org/A001113
                egammaf     = .577215665F, // (0x1.2788d0P-1) https://oeis.org/A001620
                ln2f        = .693147181F, // (0x1.62e430P-1) https://oeis.org/A002162
                ln10f       = 2.30258509F, // (0x1.26bb1cP+1) https://oeis.org/A002392
                log2ef      = 1.44269504F, // (0x1.715476P+0)
                log10ef     = .434294482F, // (0x1.bcb7b2P-2)
                pif         = 3.14159265F, // (0x1.921fb6P+1) https://oeis.org/A000796
                inv_pif     = .318309886F, // (0x1.45f306P-2) https://oeis.org/A049541
                sqrtpif     = 1.77245385F, // (0x1.c5bf8aP+0) https://oeis.org/A002161
                inv_sqrtpif = .564189584F, // (0x1.20dd76P-1) https://oeis.org/A087197
                sqrt2f      = 1.41421356F, // (0x1.6a09e6P+0) https://oeis.org/A002193
                inv_sqrt2f  = .707106781F, // (0x1.6a09e6P-1)
                sqrt3f      = 1.73205081F, // (0x1.bb67aeP+0) https://oeis.org/A002194
                inv_sqrt3f  = .577350269F, // (0x1.279a74P-1)
                phif        = 1.61803399F; // (0x1.9e377aP+0) https://oeis.org/A001622
} // namespace numbers
88 | |||||
89 | namespace detail { | ||||
90 | template <typename T, std::size_t SizeOfT> struct TrailingZerosCounter { | ||||
91 | static unsigned count(T Val, ZeroBehavior) { | ||||
92 | if (!Val) | ||||
93 | return std::numeric_limits<T>::digits; | ||||
94 | if (Val & 0x1) | ||||
95 | return 0; | ||||
96 | |||||
97 | // Bisection method. | ||||
98 | unsigned ZeroBits = 0; | ||||
99 | T Shift = std::numeric_limits<T>::digits >> 1; | ||||
100 | T Mask = std::numeric_limits<T>::max() >> Shift; | ||||
101 | while (Shift) { | ||||
102 | if ((Val & Mask) == 0) { | ||||
103 | Val >>= Shift; | ||||
104 | ZeroBits |= Shift; | ||||
105 | } | ||||
106 | Shift >>= 1; | ||||
107 | Mask >>= Shift; | ||||
108 | } | ||||
109 | return ZeroBits; | ||||
110 | } | ||||
111 | }; | ||||
112 | |||||
113 | #if defined(__GNUC__4) || defined(_MSC_VER) | ||||
114 | template <typename T> struct TrailingZerosCounter<T, 4> { | ||||
115 | static unsigned count(T Val, ZeroBehavior ZB) { | ||||
116 | if (ZB
| ||||
117 | return 32; | ||||
118 | |||||
119 | #if __has_builtin(__builtin_ctz)1 || defined(__GNUC__4) | ||||
120 | return __builtin_ctz(Val); | ||||
121 | #elif defined(_MSC_VER) | ||||
122 | unsigned long Index; | ||||
123 | _BitScanForward(&Index, Val); | ||||
124 | return Index; | ||||
125 | #endif | ||||
126 | } | ||||
127 | }; | ||||
128 | |||||
129 | #if !defined(_MSC_VER) || defined(_M_X64) | ||||
130 | template <typename T> struct TrailingZerosCounter<T, 8> { | ||||
131 | static unsigned count(T Val, ZeroBehavior ZB) { | ||||
132 | if (ZB != ZB_Undefined && Val == 0) | ||||
133 | return 64; | ||||
134 | |||||
135 | #if __has_builtin(__builtin_ctzll)1 || defined(__GNUC__4) | ||||
136 | return __builtin_ctzll(Val); | ||||
137 | #elif defined(_MSC_VER) | ||||
138 | unsigned long Index; | ||||
139 | _BitScanForward64(&Index, Val); | ||||
140 | return Index; | ||||
141 | #endif | ||||
142 | } | ||||
143 | }; | ||||
144 | #endif | ||||
145 | #endif | ||||
146 | } // namespace detail | ||||
147 | |||||
/// Count number of 0's from the least significant bit to the most
/// stopping at the first 1.
///
/// Only unsigned integral types are allowed.
///
/// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are
/// valid arguments.
template <typename T>
unsigned countTrailingZeros(T Val, ZeroBehavior ZB = ZB_Width) {
  static_assert(std::numeric_limits<T>::is_integer &&
                !std::numeric_limits<T>::is_signed,
                "Only unsigned integral types are allowed.");
  // Dispatch on sizeof(T) so 32- and 64-bit types can pick up the
  // intrinsic-based specializations in detail::.
  return llvm::detail::TrailingZerosCounter<T, sizeof(T)>::count(Val, ZB);
}
162 | |||||
163 | namespace detail { | ||||
164 | template <typename T, std::size_t SizeOfT> struct LeadingZerosCounter { | ||||
165 | static unsigned count(T Val, ZeroBehavior) { | ||||
166 | if (!Val) | ||||
167 | return std::numeric_limits<T>::digits; | ||||
168 | |||||
169 | // Bisection method. | ||||
170 | unsigned ZeroBits = 0; | ||||
171 | for (T Shift = std::numeric_limits<T>::digits >> 1; Shift; Shift >>= 1) { | ||||
172 | T Tmp = Val >> Shift; | ||||
173 | if (Tmp) | ||||
174 | Val = Tmp; | ||||
175 | else | ||||
176 | ZeroBits |= Shift; | ||||
177 | } | ||||
178 | return ZeroBits; | ||||
179 | } | ||||
180 | }; | ||||
181 | |||||
182 | #if defined(__GNUC__4) || defined(_MSC_VER) | ||||
183 | template <typename T> struct LeadingZerosCounter<T, 4> { | ||||
184 | static unsigned count(T Val, ZeroBehavior ZB) { | ||||
185 | if (ZB != ZB_Undefined && Val == 0) | ||||
186 | return 32; | ||||
187 | |||||
188 | #if __has_builtin(__builtin_clz)1 || defined(__GNUC__4) | ||||
189 | return __builtin_clz(Val); | ||||
190 | #elif defined(_MSC_VER) | ||||
191 | unsigned long Index; | ||||
192 | _BitScanReverse(&Index, Val); | ||||
193 | return Index ^ 31; | ||||
194 | #endif | ||||
195 | } | ||||
196 | }; | ||||
197 | |||||
198 | #if !defined(_MSC_VER) || defined(_M_X64) | ||||
199 | template <typename T> struct LeadingZerosCounter<T, 8> { | ||||
200 | static unsigned count(T Val, ZeroBehavior ZB) { | ||||
201 | if (ZB != ZB_Undefined && Val == 0) | ||||
202 | return 64; | ||||
203 | |||||
204 | #if __has_builtin(__builtin_clzll)1 || defined(__GNUC__4) | ||||
205 | return __builtin_clzll(Val); | ||||
206 | #elif defined(_MSC_VER) | ||||
207 | unsigned long Index; | ||||
208 | _BitScanReverse64(&Index, Val); | ||||
209 | return Index ^ 63; | ||||
210 | #endif | ||||
211 | } | ||||
212 | }; | ||||
213 | #endif | ||||
214 | #endif | ||||
215 | } // namespace detail | ||||
216 | |||||
/// Count number of 0's from the most significant bit to the least
/// stopping at the first 1.
///
/// Only unsigned integral types are allowed.
///
/// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are
/// valid arguments.
template <typename T>
unsigned countLeadingZeros(T Val, ZeroBehavior ZB = ZB_Width) {
  static_assert(std::numeric_limits<T>::is_integer &&
                !std::numeric_limits<T>::is_signed,
                "Only unsigned integral types are allowed.");
  // Dispatch on sizeof(T) so 32- and 64-bit types can pick up the
  // intrinsic-based specializations in detail::.
  return llvm::detail::LeadingZerosCounter<T, sizeof(T)>::count(Val, ZB);
}
231 | |||||
/// Get the index of the first set bit starting from the least
/// significant bit.
///
/// Only unsigned integral types are allowed.
///
/// \param ZB the behavior on an input of 0. Only ZB_Max and ZB_Undefined are
/// valid arguments.
template <typename T> T findFirstSet(T Val, ZeroBehavior ZB = ZB_Max) {
  if (ZB == ZB_Max && Val == 0)
    return std::numeric_limits<T>::max();

  // Zero was handled above, so the count below is always well-defined.
  return countTrailingZeros(Val, ZB_Undefined);
}
245 | |||||
/// Create a bitmask with the N right-most bits set to 1, and all other
/// bits set to 0.  Only unsigned types are allowed.
template <typename T> T maskTrailingOnes(unsigned N) {
  static_assert(std::is_unsigned<T>::value, "Invalid type!");
  const unsigned Bits = CHAR_BIT * sizeof(T);
  assert(N <= Bits && "Invalid bit index");
  // N == 0 is handled explicitly because shifting by the full width of T
  // would be undefined behavior.
  return N == 0 ? 0 : (T(-1) >> (Bits - N));
}

/// Create a bitmask with the N left-most bits set to 1, and all other
/// bits set to 0.  Only unsigned types are allowed.
template <typename T> T maskLeadingOnes(unsigned N) {
  return ~maskTrailingOnes<T>(CHAR_BIT * sizeof(T) - N);
}

/// Create a bitmask with the N right-most bits set to 0, and all other
/// bits set to 1.  Only unsigned types are allowed.
template <typename T> T maskTrailingZeros(unsigned N) {
  return maskLeadingOnes<T>(CHAR_BIT * sizeof(T) - N);
}

/// Create a bitmask with the N left-most bits set to 0, and all other
/// bits set to 1.  Only unsigned types are allowed.
template <typename T> T maskLeadingZeros(unsigned N) {
  return maskTrailingOnes<T>(CHAR_BIT * sizeof(T) - N);
}
272 | |||||
/// Get the index of the last set bit starting from the least
/// significant bit.
///
/// Only unsigned integral types are allowed.
///
/// \param ZB the behavior on an input of 0. Only ZB_Max and ZB_Undefined are
/// valid arguments.
template <typename T> T findLastSet(T Val, ZeroBehavior ZB = ZB_Max) {
  if (ZB == ZB_Max && Val == 0)
    return std::numeric_limits<T>::max();

  // For nonzero Val, clz(Val) <= digits - 1, so XOR with (digits - 1) equals
  // (digits - 1) - clz(Val), i.e. the index of the highest set bit.
  // Use ^ instead of - because both gcc and llvm can remove the associated ^
  // in the __builtin_clz intrinsic on x86.
  return countLeadingZeros(Val, ZB_Undefined) ^
         (std::numeric_limits<T>::digits - 1);
}
289 | |||||
/// Macro compressed bit reversal table for 256 bits.
///
/// http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
static const unsigned char BitReverseTable256[256] = {
#define R2(n) n, n + 2 * 64, n + 1 * 64, n + 3 * 64
#define R4(n) R2(n), R2(n + 2 * 16), R2(n + 1 * 16), R2(n + 3 * 16)
#define R6(n) R4(n), R4(n + 2 * 4), R4(n + 1 * 4), R4(n + 3 * 4)
  R6(0), R6(2), R6(1), R6(3)
#undef R2
#undef R4
#undef R6
};

/// Reverse the bits in \p Val.
///
/// Works byte-wise: each byte is bit-reversed through the lookup table while
/// the byte order itself is mirrored, which reverses the whole value
/// independent of host endianness.
template <typename T>
T reverseBits(T Val) {
  unsigned char Bytes[sizeof(Val)];
  unsigned char Reversed[sizeof(Val)];
  std::memcpy(Bytes, &Val, sizeof(Val));
  for (unsigned I = 0, E = sizeof(Val); I != E; ++I)
    Reversed[E - I - 1] = BitReverseTable256[Bytes[I]];
  std::memcpy(&Val, Reversed, sizeof(Val));
  return Val;
}
314 | |||||
// Specializations that lower bit reversal to a single compiler builtin when
// the builtin is available (clang provides all four).
#if __has_builtin(__builtin_bitreverse8)
template<>
inline uint8_t reverseBits<uint8_t>(uint8_t Val) {
  return __builtin_bitreverse8(Val);
}
#endif

#if __has_builtin(__builtin_bitreverse16)
template<>
inline uint16_t reverseBits<uint16_t>(uint16_t Val) {
  return __builtin_bitreverse16(Val);
}
#endif

#if __has_builtin(__builtin_bitreverse32)
template<>
inline uint32_t reverseBits<uint32_t>(uint32_t Val) {
  return __builtin_bitreverse32(Val);
}
#endif

#if __has_builtin(__builtin_bitreverse64)
template<>
inline uint64_t reverseBits<uint64_t>(uint64_t Val) {
  return __builtin_bitreverse64(Val);
}
#endif
342 | |||||
// NOTE: The following support functions use the _32/_64 extensions instead of
// type overloading so that signed and unsigned integers can be used without
// ambiguity.

/// Return the high 32 bits of a 64 bit value.
constexpr inline uint32_t Hi_32(uint64_t Value) {
  // Bring the upper half down into the low bits, then truncate.
  return static_cast<uint32_t>(Value >> 32);
}

/// Return the low 32 bits of a 64 bit value.
constexpr inline uint32_t Lo_32(uint64_t Value) {
  // Masking before the truncating cast makes the intent explicit; the
  // result is identical to a plain cast.
  return static_cast<uint32_t>(Value & UINT64_C(0xFFFFFFFF));
}

/// Make a 64-bit integer from a high / low pair of 32-bit integers.
constexpr inline uint64_t Make_64(uint32_t High, uint32_t Low) {
  return (static_cast<uint64_t>(High) << 32) | static_cast<uint64_t>(Low);
}
361 | |||||
362 | /// Checks if an integer fits into the given bit width. | ||||
363 | template <unsigned N> constexpr inline bool isInt(int64_t x) { | ||||
364 | return N >= 64 || (-(INT64_C(1)1L<<(N-1)) <= x && x < (INT64_C(1)1L<<(N-1))); | ||||
365 | } | ||||
366 | // Template specializations to get better code for common cases. | ||||
367 | template <> constexpr inline bool isInt<8>(int64_t x) { | ||||
368 | return static_cast<int8_t>(x) == x; | ||||
369 | } | ||||
370 | template <> constexpr inline bool isInt<16>(int64_t x) { | ||||
371 | return static_cast<int16_t>(x) == x; | ||||
372 | } | ||||
373 | template <> constexpr inline bool isInt<32>(int64_t x) { | ||||
374 | return static_cast<int32_t>(x) == x; | ||||
375 | } | ||||
376 | |||||
377 | /// Checks if a signed integer is an N bit number shifted left by S. | ||||
378 | template <unsigned N, unsigned S> | ||||
379 | constexpr inline bool isShiftedInt(int64_t x) { | ||||
380 | static_assert( | ||||
381 | N > 0, "isShiftedInt<0> doesn't make sense (refers to a 0-bit number."); | ||||
382 | static_assert(N + S <= 64, "isShiftedInt<N, S> with N + S > 64 is too wide."); | ||||
383 | return isInt<N + S>(x) && (x % (UINT64_C(1)1UL << S) == 0); | ||||
384 | } | ||||
385 | |||||
/// Checks if an unsigned integer fits into the given bit width.
///
/// This is written as two functions rather than as simply
///
///   return N >= 64 || X < (UINT64_C(1) << N);
///
/// to keep MSVC from (incorrectly) warning on isUInt<64> that we're shifting
/// left too many places.
template <unsigned N>
constexpr inline std::enable_if_t<(N < 64), bool> isUInt(uint64_t X) {
  static_assert(N > 0, "isUInt<0> doesn't make sense");
  // X fits in N bits iff no bit at position >= N is set. Testing via a right
  // shift is equivalent to "X < (UINT64_C(1) << N)" for N < 64 and avoids
  // the left shift that static analyzers flag as potentially shifting by the
  // full width of the type.
  return (X >> N) == 0;
}
template <unsigned N>
constexpr inline std::enable_if_t<N >= 64, bool> isUInt(uint64_t) {
  // Any uint64_t fits in 64 (or more) bits.
  return true;
}

// Template specializations to get better code for common cases.
template <> constexpr inline bool isUInt<8>(uint64_t x) {
  return x <= UINT8_MAX;
}
template <> constexpr inline bool isUInt<16>(uint64_t x) {
  return x <= UINT16_MAX;
}
template <> constexpr inline bool isUInt<32>(uint64_t x) {
  return x <= UINT32_MAX;
}
414 | |||||
415 | /// Checks if a unsigned integer is an N bit number shifted left by S. | ||||
416 | template <unsigned N, unsigned S> | ||||
417 | constexpr inline bool isShiftedUInt(uint64_t x) { | ||||
418 | static_assert( | ||||
419 | N > 0, "isShiftedUInt<0> doesn't make sense (refers to a 0-bit number)"); | ||||
420 | static_assert(N + S <= 64, | ||||
421 | "isShiftedUInt<N, S> with N + S > 64 is too wide."); | ||||
422 | // Per the two static_asserts above, S must be strictly less than 64. So | ||||
423 | // 1 << S is not undefined behavior. | ||||
424 | return isUInt<N + S>(x) && (x % (UINT64_C(1)1UL << S) == 0); | ||||
425 | } | ||||
426 | |||||
/// Gets the maximum value for a N-bit unsigned integer.
inline uint64_t maxUIntN(uint64_t N) {
  assert(N > 0 && N <= 64 && "integer width out of range");

  // Shift UINT64_MAX right instead of computing (1 << N) - 1, which would be
  // undefined behavior for N == 64; this form also needs no branch.
  return UINT64_MAX >> (64 - N);
}

/// Gets the minimum value for a N-bit signed integer.
inline int64_t minIntN(int64_t N) {
  assert(N > 0 && N <= 64 && "integer width out of range");

  // -(2^(N-1)) written as the complement of (2^(N-1) - 1) in unsigned
  // arithmetic; this avoids negating an unsigned value (MSVC warning C4146)
  // and is well defined even for N == 64.
  return static_cast<int64_t>(~((UINT64_C(1) << (N - 1)) - 1));
}

/// Gets the maximum value for a N-bit signed integer.
inline int64_t maxIntN(int64_t N) {
  assert(N > 0 && N <= 64 && "integer width out of range");

  // 2^(N-1) - 1, computed entirely in uint64_t so that N == 64 wraps around
  // two's-complement style instead of invoking signed overflow; we convert to
  // int64_t only at the very end.
  const uint64_t SignBit = UINT64_C(1) << (N - 1);
  return static_cast<int64_t>(SignBit - 1);
}

/// Checks if an unsigned integer fits into the given (dynamic) bit width.
inline bool isUIntN(unsigned N, uint64_t x) {
  if (N >= 64)
    return true;
  return x <= maxUIntN(N);
}

/// Checks if an signed integer fits into the given (dynamic) bit width.
inline bool isIntN(unsigned N, int64_t x) {
  if (N >= 64)
    return true;
  return minIntN(N) <= x && x <= maxIntN(N);
}
463 | |||||
/// Return true if the argument is a non-empty sequence of ones starting at the
/// least significant bit with the remainder zero (32 bit version).
/// Ex. isMask_32(0x0000FFFFU) == true.
constexpr inline bool isMask_32(uint32_t Value) {
  // A low-bit mask is one less than a power of two, so adding one clears it.
  return Value != 0 && (Value & (Value + 1)) == 0;
}

/// Return true if the argument is a non-empty sequence of ones starting at the
/// least significant bit with the remainder zero (64 bit version).
constexpr inline bool isMask_64(uint64_t Value) {
  return Value != 0 && (Value & (Value + 1)) == 0;
}

/// Return true if the argument contains a non-empty sequence of ones with the
/// remainder zero (32 bit version.) Ex. isShiftedMask_32(0x0000FF00U) == true.
constexpr inline bool isShiftedMask_32(uint32_t Value) {
  // Smearing the lowest set bit down with (Value - 1) turns a shifted mask
  // into a low-bit mask.
  return Value != 0 && isMask_32(Value | (Value - 1));
}

/// Return true if the argument contains a non-empty sequence of ones with the
/// remainder zero (64 bit version.)
constexpr inline bool isShiftedMask_64(uint64_t Value) {
  return Value != 0 && isMask_64(Value | (Value - 1));
}
488 | |||||
/// Return true if the argument is a power of two > 0.
/// Ex. isPowerOf2_32(0x00100000U) == true (32 bit edition.)
constexpr inline bool isPowerOf2_32(uint32_t Value) {
  // Exactly one bit set: clearing the lowest set bit must leave zero.
  return Value != 0 && (Value & (Value - 1)) == 0;
}

/// Return true if the argument is a power of two > 0 (64 bit edition.)
constexpr inline bool isPowerOf2_64(uint64_t Value) {
  return Value != 0 && (Value & (Value - 1)) == 0;
}
499 | |||||
/// Count the number of ones from the most significant bit to the first
/// zero bit.
///
/// Ex. countLeadingOnes(0xFF0FFF00) == 8.
/// Only unsigned integral types are allowed.
///
/// \param Value the value whose leading ones are counted.
/// \param ZB the behavior on an input of all ones. Only ZB_Width and
/// ZB_Undefined are valid arguments.
/// \returns the number of consecutive one bits starting at the MSB.
template <typename T>
unsigned countLeadingOnes(T Value, ZeroBehavior ZB = ZB_Width) {
  static_assert(std::numeric_limits<T>::is_integer &&
                !std::numeric_limits<T>::is_signed,
                "Only unsigned integral types are allowed.");
  // Leading ones of Value are exactly the leading zeros of its complement;
  // the explicit <T> keeps the promoted ~Value at T's width.
  return countLeadingZeros<T>(~Value, ZB);
}
515 | |||||
/// Count the number of ones from the least significant bit to the first
/// zero bit.
///
/// Ex. countTrailingOnes(0x00FF00FF) == 8.
/// Only unsigned integral types are allowed.
///
/// \param Value the value whose trailing ones are counted.
/// \param ZB the behavior on an input of all ones. Only ZB_Width and
/// ZB_Undefined are valid arguments.
/// \returns the number of consecutive one bits starting at the LSB.
template <typename T>
unsigned countTrailingOnes(T Value, ZeroBehavior ZB = ZB_Width) {
  static_assert(std::numeric_limits<T>::is_integer &&
                !std::numeric_limits<T>::is_signed,
                "Only unsigned integral types are allowed.");
  // Trailing ones of Value are exactly the trailing zeros of its complement.
  return countTrailingZeros<T>(~Value, ZB);
}
531 | |||||
namespace detail {
/// Implementation helper for countPopulation, dispatched on sizeof(T).
/// The primary template covers types of at most 4 bytes.
template <typename T, std::size_t SizeOfT> struct PopulationCounter {
  static unsigned count(T Value) {
    // Generic version, forward to 32 bits.
    static_assert(SizeOfT <= 4, "Not implemented!");
#if defined(__GNUC__)
    return __builtin_popcount(Value);
#else
    // Classic parallel bit count: sum bits in 2-, then 4-bit groups, then
    // accumulate all byte sums into the top byte via the multiply.
    uint32_t Bits = Value;
    Bits = Bits - ((Bits >> 1) & 0x55555555);
    Bits = (Bits & 0x33333333) + ((Bits >> 2) & 0x33333333);
    Bits = (Bits + (Bits >> 4)) & 0x0F0F0F0F;
    return (Bits * 0x01010101) >> 24;
#endif
  }
};

/// 8-byte specialization, using the 64-bit variants of the same tricks.
template <typename T> struct PopulationCounter<T, 8> {
  static unsigned count(T Value) {
#if defined(__GNUC__)
    return __builtin_popcountll(Value);
#else
    uint64_t Bits = Value;
    Bits = Bits - ((Bits >> 1) & 0x5555555555555555ULL);
    Bits = (Bits & 0x3333333333333333ULL) + ((Bits >> 2) & 0x3333333333333333ULL);
    Bits = (Bits + (Bits >> 4)) & 0x0F0F0F0F0F0F0F0FULL;
    return unsigned((Bits * 0x0101010101010101ULL) >> 56);
#endif
  }
};
} // namespace detail

/// Count the number of set bits in a value.
/// Ex. countPopulation(0xF000F000) = 8
/// Returns 0 if the word is zero.
template <typename T>
inline unsigned countPopulation(T Value) {
  static_assert(std::numeric_limits<T>::is_integer &&
                    !std::numeric_limits<T>::is_signed,
                "Only unsigned integral types are allowed.");
  return detail::PopulationCounter<T, sizeof(T)>::count(Value);
}
573 | |||||
/// Compile time Log2.
/// Valid only for positive powers of two.
/// Recurses on kValue / 2, adding one per halving, until the <1>
/// specialization below terminates the instantiation chain.
template <size_t kValue> constexpr inline size_t CTLog2() {
  static_assert(kValue > 0 && llvm::isPowerOf2_64(kValue),
                "Value is not a valid power of 2");
  return 1 + CTLog2<kValue / 2>();
}

/// Base case of the recursion above: log2(1) == 0.
template <> constexpr inline size_t CTLog2<1>() { return 0; }
583 | |||||
/// Return the log base 2 of the specified value.
inline double Log2(double Value) {
#if defined(__ANDROID_API__) && __ANDROID_API__ < 18
  // Old Android NDK levels lack log2(); derive it from the natural log.
  return __builtin_log(Value) / __builtin_log(2.0);
#else
  return log2(Value);
#endif
}
592 | |||||
/// Return the floor log base 2 of the specified value, -1 if the value is zero.
/// (32 bit edition.)
/// Ex. Log2_32(32) == 5, Log2_32(1) == 0, Log2_32(0) == -1, Log2_32(6) == 2
///
/// NOTE(review): the return type is unsigned, so the "-1" for zero is really
/// the wrapped value of 31 - countLeadingZeros(0); callers comparing against
/// -1 rely on that wraparound — confirm countLeadingZeros(0) yields the width
/// under its default ZeroBehavior.
inline unsigned Log2_32(uint32_t Value) {
  return 31 - countLeadingZeros(Value);
}

/// Return the floor log base 2 of the specified value, -1 if the value is zero.
/// (64 bit edition.)
inline unsigned Log2_64(uint64_t Value) {
  return 63 - countLeadingZeros(Value);
}

/// Return the ceil log base 2 of the specified value, 32 if the value is zero.
/// (32 bit edition).
/// Ex. Log2_32_Ceil(32) == 5, Log2_32_Ceil(1) == 0, Log2_32_Ceil(6) == 3
inline unsigned Log2_32_Ceil(uint32_t Value) {
  // (Value - 1) drops the top bit of exact powers of two, so the 32-minus-clz
  // form rounds up for everything else.
  return 32 - countLeadingZeros(Value - 1);
}

/// Return the ceil log base 2 of the specified value, 64 if the value is zero.
/// (64 bit edition.)
inline unsigned Log2_64_Ceil(uint64_t Value) {
  return 64 - countLeadingZeros(Value - 1);
}
618 | |||||
/// Return the greatest common divisor of the values using Euclid's algorithm.
template <typename T>
inline T greatestCommonDivisor(T A, T B) {
  // Iteratively replace (A, B) with (B, A mod B) until the remainder is zero.
  while (B != 0) {
    T Rem = A % B;
    A = B;
    B = Rem;
  }
  return A;
}

/// Convenience wrapper of the above for 64-bit unsigned operands.
inline uint64_t GreatestCommonDivisor64(uint64_t A, uint64_t B) {
  return greatestCommonDivisor<uint64_t>(A, B);
}
633 | |||||
/// This function takes a 64-bit integer and returns the bit equivalent double.
inline double BitsToDouble(uint64_t Bits) {
  static_assert(sizeof(uint64_t) == sizeof(double), "Unexpected type sizes");
  double Result;
  memcpy(&Result, &Bits, sizeof(Bits));
  return Result;
}

/// This function takes a 32-bit integer and returns the bit equivalent float.
inline float BitsToFloat(uint32_t Bits) {
  static_assert(sizeof(uint32_t) == sizeof(float), "Unexpected type sizes");
  float Result;
  memcpy(&Result, &Bits, sizeof(Bits));
  return Result;
}

/// This function takes a double and returns the bit equivalent 64-bit integer.
/// Note that copying doubles around changes the bits of NaNs on some hosts,
/// notably x86, so this routine cannot be used if these bits are needed.
inline uint64_t DoubleToBits(double Double) {
  static_assert(sizeof(uint64_t) == sizeof(double), "Unexpected type sizes");
  uint64_t Result;
  memcpy(&Result, &Double, sizeof(Double));
  return Result;
}

/// This function takes a float and returns the bit equivalent 32-bit integer.
/// Note that copying floats around changes the bits of NaNs on some hosts,
/// notably x86, so this routine cannot be used if these bits are needed.
inline uint32_t FloatToBits(float Float) {
  static_assert(sizeof(uint32_t) == sizeof(float), "Unexpected type sizes");
  uint32_t Result;
  memcpy(&Result, &Float, sizeof(Float));
  return Result;
}
669 | |||||
/// A and B are either alignments or offsets. Return the minimum alignment that
/// may be assumed after adding the two together.
constexpr inline uint64_t MinAlign(uint64_t A, uint64_t B) {
  const uint64_t Combined = A | B;
  // The largest power of 2 dividing both is the lowest set bit of (A | B),
  // i.e. Combined & -Combined. The negation is spelled (1 + ~Combined) to
  // sidestep MSVC warning C4146 about negating an unsigned value.
  return Combined & (1 + ~Combined);
}
680 | |||||
/// Returns the next power of two (in 64-bits) that is strictly greater than A.
/// Returns zero on overflow.
inline uint64_t NextPowerOf2(uint64_t A) {
  // Smear the highest set bit into every lower position, producing a value of
  // the form 2^k - 1; adding one then yields the next power of two (or zero
  // on wraparound when the top bit was already set).
  for (unsigned Shift = 1; Shift < 64; Shift *= 2)
    A |= (A >> Shift);
  return A + 1;
}
692 | |||||
693 | /// Returns the power of two which is less than or equal to the given value. | ||||
694 | /// Essentially, it is a floor operation across the domain of powers of two. | ||||
695 | inline uint64_t PowerOf2Floor(uint64_t A) { | ||||
696 | if (!A) return 0; | ||||
697 | return 1ull << (63 - countLeadingZeros(A, ZB_Undefined)); | ||||
698 | } | ||||
699 | |||||
700 | /// Returns the power of two which is greater than or equal to the given value. | ||||
701 | /// Essentially, it is a ceil operation across the domain of powers of two. | ||||
702 | inline uint64_t PowerOf2Ceil(uint64_t A) { | ||||
703 | if (!A) | ||||
704 | return 0; | ||||
705 | return NextPowerOf2(A - 1); | ||||
706 | } | ||||
707 | |||||
/// Returns the next integer (mod 2**64) that is greater than or equal to
/// \p Value and is a multiple of \p Align. \p Align must be non-zero.
///
/// If non-zero \p Skew is specified, the return value will be a minimal
/// integer that is greater than or equal to \p Value and equal to
/// \p Align * N + \p Skew for some integer N. If \p Skew is larger than
/// \p Align, its value is adjusted to '\p Skew mod \p Align'.
///
/// Examples:
/// \code
///   alignTo(5, 8) = 8
///   alignTo(17, 8) = 24
///   alignTo(~0LL, 8) = 0
///   alignTo(321, 255) = 510
///
///   alignTo(5, 8, 7) = 7
///   alignTo(17, 8, 1) = 17
///   alignTo(~0LL, 8, 3) = 3
///   alignTo(321, 255, 42) = 552
/// \endcode
inline uint64_t alignTo(uint64_t Value, uint64_t Align, uint64_t Skew = 0) {
  assert(Align != 0u && "Align can't be 0.");
  const uint64_t Bias = Skew % Align;
  // Round (Value - Bias) up to a multiple of Align, then restore the bias.
  return (Value - Bias + Align - 1) / Align * Align + Bias;
}

/// Returns the next integer (mod 2**64) that is greater than or equal to
/// \p Value and is a multiple of \c Align. \c Align must be non-zero.
template <uint64_t Align> constexpr inline uint64_t alignTo(uint64_t Value) {
  static_assert(Align != 0u, "Align must be non-zero");
  const uint64_t Rem = Value % Align;
  return Rem == 0 ? Value : Value + (Align - Rem);
}

/// Returns the integer ceil(Numerator / Denominator).
inline uint64_t divideCeil(uint64_t Numerator, uint64_t Denominator) {
  return (Numerator + Denominator - 1) / Denominator;
}

/// Returns the integer nearest(Numerator / Denominator).
inline uint64_t divideNearest(uint64_t Numerator, uint64_t Denominator) {
  return (Numerator + (Denominator / 2)) / Denominator;
}

/// Returns the largest uint64_t less than or equal to \p Value and is
/// \p Skew mod \p Align. \p Align must be non-zero
inline uint64_t alignDown(uint64_t Value, uint64_t Align, uint64_t Skew = 0) {
  assert(Align != 0u && "Align can't be 0.");
  const uint64_t Bias = Skew % Align;
  return (Value - Bias) / Align * Align + Bias;
}
758 | |||||
/// Sign-extend the number in the bottom B bits of X to a 32-bit integer.
/// Requires 0 < B <= 32.
template <unsigned B> constexpr inline int32_t SignExtend32(uint32_t X) {
  static_assert(B > 0, "Bit width can't be 0.");
  static_assert(B <= 32, "Bit width out of range.");
  // Move the B-bit field to the top, then arithmetic-shift it back down so
  // the field's sign bit fills the upper bits.
  constexpr unsigned Shift = 32 - B;
  return static_cast<int32_t>(X << Shift) >> Shift;
}

/// Sign-extend the number in the bottom B bits of X to a 32-bit integer.
/// Requires 0 < B <= 32.
inline int32_t SignExtend32(uint32_t X, unsigned B) {
  assert(B > 0 && "Bit width can't be 0.");
  assert(B <= 32 && "Bit width out of range.");
  const unsigned Shift = 32 - B;
  return static_cast<int32_t>(X << Shift) >> Shift;
}
774 | |||||
/// Sign-extend the number in the bottom B bits of X to a 64-bit integer.
/// Requires 0 < B <= 64.
template <unsigned B> constexpr inline int64_t SignExtend64(uint64_t x) {
  static_assert(B > 0, "Bit width can't be 0.");
  static_assert(B <= 64, "Bit width out of range.");
  // Shift the field up so its sign bit lands in bit 63, then shift back with
  // sign replication.
  constexpr unsigned Shift = 64 - B;
  return static_cast<int64_t>(x << Shift) >> Shift;
}

/// Sign-extend the number in the bottom B bits of X to a 64-bit integer.
/// Requires 0 < B <= 64.
inline int64_t SignExtend64(uint64_t X, unsigned B) {
  assert(B > 0 && "Bit width can't be 0.");
  assert(B <= 64 && "Bit width out of range.");
  const unsigned Shift = 64 - B;
  return static_cast<int64_t>(X << Shift) >> Shift;
}
790 | |||||
/// Subtract two unsigned integers, X and Y, of type T and return the absolute
/// value of the result.
template <typename T>
std::enable_if_t<std::is_unsigned<T>::value, T> AbsoluteDifference(T X, T Y) {
  // Subtract the smaller from the larger so the unsigned result can't wrap.
  if (X < Y)
    return Y - X;
  return X - Y;
}
797 | |||||
/// Add two unsigned integers, X and Y, of type T. Clamp the result to the
/// maximum representable value of T on overflow. ResultOverflowed indicates if
/// the result is larger than the maximum representable value of type T.
template <typename T>
std::enable_if_t<std::is_unsigned<T>::value, T>
SaturatingAdd(T X, T Y, bool *ResultOverflowed = nullptr) {
  // Hacker's Delight, p. 29: an unsigned sum wrapped around iff it came out
  // smaller than either operand.
  const T Sum = X + Y;
  const bool Overflowed = Sum < X || Sum < Y;
  if (ResultOverflowed)
    *ResultOverflowed = Overflowed;
  return Overflowed ? std::numeric_limits<T>::max() : Sum;
}
814 | |||||
/// Multiply two unsigned integers, X and Y, of type T. Clamp the result to the
/// maximum representable value of T on overflow. ResultOverflowed indicates if
/// the result is larger than the maximum representable value of type T.
template <typename T>
std::enable_if_t<std::is_unsigned<T>::value, T>
SaturatingMultiply(T X, T Y, bool *ResultOverflowed = nullptr) {
  // If the caller didn't ask for the flag, track it in a throwaway local.
  bool Dummy;
  bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;

  // Hacker's Delight, p. 30 has a different algorithm, but we don't use that
  // because it fails for uint16_t (where multiplication can have undefined
  // behavior due to promotion to int), and requires a division in addition
  // to the multiplication.

  Overflowed = false;

  // Log2(Z) would be either Log2Z or Log2Z + 1.
  // Special case: if X or Y is 0, Log2_64 gives -1, and Log2Z
  // will necessarily be less than Log2Max as desired.
  int Log2Z = Log2_64(X) + Log2_64(Y);
  const T Max = std::numeric_limits<T>::max();
  int Log2Max = Log2_64(Max);
  if (Log2Z < Log2Max) {
    // The product has strictly fewer significant bits than Max: cannot
    // overflow, multiply directly.
    return X * Y;
  }
  if (Log2Z > Log2Max) {
    // The product needs more bits than T provides: guaranteed overflow.
    Overflowed = true;
    return Max;
  }

  // We're going to use the top bit, and maybe overflow one
  // bit past it. Multiply all but the bottom bit then add
  // that on at the end.
  T Z = (X >> 1) * Y;
  if (Z & ~(Max >> 1)) {
    // Doubling Z below would shift into (or past) the top bit: overflow.
    Overflowed = true;
    return Max;
  }
  Z <<= 1;
  if (X & 1)
    // Restore the dropped low bit of X by adding one more Y, saturating.
    return SaturatingAdd(Z, Y, ResultOverflowed);

  return Z;
}
859 | |||||
/// Multiply two unsigned integers, X and Y, and add the unsigned integer, A to
/// the product. Clamp the result to the maximum representable value of T on
/// overflow. ResultOverflowed indicates if the result is larger than the
/// maximum representable value of type T.
template <typename T>
std::enable_if_t<std::is_unsigned<T>::value, T>
SaturatingMultiplyAdd(T X, T Y, T A, bool *ResultOverflowed = nullptr) {
  // Route the overflow flag through one pointer whether or not the caller
  // supplied one.
  bool LocalOverflow;
  bool *Flag = ResultOverflowed ? ResultOverflowed : &LocalOverflow;

  const T Product = SaturatingMultiply(X, Y, Flag);
  // If the multiply already saturated, the (saturated) product is the answer.
  return *Flag ? Product : SaturatingAdd(A, Product, Flag);
}
876 | |||||
/// Use this rather than HUGE_VALF; the latter causes warnings on MSVC.
/// Declared extern here; the single definition lives elsewhere in the library.
extern const float huge_valf;
879 | |||||
880 | |||||
/// Add two signed integers, computing the two's complement truncated result,
/// returning true if overflow occurred.
/// NOTE(review): the declared return type is T, but the value returned is the
/// boolean overflow flag — callers should treat it as truthy/falsy.
template <typename T>
std::enable_if_t<std::is_signed<T>::value, T> AddOverflow(T X, T Y, T &Result) {
#if __has_builtin(__builtin_add_overflow)
  return __builtin_add_overflow(X, Y, &Result);
#else
  // Do the arithmetic in the unsigned domain, where wraparound is defined,
  // then convert back.
  using U = std::make_unsigned_t<T>;
  Result = static_cast<T>(static_cast<U>(X) + static_cast<U>(Y));

  // Overflow can only happen when both operands share a sign and the
  // truncated result flipped to the opposite sign.
  if (X > 0 && Y > 0)
    return Result <= 0;
  if (X < 0 && Y < 0)
    return Result >= 0;
  return false;
#endif
}
906 | |||||
/// Subtract two signed integers, computing the two's complement truncated
/// result, returning true if an overflow occurred.
/// NOTE(review): the declared return type is T, but the value returned is the
/// boolean overflow flag — callers should treat it as truthy/falsy.
template <typename T>
std::enable_if_t<std::is_signed<T>::value, T> SubOverflow(T X, T Y, T &Result) {
#if __has_builtin(__builtin_sub_overflow)
  return __builtin_sub_overflow(X, Y, &Result);
#else
  // Do the arithmetic in the unsigned domain, where wraparound is defined,
  // then convert back.
  using U = std::make_unsigned_t<T>;
  Result = static_cast<T>(static_cast<U>(X) - static_cast<U>(Y));

  // Subtracting a positive from a non-positive must stay non-positive, and
  // subtracting a negative from a non-negative must stay non-negative; a
  // flipped sign means the true result didn't fit.
  if (X <= 0 && Y > 0)
    return Result >= 0;
  if (X >= 0 && Y < 0)
    return Result <= 0;
  return false;
#endif
}
932 | |||||
/// Multiply two signed integers, computing the two's complement truncated
/// result, returning true if an overflow occurred.
/// NOTE(review): the declared return type is T, but the value returned is the
/// boolean overflow flag — callers should treat it as truthy/falsy.
template <typename T>
std::enable_if_t<std::is_signed<T>::value, T> MulOverflow(T X, T Y, T &Result) {
  // Multiply magnitudes in the unsigned domain so the operation can't trap.
  using U = std::make_unsigned_t<T>;
  const U AbsX = X < 0 ? (0 - static_cast<U>(X)) : static_cast<U>(X);
  const U AbsY = Y < 0 ? (0 - static_cast<U>(Y)) : static_cast<U>(Y);
  const U UProduct = AbsX * AbsY;

  // Reapply the sign of the mathematically correct result.
  const bool IsNegative = (X < 0) ^ (Y < 0);
  Result = IsNegative ? (0 - UProduct) : UProduct;

  // A zero operand can never overflow.
  if (AbsX == 0 || AbsY == 0)
    return false;

  // AbsX and AbsY are in [1, 2^n], where n is the number of digits.
  // Compare against the largest allowed magnitude (2^n for a negative result,
  // 2^(n-1) for a positive one) divided by the other operand.
  if (IsNegative)
    return AbsX > (static_cast<U>(std::numeric_limits<T>::max()) + U(1)) / AbsY;
  return AbsX > (static_cast<U>(std::numeric_limits<T>::max())) / AbsY;
}
959 | |||||
960 | } // End llvm namespace | ||||
961 | |||||
962 | #endif |