45 "x86-experimental-lvi-inline-asm-hardening",
46 cl::desc(
"Harden inline assembly code that may be vulnerable to Load Value"
47 " Injection (LVI). This feature is experimental."),
cl::Hidden);
50 if (Scale != 1 && Scale != 2 && Scale != 4 && Scale != 8) {
51 ErrMsg =
"scale factor in address must be 1, 2, 4 or 8";
59 static const char OpPrecedence[] = {
87 unsigned ForcedDataPrefix = 0;
97 VEXEncoding ForcedVEXEncoding = VEXEncoding_Default;
100 DispEncoding_Default,
105 DispEncoding ForcedDispEncoding = DispEncoding_Default;
108 SMLoc consumeToken() {
116 assert(getParser().getStreamer().getTargetStreamer() &&
117 "do not have a target streamer");
124 bool matchingInlineAsm,
unsigned VariantID = 0) {
127 SwitchMode(X86::Is32Bit);
129 MissingFeatures, matchingInlineAsm,
132 SwitchMode(X86::Is16Bit);
136 enum InfixCalculatorTok {
161 enum IntelOperatorKind {
168 enum MasmOperatorKind {
175 class InfixCalculator {
176 typedef std::pair< InfixCalculatorTok, int64_t > ICToken;
180 bool isUnaryOperator(InfixCalculatorTok
Op)
const {
181 return Op == IC_NEG ||
Op == IC_NOT;
185 int64_t popOperand() {
186 assert (!PostfixStack.empty() &&
"Poped an empty stack!");
188 if (!(
Op.first == IC_IMM ||
Op.first == IC_REGISTER))
192 void pushOperand(InfixCalculatorTok
Op, int64_t Val = 0) {
193 assert ((
Op == IC_IMM ||
Op == IC_REGISTER) &&
194 "Unexpected operand!");
195 PostfixStack.push_back(std::make_pair(
Op, Val));
// Discard the operator on top of the infix operator stack (no reordering
// into the postfix stack; callers use this when the operator was consumed
// structurally, e.g. a matched parenthesis).
198 void popOperator() { InfixOperatorStack.pop_back(); }
199 void pushOperator(InfixCalculatorTok
Op) {
201 if (InfixOperatorStack.empty()) {
202 InfixOperatorStack.push_back(
Op);
209 unsigned Idx = InfixOperatorStack.size() - 1;
210 InfixCalculatorTok StackOp = InfixOperatorStack[Idx];
211 if (OpPrecedence[
Op] > OpPrecedence[StackOp] || StackOp == IC_LPAREN) {
212 InfixOperatorStack.push_back(
Op);
218 unsigned ParenCount = 0;
221 if (InfixOperatorStack.empty())
224 Idx = InfixOperatorStack.size() - 1;
225 StackOp = InfixOperatorStack[Idx];
226 if (!(OpPrecedence[StackOp] >= OpPrecedence[
Op] || ParenCount))
231 if (!ParenCount && StackOp == IC_LPAREN)
234 if (StackOp == IC_RPAREN) {
236 InfixOperatorStack.pop_back();
237 }
else if (StackOp == IC_LPAREN) {
239 InfixOperatorStack.pop_back();
241 InfixOperatorStack.pop_back();
242 PostfixStack.push_back(std::make_pair(StackOp, 0));
246 InfixOperatorStack.push_back(
Op);
251 while (!InfixOperatorStack.empty()) {
252 InfixCalculatorTok StackOp = InfixOperatorStack.
pop_back_val();
253 if (StackOp != IC_LPAREN && StackOp != IC_RPAREN)
254 PostfixStack.push_back(std::make_pair(StackOp, 0));
257 if (PostfixStack.empty())
261 for (
unsigned i = 0,
e = PostfixStack.size();
i !=
e; ++
i) {
262 ICToken
Op = PostfixStack[
i];
263 if (
Op.first == IC_IMM ||
Op.first == IC_REGISTER) {
264 OperandStack.push_back(
Op);
265 }
else if (isUnaryOperator(
Op.first)) {
266 assert (OperandStack.size() > 0 &&
"Too few operands.");
268 assert (Operand.first == IC_IMM &&
269 "Unary operation with a register!");
275 OperandStack.push_back(std::make_pair(IC_IMM, -Operand.second));
278 OperandStack.push_back(std::make_pair(IC_IMM, ~Operand.second));
282 assert (OperandStack.size() > 1 &&
"Too few operands.");
291 Val = Op1.second + Op2.second;
292 OperandStack.push_back(std::make_pair(IC_IMM, Val));
295 Val = Op1.second - Op2.second;
296 OperandStack.push_back(std::make_pair(IC_IMM, Val));
299 assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
300 "Multiply operation with an immediate and a register!");
301 Val = Op1.second * Op2.second;
302 OperandStack.push_back(std::make_pair(IC_IMM, Val));
305 assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
306 "Divide operation with an immediate and a register!");
307 assert (Op2.second != 0 &&
"Division by zero!");
308 Val = Op1.second / Op2.second;
309 OperandStack.push_back(std::make_pair(IC_IMM, Val));
312 assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
313 "Modulo operation with an immediate and a register!");
314 Val = Op1.second % Op2.second;
315 OperandStack.push_back(std::make_pair(IC_IMM, Val));
318 assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
319 "Or operation with an immediate and a register!");
320 Val = Op1.second | Op2.second;
321 OperandStack.push_back(std::make_pair(IC_IMM, Val));
324 assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
325 "Xor operation with an immediate and a register!");
326 Val = Op1.second ^ Op2.second;
327 OperandStack.push_back(std::make_pair(IC_IMM, Val));
330 assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
331 "And operation with an immediate and a register!");
332 Val = Op1.second & Op2.second;
333 OperandStack.push_back(std::make_pair(IC_IMM, Val));
336 assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
337 "Left shift operation with an immediate and a register!");
338 Val = Op1.second << Op2.second;
339 OperandStack.push_back(std::make_pair(IC_IMM, Val));
342 assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
343 "Right shift operation with an immediate and a register!");
344 Val = Op1.second >> Op2.second;
345 OperandStack.push_back(std::make_pair(IC_IMM, Val));
348 assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
349 "Equals operation with an immediate and a register!");
350 Val = (Op1.second == Op2.second) ? -1 : 0;
351 OperandStack.push_back(std::make_pair(IC_IMM, Val));
354 assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
355 "Not-equals operation with an immediate and a register!");
356 Val = (Op1.second != Op2.second) ? -1 : 0;
357 OperandStack.push_back(std::make_pair(IC_IMM, Val));
360 assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
361 "Less-than operation with an immediate and a register!");
362 Val = (Op1.second < Op2.second) ? -1 : 0;
363 OperandStack.push_back(std::make_pair(IC_IMM, Val));
366 assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
367 "Less-than-or-equal operation with an immediate and a "
369 Val = (Op1.second <= Op2.second) ? -1 : 0;
370 OperandStack.push_back(std::make_pair(IC_IMM, Val));
373 assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
374 "Greater-than operation with an immediate and a register!");
375 Val = (Op1.second > Op2.second) ? -1 : 0;
376 OperandStack.push_back(std::make_pair(IC_IMM, Val));
379 assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
380 "Greater-than-or-equal operation with an immediate and a "
382 Val = (Op1.second >= Op2.second) ? -1 : 0;
383 OperandStack.push_back(std::make_pair(IC_IMM, Val));
388 assert (OperandStack.size() == 1 &&
"Expected a single result.");
393 enum IntelExprState {
424 class IntelExprStateMachine {
425 IntelExprState State, PrevState;
426 unsigned BaseReg, IndexReg, TmpReg, Scale;
435 bool AttachToOperandIdx;
437 SMLoc OffsetOperatorLoc;
442 ErrMsg =
"cannot use more than one symbol in memory operand";
451 IntelExprStateMachine()
452 : State(IES_INIT), PrevState(IES_ERROR), BaseReg(0), IndexReg(0),
453 TmpReg(0), Scale(0), Imm(0), Sym(nullptr), BracCount(0),
// Accumulate a displacement directly into the expression's immediate,
// bypassing the infix calculator (see getImm(), which adds Imm to the
// calculator's result).
457 void addImm(int64_t imm) { Imm += imm; }
// Current count of unmatched '[' brackets seen while parsing.
458 short getBracCount()
const {
return BracCount; }
// True if the expression has been classified as a memory reference.
459 bool isMemExpr()
const {
return MemExpr; }
// True if an OFFSET operator was parsed in this expression.
460 bool isOffsetOperator()
const {
return OffsetOperator; }
// Source location of the OFFSET operator (only meaningful when
// isOffsetOperator() is true).
461 SMLoc getOffsetLoc()
const {
return OffsetOperatorLoc; }
// Base register of the memory operand, or 0 if none was parsed.
462 unsigned getBaseReg()
const {
return BaseReg; }
// Index register of the memory operand, or 0 if none was parsed.
463 unsigned getIndexReg()
const {
return IndexReg; }
// Scale factor applied to the index register (0 when unset).
464 unsigned getScale()
const {
return Scale; }
// Name of the symbol referenced in the expression, if any.
466 StringRef getSymName()
const {
return SymName; }
// Size in bits of the expression's resolved type info.
468 unsigned getSize()
const {
return CurType.
Size; }
// Element size in bits of the resolved type info (for arrays/vectors).
469 unsigned getElementSize()
const {
return CurType.
ElementSize; }
// Element count of the resolved type info.
470 unsigned getLength()
const {
return CurType.
Length; }
// Final immediate value: the accumulated displacement plus the result of
// evaluating the postfix expression built up in the infix calculator.
// Not const because IC.execute() consumes the calculator's stacks.
471 int64_t getImm() {
return Imm + IC.execute(); }
472 bool isValidEndState()
const {
473 return State == IES_RBRAC || State == IES_INTEGER;
// Record that subsequent registers attach after an operand index; checked
// together with IsPIC when registers are combined (presumably to reject
// multi-register memory offsets under PIC — see the regsUseUpError path).
480 void setAppendAfterOperand() { AttachToOperandIdx =
true; }
// Whether we are parsing under a position-independent-code model.
482 bool isPIC()
const {
return IsPIC; }
// Mark the expression as being parsed under the PIC model.
483 void setPIC() { IsPIC =
true; }
// True once the state machine has entered its error state (IES_ERROR).
485 bool hadError()
const {
return State == IES_ERROR; }
491 if (IsPIC && AttachToOperandIdx)
492 ErrMsg =
"Don't use 2 or more regs for mem offset in PIC model!";
494 ErrMsg =
"BaseReg/IndexReg already set!";
499 IntelExprState CurrState = State;
508 IC.pushOperator(IC_OR);
511 PrevState = CurrState;
514 IntelExprState CurrState = State;
523 IC.pushOperator(IC_XOR);
526 PrevState = CurrState;
529 IntelExprState CurrState = State;
538 IC.pushOperator(IC_AND);
541 PrevState = CurrState;
544 IntelExprState CurrState = State;
553 IC.pushOperator(IC_EQ);
556 PrevState = CurrState;
559 IntelExprState CurrState = State;
568 IC.pushOperator(IC_NE);
571 PrevState = CurrState;
574 IntelExprState CurrState = State;
583 IC.pushOperator(IC_LT);
586 PrevState = CurrState;
589 IntelExprState CurrState = State;
598 IC.pushOperator(IC_LE);
601 PrevState = CurrState;
604 IntelExprState CurrState = State;
613 IC.pushOperator(IC_GT);
616 PrevState = CurrState;
619 IntelExprState CurrState = State;
628 IC.pushOperator(IC_GE);
631 PrevState = CurrState;
634 IntelExprState CurrState = State;
643 IC.pushOperator(IC_LSHIFT);
646 PrevState = CurrState;
649 IntelExprState CurrState = State;
658 IC.pushOperator(IC_RSHIFT);
661 PrevState = CurrState;
664 IntelExprState CurrState = State;
674 IC.pushOperator(IC_PLUS);
675 if (CurrState == IES_REGISTER && PrevState != IES_MULTIPLY) {
682 return regsUseUpError(ErrMsg);
689 PrevState = CurrState;
693 IntelExprState CurrState = State;
724 if (CurrState == IES_REGISTER || CurrState == IES_RPAREN ||
725 CurrState == IES_INTEGER || CurrState == IES_RBRAC ||
726 CurrState == IES_OFFSET)
727 IC.pushOperator(IC_MINUS);
728 else if (PrevState == IES_REGISTER && CurrState == IES_MULTIPLY) {
730 ErrMsg =
"Scale can't be negative";
733 IC.pushOperator(IC_NEG);
734 if (CurrState == IES_REGISTER && PrevState != IES_MULTIPLY) {
741 return regsUseUpError(ErrMsg);
748 PrevState = CurrState;
752 IntelExprState CurrState = State;
778 IC.pushOperator(IC_NOT);
781 PrevState = CurrState;
784 IntelExprState CurrState = State;
792 State = IES_REGISTER;
794 IC.pushOperand(IC_REGISTER);
798 if (PrevState == IES_INTEGER) {
800 return regsUseUpError(ErrMsg);
801 State = IES_REGISTER;
804 Scale = IC.popOperand();
807 IC.pushOperand(IC_IMM);
814 PrevState = CurrState;
822 if (ParsingMSInlineAsm)
826 if (
auto *CE = dyn_cast<MCConstantExpr>(SymRef))
827 return onInteger(
CE->getValue(), ErrMsg);
840 if (setSymRef(SymRef, SymRefName, ErrMsg))
844 IC.pushOperand(IC_IMM);
845 if (ParsingMSInlineAsm)
852 bool onInteger(int64_t TmpInt,
StringRef &ErrMsg) {
853 IntelExprState CurrState = State;
879 if (PrevState == IES_REGISTER && CurrState == IES_MULTIPLY) {
882 return regsUseUpError(ErrMsg);
890 IC.pushOperand(IC_IMM, TmpInt);
894 PrevState = CurrState;
906 State = IES_MULTIPLY;
907 IC.pushOperator(IC_MULTIPLY);
920 IC.pushOperator(IC_DIVIDE);
933 IC.pushOperator(IC_MOD);
949 IC.pushOperator(IC_PLUS);
955 assert(!BracCount &&
"BracCount should be zero on parsing's start");
964 IntelExprState CurrState = State;
973 if (BracCount-- != 1) {
974 ErrMsg =
"unexpected bracket encountered";
978 if (CurrState == IES_REGISTER && PrevState != IES_MULTIPLY) {
985 return regsUseUpError(ErrMsg);
992 PrevState = CurrState;
996 IntelExprState CurrState = State;
1022 IC.pushOperator(IC_LPAREN);
1025 PrevState = CurrState;
1039 IC.pushOperator(IC_RPAREN);
1045 bool ParsingMSInlineAsm,
StringRef &ErrMsg) {
1049 ErrMsg =
"unexpected offset operator expression";
1054 if (setSymRef(Val,
ID, ErrMsg))
1056 OffsetOperator =
true;
1057 OffsetOperatorLoc = OffsetLoc;
1061 IC.pushOperand(IC_IMM);
1062 if (ParsingMSInlineAsm) {
1085 bool MatchingInlineAsm =
false) {
1087 if (MatchingInlineAsm) {
1088 if (!getLexer().isAtStartOfStatement())
1092 return Parser.
Error(L,
Msg, Range);
1097 bool ParseRegister(
unsigned &RegNo,
SMLoc &StartLoc,
SMLoc &EndLoc,
1098 bool RestoreOnFailure);
1100 std::unique_ptr<X86Operand> DefaultMemSIOperand(
SMLoc Loc);
1101 std::unique_ptr<X86Operand> DefaultMemDIOperand(
SMLoc Loc);
1102 bool IsSIReg(
unsigned Reg);
1103 unsigned GetSIDIForRegClass(
unsigned RegClassID,
unsigned Reg,
bool IsSIReg);
1106 std::unique_ptr<llvm::MCParsedAsmOperand> &&Src,
1107 std::unique_ptr<llvm::MCParsedAsmOperand> &&Dst);
1115 bool ParseIntelDotOperator(IntelExprStateMachine &SM,
SMLoc &End);
1117 unsigned ParseIntelInlineAsmOperator(
unsigned OpKind);
1119 bool ParseMasmOperator(
unsigned OpKind, int64_t &Val);
1121 bool ParseIntelNamedOperator(
StringRef Name, IntelExprStateMachine &SM,
1122 bool &ParseError,
SMLoc &End);
1123 bool ParseMasmNamedOperator(
StringRef Name, IntelExprStateMachine &SM,
1124 bool &ParseError,
SMLoc &End);
1125 void RewriteIntelExpression(IntelExprStateMachine &SM,
SMLoc Start,
1127 bool ParseIntelExpression(IntelExprStateMachine &SM,
SMLoc &End);
1128 bool ParseIntelInlineAsmIdentifier(
const MCExpr *&Val,
StringRef &Identifier,
1130 bool IsUnevaluatedOperand,
SMLoc &End,
1131 bool IsParsingOffsetOperator =
false);
1133 IntelExprStateMachine &SM);
1135 bool ParseMemOperand(
unsigned SegReg,
const MCExpr *Disp,
SMLoc StartLoc,
1140 bool ParseIntelMemoryOperandSize(
unsigned &Size);
1141 bool CreateMemForMSInlineAsm(
unsigned SegReg,
const MCExpr *Disp,
1142 unsigned BaseReg,
unsigned IndexReg,
1148 bool parseDirectiveArch();
1149 bool parseDirectiveNops(
SMLoc L);
1150 bool parseDirectiveEven(
SMLoc L);
1154 bool parseDirectiveFPOProc(
SMLoc L);
1155 bool parseDirectiveFPOSetFrame(
SMLoc L);
1156 bool parseDirectiveFPOPushReg(
SMLoc L);
1157 bool parseDirectiveFPOStackAlloc(
SMLoc L);
1158 bool parseDirectiveFPOStackAlign(
SMLoc L);
1159 bool parseDirectiveFPOEndPrologue(
SMLoc L);
1160 bool parseDirectiveFPOEndProc(
SMLoc L);
1163 bool parseSEHRegisterNumber(
unsigned RegClassID,
unsigned &RegNo);
1164 bool parseDirectiveSEHPushReg(
SMLoc);
1165 bool parseDirectiveSEHSetFrame(
SMLoc);
1166 bool parseDirectiveSEHSaveReg(
SMLoc);
1167 bool parseDirectiveSEHSaveXMM(
SMLoc);
1168 bool parseDirectiveSEHPushFrame(
SMLoc);
1170 unsigned checkTargetMatchPredicate(
MCInst &Inst)
override;
1176 void emitWarningForSpecialLVIInstruction(
SMLoc Loc);
1184 bool MatchAndEmitInstruction(
SMLoc IDLoc,
unsigned &Opcode,
1187 bool MatchingInlineAsm)
override;
1193 bool MatchingInlineAsm);
1195 bool MatchAndEmitATTInstruction(
SMLoc IDLoc,
unsigned &Opcode,
1198 bool MatchingInlineAsm);
1200 bool MatchAndEmitIntelInstruction(
SMLoc IDLoc,
unsigned &Opcode,
1203 bool MatchingInlineAsm);
1205 bool OmitRegisterFromClobberLists(
unsigned RegNo)
override;
1212 bool ParseZ(std::unique_ptr<X86Operand> &Z,
const SMLoc &StartLoc);
1214 bool is64BitMode()
const {
1216 return getSTI().getFeatureBits()[X86::Is64Bit];
1218 bool is32BitMode()
const {
1220 return getSTI().getFeatureBits()[X86::Is32Bit];
1222 bool is16BitMode()
const {
1224 return getSTI().getFeatureBits()[X86::Is16Bit];
1226 void SwitchMode(
unsigned mode) {
1228 FeatureBitset AllModes({X86::Is64Bit, X86::Is32Bit, X86::Is16Bit});
1232 setAvailableFeatures(FB);
1237 unsigned getPointerWidth() {
1238 if (is16BitMode())
return 16;
1239 if (is32BitMode())
return 32;
1240 if (is64BitMode())
return 64;
1244 bool isParsingIntelSyntax() {
1245 return getParser().getAssemblerDialect();
1251 #define GET_ASSEMBLER_HEADER
1252 #include "X86GenAsmMatcher.inc"
1257 enum X86MatchResultTy {
1258 Match_Unsupported = FIRST_TARGET_MATCH_RESULT_TY,
1259 #define GET_OPERAND_DIAGNOSTIC_TYPES
1260 #include "X86GenAsmMatcher.inc"
1271 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
1274 bool ParseRegister(
unsigned &RegNo,
SMLoc &StartLoc,
SMLoc &EndLoc)
override;
1276 SMLoc &EndLoc)
override;
1278 bool parsePrimaryExpr(
const MCExpr *&Res,
SMLoc &EndLoc)
override;
1283 bool ParseDirective(
AsmToken DirectiveID)
override;
1295 unsigned Scale,
bool Is64BitMode,
1302 !(BaseReg == X86::RIP || BaseReg == X86::EIP ||
1303 X86MCRegisterClasses[X86::GR16RegClassID].
contains(BaseReg) ||
1304 X86MCRegisterClasses[X86::GR32RegClassID].
contains(BaseReg) ||
1305 X86MCRegisterClasses[X86::GR64RegClassID].
contains(BaseReg))) {
1306 ErrMsg =
"invalid base+index expression";
1310 if (IndexReg != 0 &&
1311 !(IndexReg == X86::EIZ || IndexReg == X86::RIZ ||
1312 X86MCRegisterClasses[X86::GR16RegClassID].
contains(IndexReg) ||
1313 X86MCRegisterClasses[X86::GR32RegClassID].
contains(IndexReg) ||
1314 X86MCRegisterClasses[X86::GR64RegClassID].
contains(IndexReg) ||
1315 X86MCRegisterClasses[X86::VR128XRegClassID].
contains(IndexReg) ||
1316 X86MCRegisterClasses[X86::VR256XRegClassID].
contains(IndexReg) ||
1317 X86MCRegisterClasses[X86::VR512RegClassID].
contains(IndexReg))) {
1318 ErrMsg =
"invalid base+index expression";
1322 if (((BaseReg == X86::RIP || BaseReg == X86::EIP) && IndexReg != 0) ||
1323 IndexReg == X86::EIP || IndexReg == X86::RIP ||
1324 IndexReg ==
X86::ESP || IndexReg == X86::RSP) {
1325 ErrMsg =
"invalid base+index expression";
1331 if (X86MCRegisterClasses[X86::GR16RegClassID].
contains(BaseReg) &&
1332 (Is64BitMode || (BaseReg != X86::BX && BaseReg != X86::BP &&
1333 BaseReg !=
X86::SI && BaseReg != X86::DI))) {
1334 ErrMsg =
"invalid 16-bit base register";
1339 X86MCRegisterClasses[X86::GR16RegClassID].
contains(IndexReg)) {
1340 ErrMsg =
"16-bit memory operand may not include only index register";
1344 if (BaseReg != 0 && IndexReg != 0) {
1345 if (X86MCRegisterClasses[X86::GR64RegClassID].
contains(BaseReg) &&
1346 (X86MCRegisterClasses[X86::GR16RegClassID].
contains(IndexReg) ||
1347 X86MCRegisterClasses[X86::GR32RegClassID].
contains(IndexReg) ||
1348 IndexReg == X86::EIZ)) {
1349 ErrMsg =
"base register is 64-bit, but index register is not";
1352 if (X86MCRegisterClasses[X86::GR32RegClassID].
contains(BaseReg) &&
1353 (X86MCRegisterClasses[X86::GR16RegClassID].
contains(IndexReg) ||
1354 X86MCRegisterClasses[X86::GR64RegClassID].
contains(IndexReg) ||
1355 IndexReg == X86::RIZ)) {
1356 ErrMsg =
"base register is 32-bit, but index register is not";
1359 if (X86MCRegisterClasses[X86::GR16RegClassID].
contains(BaseReg)) {
1360 if (X86MCRegisterClasses[X86::GR32RegClassID].
contains(IndexReg) ||
1361 X86MCRegisterClasses[X86::GR64RegClassID].
contains(IndexReg)) {
1362 ErrMsg =
"base register is 16-bit, but index register is not";
1365 if ((BaseReg != X86::BX && BaseReg != X86::BP) ||
1366 (IndexReg !=
X86::SI && IndexReg != X86::DI)) {
1367 ErrMsg =
"invalid 16-bit base/index register combination";
1374 if (!Is64BitMode && BaseReg != 0 &&
1375 (BaseReg == X86::RIP || BaseReg == X86::EIP)) {
1376 ErrMsg =
"IP-relative addressing requires 64-bit mode";
1397 if (isParsingMSInlineAsm() && isParsingIntelSyntax() &&
1398 (RegNo == X86::EFLAGS || RegNo == X86::MXCSR))
1401 if (!is64BitMode()) {
1405 if (RegNo == X86::RIZ || RegNo == X86::RIP ||
1406 X86MCRegisterClasses[X86::GR64RegClassID].
contains(RegNo) ||
1409 return Error(StartLoc,
1410 "register %" +
RegName +
" is only available in 64-bit mode",
1417 if (RegNo == 0 &&
RegName.startswith(
"db")) {
1476 if (isParsingIntelSyntax())
1478 return Error(StartLoc,
"invalid register name",
SMRange(StartLoc, EndLoc));
1483 bool X86AsmParser::ParseRegister(
unsigned &RegNo,
SMLoc &StartLoc,
1484 SMLoc &EndLoc,
bool RestoreOnFailure) {
1490 auto OnFailure = [RestoreOnFailure, &Lexer, &Tokens]() {
1491 if (RestoreOnFailure) {
1492 while (!Tokens.empty()) {
1499 StartLoc = PercentTok.
getLoc();
1504 Tokens.push_back(PercentTok);
1513 if (isParsingIntelSyntax())
return true;
1514 return Error(StartLoc,
"invalid register name",
1518 if (MatchRegisterByName(RegNo, Tok.
getString(), StartLoc, EndLoc)) {
1524 if (RegNo == X86::ST0) {
1525 Tokens.push_back(Tok);
1532 Tokens.push_back(Parser.
getTok());
1538 return Error(IntTok.
getLoc(),
"expected stack index");
1541 case 0: RegNo = X86::ST0;
break;
1542 case 1: RegNo = X86::ST1;
break;
1543 case 2: RegNo = X86::ST2;
break;
1544 case 3: RegNo = X86::ST3;
break;
1545 case 4: RegNo = X86::ST4;
break;
1546 case 5: RegNo = X86::ST5;
break;
1547 case 6: RegNo = X86::ST6;
break;
1548 case 7: RegNo = X86::ST7;
break;
1551 return Error(IntTok.
getLoc(),
"invalid stack index");
1555 Tokens.push_back(IntTok);
1571 if (isParsingIntelSyntax())
return true;
1572 return Error(StartLoc,
"invalid register name",
1580 bool X86AsmParser::ParseRegister(
unsigned &RegNo,
SMLoc &StartLoc,
1582 return ParseRegister(RegNo, StartLoc, EndLoc,
false);
1589 ParseRegister(RegNo, StartLoc, EndLoc,
true);
1590 bool PendingErrors = getParser().hasPendingError();
1591 getParser().clearPendingErrors();
1599 std::unique_ptr<X86Operand> X86AsmParser::DefaultMemSIOperand(
SMLoc Loc) {
1600 bool Parse32 = is32BitMode() || Code16GCC;
1601 unsigned Basereg = is64BitMode() ? X86::RSI : (Parse32 ?
X86::ESI :
X86::SI);
1608 std::unique_ptr<X86Operand> X86AsmParser::DefaultMemDIOperand(
SMLoc Loc) {
1609 bool Parse32 = is32BitMode() || Code16GCC;
1610 unsigned Basereg = is64BitMode() ? X86::RDI : (Parse32 ?
X86::EDI : X86::DI);
1617 bool X86AsmParser::IsSIReg(
unsigned Reg) {
1631 unsigned X86AsmParser::GetSIDIForRegClass(
unsigned RegClassID,
unsigned Reg,
1633 switch (RegClassID) {
1635 case X86::GR64RegClassID:
1636 return IsSIReg ? X86::RSI : X86::RDI;
1637 case X86::GR32RegClassID:
1639 case X86::GR16RegClassID:
1640 return IsSIReg ?
X86::SI : X86::DI;
1644 void X86AsmParser::AddDefaultSrcDestOperands(
1646 std::unique_ptr<llvm::MCParsedAsmOperand> &&Dst) {
1647 if (isParsingIntelSyntax()) {
1657 bool X86AsmParser::VerifyAndAdjustOperands(
OperandVector &OrigOperands,
1660 if (OrigOperands.size() > 1) {
1662 assert(OrigOperands.size() == FinalOperands.size() + 1 &&
1663 "Operand size mismatch");
1667 int RegClassID = -1;
1668 for (
unsigned int i = 0;
i < FinalOperands.size(); ++
i) {
1672 if (FinalOp.
isReg() &&
1677 if (FinalOp.
isMem()) {
1679 if (!OrigOp.
isMem())
1683 unsigned OrigReg = OrigOp.
Mem.BaseReg;
1684 unsigned FinalReg = FinalOp.
Mem.BaseReg;
1688 if (RegClassID != -1 &&
1689 !X86MCRegisterClasses[RegClassID].
contains(OrigReg)) {
1691 "mismatching source and destination index registers");
1694 if (X86MCRegisterClasses[X86::GR64RegClassID].
contains(OrigReg))
1695 RegClassID = X86::GR64RegClassID;
1696 else if (X86MCRegisterClasses[X86::GR32RegClassID].
contains(OrigReg))
1697 RegClassID = X86::GR32RegClassID;
1698 else if (X86MCRegisterClasses[X86::GR16RegClassID].
contains(OrigReg))
1699 RegClassID = X86::GR16RegClassID;
1705 bool IsSI = IsSIReg(FinalReg);
1706 FinalReg = GetSIDIForRegClass(RegClassID, FinalReg, IsSI);
1708 if (FinalReg != OrigReg) {
1709 std::string
RegName = IsSI ?
"ES:(R|E)SI" :
"ES:(R|E)DI";
1710 Warnings.push_back(std::make_pair(
1712 "memory operand is only for determining the size, " +
RegName +
1713 " will be used for the location"));
1716 FinalOp.
Mem.Size = OrigOp.
Mem.Size;
1717 FinalOp.
Mem.SegReg = OrigOp.
Mem.SegReg;
1718 FinalOp.
Mem.BaseReg = FinalReg;
1724 for (
auto &WarningMsg : Warnings) {
1725 Warning(WarningMsg.first, WarningMsg.second);
1729 for (
unsigned int i = 0;
i < FinalOperands.size(); ++
i)
1730 OrigOperands.pop_back();
1733 for (
unsigned int i = 0;
i < FinalOperands.size(); ++
i)
1734 OrigOperands.push_back(
std::move(FinalOperands[
i]));
1740 if (isParsingIntelSyntax())
1746 bool X86AsmParser::CreateMemForMSInlineAsm(
1747 unsigned SegReg,
const MCExpr *Disp,
unsigned BaseReg,
unsigned IndexReg,
1755 Size = getPointerWidth();
1762 End, Size, Identifier,
1769 unsigned FrontendSize = 0;
1770 void *Decl =
nullptr;
1771 bool IsGlobalLV =
false;
1774 FrontendSize =
Info.Var.Type * 8;
1775 Decl =
Info.Var.Decl;
1776 IsGlobalLV =
Info.Var.IsGlobalLV;
1780 if (IsGlobalLV && (BaseReg || IndexReg)) {
1782 End, Size, Identifier, Decl, 0,
1783 BaseReg && IndexReg));
1789 BaseReg = BaseReg ? BaseReg : 1;
1791 getPointerWidth(), SegReg, Disp, BaseReg, IndexReg, Scale, Start, End,
1793 X86::RIP, Identifier, Decl, FrontendSize));
1801 IntelExprStateMachine &SM,
1802 bool &ParseError,
SMLoc &End) {
1806 !getParser().isParsingMasm())
1808 if (
Name.equals_insensitive(
"not")) {
1810 }
else if (
Name.equals_insensitive(
"or")) {
1812 }
else if (
Name.equals_insensitive(
"shl")) {
1814 }
else if (
Name.equals_insensitive(
"shr")) {
1816 }
else if (
Name.equals_insensitive(
"xor")) {
1818 }
else if (
Name.equals_insensitive(
"and")) {
1820 }
else if (
Name.equals_insensitive(
"mod")) {
1822 }
else if (
Name.equals_insensitive(
"offset")) {
1823 SMLoc OffsetLoc = getTok().getLoc();
1824 const MCExpr *Val =
nullptr;
1827 ParseError = ParseIntelOffsetOperator(Val,
ID,
Info, End);
1832 SM.onOffset(Val, OffsetLoc,
ID,
Info, isParsingMSInlineAsm(), ErrMsg);
1838 if (!
Name.equals_insensitive(
"offset"))
1839 End = consumeToken();
1843 IntelExprStateMachine &SM,
1844 bool &ParseError,
SMLoc &End) {
1845 if (
Name.equals_insensitive(
"eq")) {
1847 }
else if (
Name.equals_insensitive(
"ne")) {
1849 }
else if (
Name.equals_insensitive(
"lt")) {
1851 }
else if (
Name.equals_insensitive(
"le")) {
1853 }
else if (
Name.equals_insensitive(
"gt")) {
1855 }
else if (
Name.equals_insensitive(
"ge")) {
1860 End = consumeToken();
1867 IntelExprStateMachine &SM) {
1871 SM.setAppendAfterOperand();
1874 bool X86AsmParser::ParseIntelExpression(IntelExprStateMachine &SM,
SMLoc &End) {
1880 if (getContext().getObjectFileInfo()->isPositionIndependent())
1889 bool UpdateLocLex =
true;
1894 if ((Done = SM.isValidEndState()))
1896 return Error(Tok.
getLoc(),
"unknown token in expression");
1898 return Error(getLexer().getErrLoc(), getLexer().getErr());
1905 UpdateLocLex =
false;
1906 if (ParseIntelDotOperator(SM, End))
1911 if ((Done = SM.isValidEndState()))
1913 return Error(Tok.
getLoc(),
"unknown token in expression");
1917 UpdateLocLex =
false;
1918 if (ParseIntelDotOperator(SM, End))
1923 if ((Done = SM.isValidEndState()))
1925 return Error(Tok.
getLoc(),
"unknown token in expression");
1936 UpdateLocLex =
false;
1937 if (!Val->evaluateAsAbsolute(Res, getStreamer().getAssemblerPtr()))
1938 return Error(ValueLoc,
"expected absolute value");
1939 if (SM.onInteger(Res, ErrMsg))
1940 return Error(ValueLoc, ErrMsg);
1949 UpdateLocLex =
false;
1951 size_t DotOffset =
Identifier.find_first_of(
'.');
1969 const AsmToken &NextTok = getLexer().peekTok();
1978 End = consumeToken();
1985 if (!ParseRegister(
Reg, IdentLoc, End,
true)) {
1986 if (SM.onRegister(
Reg, ErrMsg))
1987 return Error(IdentLoc, ErrMsg);
1991 const std::pair<StringRef, StringRef> IDField =
1995 if (!
Field.empty() &&
1996 !MatchRegisterByName(
Reg,
ID, IdentLoc, IDEndLoc)) {
1997 if (SM.onRegister(
Reg, ErrMsg))
1998 return Error(IdentLoc, ErrMsg);
2003 return Error(FieldStartLoc,
"unknown offset");
2004 else if (SM.onPlus(ErrMsg))
2005 return Error(getTok().getLoc(), ErrMsg);
2006 else if (SM.onInteger(
Info.Offset, ErrMsg))
2007 return Error(IdentLoc, ErrMsg);
2008 SM.setTypeInfo(
Info.Type);
2010 End = consumeToken();
2016 bool ParseError =
false;
2017 if (ParseIntelNamedOperator(Identifier, SM, ParseError, End)) {
2023 ParseMasmNamedOperator(Identifier, SM, ParseError, End)) {
2036 if (ParseIntelDotOperator(SM, End))
2041 if (isParsingMSInlineAsm()) {
2043 if (
unsigned OpKind = IdentifyIntelInlineAsmOperator(Identifier)) {
2044 if (int64_t Val = ParseIntelInlineAsmOperator(OpKind)) {
2045 if (SM.onInteger(Val, ErrMsg))
2046 return Error(IdentLoc, ErrMsg);
2055 return Error(IdentLoc,
"expected identifier");
2056 if (ParseIntelInlineAsmIdentifier(Val, Identifier,
Info,
false, End))
2058 else if (SM.onIdentifierExpr(Val, Identifier,
Info, FieldInfo.
Type,
2060 return Error(IdentLoc, ErrMsg);
2064 if (
unsigned OpKind = IdentifyMasmOperator(Identifier)) {
2066 if (ParseMasmOperator(OpKind, Val))
2068 if (SM.onInteger(Val, ErrMsg))
2069 return Error(IdentLoc, ErrMsg);
2072 if (!getParser().lookUpType(Identifier, FieldInfo.
Type)) {
2078 getParser().parseIdentifier(Identifier);
2082 if (getParser().lookUpField(FieldInfo.
Type.
Name, Identifier,
2086 return Error(IdentLoc,
"Unable to lookup field reference!",
2092 if (SM.onInteger(FieldInfo.
Offset, ErrMsg))
2093 return Error(IdentLoc, ErrMsg);
2097 if (getParser().parsePrimaryExpr(Val, End, &FieldInfo.
Type)) {
2098 return Error(Tok.
getLoc(),
"Unexpected identifier!");
2099 }
else if (SM.onIdentifierExpr(Val, Identifier,
Info, FieldInfo.
Type,
2101 return Error(IdentLoc, ErrMsg);
2107 SMLoc Loc = getTok().getLoc();
2108 int64_t
IntVal = getTok().getIntVal();
2109 End = consumeToken();
2110 UpdateLocLex =
false;
2113 if (IDVal ==
"f" || IDVal ==
"b") {
2115 getContext().getDirectionalLocalSymbol(
IntVal, IDVal ==
"b");
2120 return Error(Loc,
"invalid reference to undefined symbol");
2124 if (SM.onIdentifierExpr(Val, Identifier,
Info,
Type,
2125 isParsingMSInlineAsm(), ErrMsg))
2126 return Error(Loc, ErrMsg);
2127 End = consumeToken();
2129 if (SM.onInteger(
IntVal, ErrMsg))
2130 return Error(Loc, ErrMsg);
2133 if (SM.onInteger(
IntVal, ErrMsg))
2134 return Error(Loc, ErrMsg);
2139 if (SM.onPlus(ErrMsg))
2140 return Error(getTok().getLoc(), ErrMsg);
2143 if (SM.onMinus(ErrMsg))
2144 return Error(getTok().getLoc(), ErrMsg);
2154 SM.onLShift();
break;
2156 SM.onRShift();
break;
2159 return Error(Tok.
getLoc(),
"unexpected bracket encountered");
2160 tryParseOperandIdx(PrevTK, SM);
2163 if (SM.onRBrac(ErrMsg)) {
2171 return Error(Tok.
getLoc(),
"unknown token in expression");
2173 if (!Done && UpdateLocLex)
2174 End = consumeToken();
2181 void X86AsmParser::RewriteIntelExpression(IntelExprStateMachine &SM,
2184 unsigned ExprLen = End.getPointer() - Start.getPointer();
2186 if (SM.getSym() && !SM.isOffsetOperator()) {
2188 if (
unsigned Len = SymName.
data() - Start.getPointer())
2191 ExprLen = End.getPointer() - (SymName.
data() + SymName.
size());
2194 if (!(SM.getBaseReg() || SM.getIndexReg() || SM.getImm())) {
2204 if (SM.getBaseReg())
2206 if (SM.getIndexReg())
2208 if (SM.isOffsetOperator())
2209 OffsetNameStr = SM.getSymName();
2211 IntelExpr Expr(BaseRegStr, IndexRegStr, SM.getScale(), OffsetNameStr,
2212 SM.getImm(), SM.isMemExpr());
2213 InstInfo->
AsmRewrites->emplace_back(Loc, ExprLen, Expr);
2217 bool X86AsmParser::ParseIntelInlineAsmIdentifier(
2219 bool IsUnevaluatedOperand,
SMLoc &End,
bool IsParsingOffsetOperator) {
2221 assert(isParsingMSInlineAsm() &&
"Expected to be parsing inline assembly.");
2225 SemaCallback->LookupInlineAsmIdentifier(LineBuf,
Info, IsUnevaluatedOperand);
2236 }
while (End.getPointer() < EndPtr);
2241 assert((End.getPointer() == EndPtr ||
2243 "frontend claimed part of a token?");
2249 SemaCallback->LookupInlineAsmLabel(Identifier, getSourceManager(),
2251 assert(InternalName.
size() &&
"We should have an internal name here.");
2254 if (!IsParsingOffsetOperator)
2262 MCSymbol *Sym = getContext().getOrCreateSymbol(Identifier);
2273 const SMLoc consumedToken = consumeToken();
2275 return Error(Tok.
getLoc(),
"Expected an identifier after {");
2284 return Error(Tok.
getLoc(),
"Invalid rounding mode.");
2287 return Error(Tok.
getLoc(),
"Expected - at this point");
2291 return Error(Tok.
getLoc(),
"Expected } at this point");
2294 const MCExpr *RndModeOp =
2302 return Error(Tok.
getLoc(),
"Expected } at this point");
2307 return Error(Tok.
getLoc(),
"unknown token in expression");
2311 bool X86AsmParser::ParseIntelDotOperator(IntelExprStateMachine &SM,
2327 }
else if ((isParsingMSInlineAsm() || getParser().isParsingMasm()) &&
2330 TrailingDot = DotDispStr.
substr(DotDispStr.
size() - 1);
2333 const std::pair<StringRef, StringRef> BaseMember = DotDispStr.
split(
'.');
2335 if (getParser().lookUpField(SM.getType(), DotDispStr,
Info) &&
2336 getParser().lookUpField(SM.getSymName(), DotDispStr,
Info) &&
2337 getParser().lookUpField(DotDispStr,
Info) &&
2339 SemaCallback->LookupInlineAsmField(
Base, Member,
Info.Offset)))
2340 return Error(Tok.
getLoc(),
"Unable to lookup field reference!");
2342 return Error(Tok.
getLoc(),
"Unexpected token type!");
2347 const char *DotExprEndLoc = DotDispStr.
data() + DotDispStr.
size();
2350 if (!TrailingDot.
empty())
2352 SM.addImm(
Info.Offset);
2353 SM.setTypeInfo(
Info.Type);
2363 SMLoc Start = Lex().getLoc();
2364 ID = getTok().getString();
2365 if (!isParsingMSInlineAsm()) {
2368 getParser().parsePrimaryExpr(Val, End,
nullptr))
2369 return Error(Start,
"unexpected token!");
2370 }
else if (ParseIntelInlineAsmIdentifier(Val,
ID,
Info,
false, End,
true)) {
2371 return Error(Start,
"unable to lookup expression");
2373 return Error(Start,
"offset operator cannot yet handle constants");
2380 unsigned X86AsmParser::IdentifyIntelInlineAsmOperator(
StringRef Name) {
2382 .
Cases(
"TYPE",
"type",IOK_TYPE)
2383 .
Cases(
"SIZE",
"size",IOK_SIZE)
2384 .
Cases(
"LENGTH",
"length",IOK_LENGTH)
2394 unsigned X86AsmParser::ParseIntelInlineAsmOperator(
unsigned OpKind) {
2399 const MCExpr *Val =
nullptr;
2403 if (ParseIntelInlineAsmIdentifier(Val, Identifier,
Info,
2408 Error(Start,
"unable to lookup expression");
2415 case IOK_LENGTH: CVal =
Info.Var.Length;
break;
2416 case IOK_SIZE: CVal =
Info.Var.Size;
break;
2417 case IOK_TYPE: CVal =
Info.Var.Type;
break;
2425 unsigned X86AsmParser::IdentifyMasmOperator(
StringRef Name) {
2427 .
Case(
"type", MOK_TYPE)
2428 .
Cases(
"size",
"sizeof", MOK_SIZEOF)
2429 .
Cases(
"length",
"lengthof", MOK_LENGTHOF)
2439 bool X86AsmParser::ParseMasmOperator(
unsigned OpKind, int64_t &Val) {
2445 if (OpKind == MOK_SIZEOF || OpKind == MOK_TYPE) {
2448 const AsmToken &IDTok = InParens ? getLexer().peekTok() : Parser.
getTok();
2464 IntelExprStateMachine SM;
2466 if (ParseIntelExpression(SM, End))
2476 Val = SM.getLength();
2479 Val = SM.getElementSize();
2484 return Error(OpLoc,
"expression has unknown type",
SMRange(Start, End));
2490 bool X86AsmParser::ParseIntelMemoryOperandSize(
unsigned &Size) {
2492 .
Cases(
"BYTE",
"byte", 8)
2493 .
Cases(
"WORD",
"word", 16)
2494 .
Cases(
"DWORD",
"dword", 32)
2495 .
Cases(
"FLOAT",
"float", 32)
2496 .
Cases(
"LONG",
"long", 32)
2497 .
Cases(
"FWORD",
"fword", 48)
2498 .
Cases(
"DOUBLE",
"double", 64)
2499 .
Cases(
"QWORD",
"qword", 64)
2500 .
Cases(
"MMWORD",
"mmword", 64)
2501 .
Cases(
"XWORD",
"xword", 80)
2502 .
Cases(
"TBYTE",
"tbyte", 80)
2503 .
Cases(
"XMMWORD",
"xmmword", 128)
2504 .
Cases(
"YMMWORD",
"ymmword", 256)
2505 .
Cases(
"ZMMWORD",
"zmmword", 512)
2510 return Error(Tok.
getLoc(),
"Expected 'PTR' or 'ptr' token!");
2523 if (ParseIntelMemoryOperandSize(Size))
2525 bool PtrInOperand = bool(Size);
2531 return ParseRoundingModeOp(Start,
Operands);
2536 if (RegNo == X86::RIP)
2537 return Error(Start,
"rip can only be used as a base register");
2541 return Error(Start,
"expected memory operand after 'ptr', "
2542 "found register operand instead");
2547 if (!X86MCRegisterClasses[X86::SEGMENT_REGRegClassID].
contains(RegNo))
2548 return Error(Start,
"invalid segment register");
2550 Start = Lex().getLoc();
2554 IntelExprStateMachine SM;
2555 if (ParseIntelExpression(SM, End))
2558 if (isParsingMSInlineAsm())
2559 RewriteIntelExpression(SM, Start, Tok.
getLoc());
2561 int64_t Imm = SM.getImm();
2562 const MCExpr *Disp = SM.getSym();
2571 if (!SM.isMemExpr() && !RegNo) {
2572 if (isParsingMSInlineAsm() && SM.isOffsetOperator()) {
2578 SM.getSymName(),
Info.Var.Decl,
2579 Info.Var.IsGlobalLV));
2589 unsigned BaseReg = SM.getBaseReg();
2590 unsigned IndexReg = SM.getIndexReg();
2591 if (IndexReg && BaseReg == X86::RIP)
2593 unsigned Scale = SM.getScale();
2595 Size = SM.getElementSize() << 3;
2597 if (Scale == 0 && BaseReg !=
X86::ESP && BaseReg != X86::RSP &&
2598 (IndexReg ==
X86::ESP || IndexReg == X86::RSP))
2604 !(X86MCRegisterClasses[X86::VR128XRegClassID].
contains(IndexReg) ||
2605 X86MCRegisterClasses[X86::VR256XRegClassID].
contains(IndexReg) ||
2606 X86MCRegisterClasses[X86::VR512RegClassID].
contains(IndexReg)) &&
2607 (X86MCRegisterClasses[X86::VR128XRegClassID].
contains(BaseReg) ||
2608 X86MCRegisterClasses[X86::VR256XRegClassID].
contains(BaseReg) ||
2609 X86MCRegisterClasses[X86::VR512RegClassID].
contains(BaseReg)))
2613 X86MCRegisterClasses[X86::GR16RegClassID].
contains(IndexReg))
2614 return Error(Start,
"16-bit addresses cannot have a scale");
2623 if ((BaseReg ==
X86::SI || BaseReg == X86::DI) &&
2624 (IndexReg == X86::BX || IndexReg == X86::BP))
2627 if ((BaseReg || IndexReg) &&
2630 return Error(Start, ErrMsg);
2631 if (isParsingMSInlineAsm())
2632 return CreateMemForMSInlineAsm(RegNo, Disp, BaseReg, IndexReg, Scale, Start,
2633 End, Size, SM.getSymName(),
2638 unsigned DefaultBaseReg = X86::NoRegister;
2639 bool MaybeDirectBranchDest =
true;
2642 bool IsUnconditionalBranch =
2643 Name.equals_insensitive(
"jmp") ||
Name.equals_insensitive(
"call");
2644 if (is64BitMode() && SM.getElementSize() > 0) {
2645 DefaultBaseReg = X86::RIP;
2647 if (IsUnconditionalBranch) {
2649 MaybeDirectBranchDest =
false;
2651 DefaultBaseReg = X86::RIP;
2652 }
else if (!BaseReg && !IndexReg && Disp &&
2654 if (is64BitMode()) {
2655 if (SM.getSize() == 8) {
2656 MaybeDirectBranchDest =
false;
2657 DefaultBaseReg = X86::RIP;
2660 if (SM.getSize() == 4 || SM.getSize() == 2)
2661 MaybeDirectBranchDest =
false;
2667 if ((BaseReg || IndexReg || RegNo || DefaultBaseReg != X86::NoRegister))
2669 getPointerWidth(), RegNo, Disp, BaseReg, IndexReg, Scale, Start, End,
2670 Size, DefaultBaseReg,
StringRef(),
nullptr,
2671 0,
false, MaybeDirectBranchDest));
2674 getPointerWidth(), Disp, Start, End, Size,
StringRef(),
2676 MaybeDirectBranchDest));
2682 switch (getLexer().getKind()) {
2692 "expected immediate expression") ||
2693 getParser().parseExpression(Val, End) ||
2694 check(isa<X86MCExpr>(Val), L,
"expected immediate expression"))
2701 return ParseRoundingModeOp(Start,
Operands);
2710 const MCExpr *Expr =
nullptr;
2716 if (
auto *RE = dyn_cast<X86MCExpr>(Expr)) {
2719 Reg = RE->getRegNo();
2722 if (
Reg == X86::EIZ ||
Reg == X86::RIZ)
2724 Loc,
"%eiz and %riz can only be used as index registers",
2726 if (
Reg == X86::RIP)
2727 return Error(Loc,
"%rip can only be used as a base register",
2734 if (!X86MCRegisterClasses[X86::SEGMENT_REGRegClassID].
contains(
Reg))
2735 return Error(Loc,
"invalid segment register");
2743 return ParseMemOperand(
Reg, Expr, Loc, EndLoc,
Operands);
2773 bool X86AsmParser::ParseZ(std::unique_ptr<X86Operand> &Z,
2774 const SMLoc &StartLoc) {
2780 (getLexer().getTok().getIdentifier() ==
"z")))
2785 return Error(getLexer().getLoc(),
"Expected } at this point");
2797 const SMLoc consumedToken = consumeToken();
2801 if (getLexer().getTok().getIntVal() != 1)
2802 return TokError(
"Expected 1to<NUM> at this point");
2806 return TokError(
"Expected 1to<NUM> at this point");
2809 StringRef BroadcastString = (
Prefix + getLexer().getTok().getIdentifier())
2812 return TokError(
"Expected 1to<NUM> at this point");
2813 const char *BroadcastPrimitive =
2815 .
Case(
"1to2",
"{1to2}")
2816 .
Case(
"1to4",
"{1to4}")
2817 .
Case(
"1to8",
"{1to8}")
2818 .
Case(
"1to16",
"{1to16}")
2819 .
Case(
"1to32",
"{1to32}")
2821 if (!BroadcastPrimitive)
2822 return TokError(
"Invalid memory broadcast primitive.");
2825 return TokError(
"Expected } at this point");
2836 std::unique_ptr<X86Operand>
Z;
2837 if (ParseZ(Z, consumedToken))
2843 SMLoc StartLoc =
Z ? consumeToken() : consumedToken;
2848 if (!ParseRegister(RegNo, RegLoc, StartLoc) &&
2849 X86MCRegisterClasses[X86::VK1RegClassID].
contains(RegNo)) {
2850 if (RegNo == X86::K0)
2851 return Error(RegLoc,
"Register k0 can't be used as write mask");
2853 return Error(getLexer().getLoc(),
"Expected } at this point");
2859 return Error(getLexer().getLoc(),
2860 "Expected an op-mask register at this point");
2865 if (ParseZ(Z, consumeToken()) || !Z)
2866 return Error(getLexer().getLoc(),
2867 "Expected a {z} mark at this point");
2883 bool X86AsmParser::ParseMemOperand(
unsigned SegReg,
const MCExpr *Disp,
2904 auto isAtMemOperand = [
this]() {
2909 auto TokCount = this->getLexer().peekTokens(Buf,
true);
2912 switch (Buf[0].getKind()) {
2919 if ((TokCount > 1) &&
2921 (Buf[0].getLoc().getPointer() + 1 == Buf[1].getLoc().getPointer()))
2923 Buf[1].getIdentifier().
size() + 1);
2934 MCSymbol *Sym = this->getContext().getOrCreateSymbol(
Id);
2937 return isa<X86MCExpr>(V);
2945 if (!isAtMemOperand()) {
2948 assert(!isa<X86MCExpr>(Disp) &&
"Expected non-register here.");
2964 0, 0, 1, StartLoc, EndLoc));
2970 unsigned BaseReg = 0, IndexReg = 0, Scale = 1;
2971 SMLoc BaseLoc = getLexer().getLoc();
2978 check(!isa<X86MCExpr>(
E), BaseLoc,
"expected register here"))
2982 BaseReg = cast<X86MCExpr>(
E)->getRegNo();
2983 if (BaseReg == X86::EIZ || BaseReg == X86::RIZ)
2984 return Error(BaseLoc,
"eiz and riz can only be used as index registers",
2999 if (!isa<X86MCExpr>(
E)) {
3003 if (!
E->evaluateAsAbsolute(ScaleVal, getStreamer().getAssemblerPtr()))
3004 return Error(Loc,
"expected absolute expression");
3006 Warning(Loc,
"scale factor without index register is ignored");
3009 IndexReg = cast<X86MCExpr>(
E)->getRegNo();
3011 if (BaseReg == X86::RIP)
3013 "%rip as base register can not have an index register");
3014 if (IndexReg == X86::RIP)
3015 return Error(Loc,
"%rip is not allowed as an index register");
3026 return Error(Loc,
"expected scale expression");
3027 Scale = (unsigned)ScaleVal;
3029 if (X86MCRegisterClasses[X86::GR16RegClassID].
contains(BaseReg) &&
3031 return Error(Loc,
"scale factor in 16-bit address must be 1");
3033 return Error(Loc, ErrMsg);
3047 if (BaseReg == X86::DX && IndexReg == 0 && Scale == 1 && SegReg == 0 &&
3048 isa<MCConstantExpr>(Disp) &&
3049 cast<MCConstantExpr>(Disp)->getValue() == 0) {
3056 return Error(BaseLoc, ErrMsg);
3058 if (SegReg || BaseReg || IndexReg)
3060 BaseReg, IndexReg, Scale, StartLoc,
3069 bool X86AsmParser::parsePrimaryExpr(
const MCExpr *&Res,
SMLoc &EndLoc) {
3077 if (ParseRegister(RegNo, StartLoc, EndLoc))
3091 ForcedVEXEncoding = VEXEncoding_Default;
3092 ForcedDispEncoding = DispEncoding_Default;
3106 ForcedVEXEncoding = VEXEncoding_VEX;
3107 else if (
Prefix ==
"vex2")
3108 ForcedVEXEncoding = VEXEncoding_VEX2;
3109 else if (
Prefix ==
"vex3")
3110 ForcedVEXEncoding = VEXEncoding_VEX3;
3111 else if (
Prefix ==
"evex")
3112 ForcedVEXEncoding = VEXEncoding_EVEX;
3113 else if (
Prefix ==
"disp8")
3114 ForcedDispEncoding = DispEncoding_Disp8;
3115 else if (
Prefix ==
"disp32")
3116 ForcedDispEncoding = DispEncoding_Disp32;
3118 return Error(NameLoc,
"unknown prefix");
3134 if (isParsingMSInlineAsm()) {
3135 if (
Name.equals_insensitive(
"vex"))
3136 ForcedVEXEncoding = VEXEncoding_VEX;
3137 else if (
Name.equals_insensitive(
"vex2"))
3138 ForcedVEXEncoding = VEXEncoding_VEX2;
3139 else if (
Name.equals_insensitive(
"vex3"))
3140 ForcedVEXEncoding = VEXEncoding_VEX3;
3141 else if (
Name.equals_insensitive(
"evex"))
3142 ForcedVEXEncoding = VEXEncoding_EVEX;
3144 if (ForcedVEXEncoding != VEXEncoding_Default) {
3157 if (
Name.consume_back(
".d32")) {
3158 ForcedDispEncoding = DispEncoding_Disp32;
3159 }
else if (
Name.consume_back(
".d8")) {
3160 ForcedDispEncoding = DispEncoding_Disp8;
3166 if (isParsingIntelSyntax() &&
3167 (PatchedName ==
"jmp" || PatchedName ==
"jc" || PatchedName ==
"jnc" ||
3168 PatchedName ==
"jcxz" || PatchedName ==
"jecxz" ||
3173 : NextTok ==
"short") {
3182 NextTok.
size() + 1);
3188 PatchedName !=
"setb" && PatchedName !=
"setnb")
3189 PatchedName = PatchedName.
substr(0,
Name.size()-1);
3191 unsigned ComparisonPredicate = ~0U;
3198 bool IsVCMP = PatchedName[0] ==
'v';
3199 unsigned CCIdx =
IsVCMP ? 4 : 3;
3201 PatchedName.
slice(CCIdx, PatchedName.
size() - 2))
3203 .
Case(
"eq_oq", 0x00)
3205 .
Case(
"lt_os", 0x01)
3207 .
Case(
"le_os", 0x02)
3208 .
Case(
"unord", 0x03)
3209 .
Case(
"unord_q", 0x03)
3211 .
Case(
"neq_uq", 0x04)
3213 .
Case(
"nlt_us", 0x05)
3215 .
Case(
"nle_us", 0x06)
3217 .
Case(
"ord_q", 0x07)
3219 .
Case(
"eq_uq", 0x08)
3221 .
Case(
"nge_us", 0x09)
3223 .
Case(
"ngt_us", 0x0A)
3224 .
Case(
"false", 0x0B)
3225 .
Case(
"false_oq", 0x0B)
3226 .
Case(
"neq_oq", 0x0C)
3228 .
Case(
"ge_os", 0x0D)
3230 .
Case(
"gt_os", 0x0E)
3232 .
Case(
"true_uq", 0x0F)
3233 .
Case(
"eq_os", 0x10)
3234 .
Case(
"lt_oq", 0x11)
3235 .
Case(
"le_oq", 0x12)
3236 .
Case(
"unord_s", 0x13)
3237 .
Case(
"neq_us", 0x14)
3238 .
Case(
"nlt_uq", 0x15)
3239 .
Case(
"nle_uq", 0x16)
3240 .
Case(
"ord_s", 0x17)
3241 .
Case(
"eq_us", 0x18)
3242 .
Case(
"nge_uq", 0x19)
3243 .
Case(
"ngt_uq", 0x1A)
3244 .
Case(
"false_os", 0x1B)
3245 .
Case(
"neq_os", 0x1C)
3246 .
Case(
"ge_oq", 0x1D)
3247 .
Case(
"gt_oq", 0x1E)
3248 .
Case(
"true_us", 0x1F)
3250 if (CC != ~0U && (
IsVCMP || CC < 8) &&
3253 PatchedName =
IsVCMP ?
"vcmpss" :
"cmpss";
3254 else if (PatchedName.
endswith(
"sd"))
3255 PatchedName =
IsVCMP ?
"vcmpsd" :
"cmpsd";
3256 else if (PatchedName.
endswith(
"ps"))
3257 PatchedName =
IsVCMP ?
"vcmpps" :
"cmpps";
3258 else if (PatchedName.
endswith(
"pd"))
3259 PatchedName =
IsVCMP ?
"vcmppd" :
"cmppd";
3260 else if (PatchedName.
endswith(
"sh"))
3261 PatchedName =
"vcmpsh";
3262 else if (PatchedName.
endswith(
"ph"))
3263 PatchedName =
"vcmpph";
3267 ComparisonPredicate = CC;
3273 (PatchedName.
back() ==
'b' || PatchedName.
back() ==
'w' ||
3274 PatchedName.
back() ==
'd' || PatchedName.
back() ==
'q')) {
3275 unsigned SuffixSize = PatchedName.
drop_back().
back() ==
'u' ? 2 : 1;
3277 PatchedName.
slice(5, PatchedName.
size() - SuffixSize))
3287 if (CC != ~0U && (CC != 0 || SuffixSize == 2)) {
3288 switch (PatchedName.
back()) {
3290 case 'b': PatchedName = SuffixSize == 2 ?
"vpcmpub" :
"vpcmpb";
break;
3291 case 'w': PatchedName = SuffixSize == 2 ?
"vpcmpuw" :
"vpcmpw";
break;
3292 case 'd': PatchedName = SuffixSize == 2 ?
"vpcmpud" :
"vpcmpd";
break;
3293 case 'q': PatchedName = SuffixSize == 2 ?
"vpcmpuq" :
"vpcmpq";
break;
3296 ComparisonPredicate = CC;
3302 (PatchedName.
back() ==
'b' || PatchedName.
back() ==
'w' ||
3303 PatchedName.
back() ==
'd' || PatchedName.
back() ==
'q')) {
3304 unsigned SuffixSize = PatchedName.
drop_back().
back() ==
'u' ? 2 : 1;
3306 PatchedName.
slice(5, PatchedName.
size() - SuffixSize))
3317 switch (PatchedName.
back()) {
3319 case 'b': PatchedName = SuffixSize == 2 ?
"vpcomub" :
"vpcomb";
break;
3320 case 'w': PatchedName = SuffixSize == 2 ?
"vpcomuw" :
"vpcomw";
break;
3321 case 'd': PatchedName = SuffixSize == 2 ?
"vpcomud" :
"vpcomd";
break;
3322 case 'q': PatchedName = SuffixSize == 2 ?
"vpcomuq" :
"vpcomq";
break;
3325 ComparisonPredicate = CC;
3339 .
Cases(
"cs",
"ds",
"es",
"fs",
"gs",
"ss",
true)
3340 .
Cases(
"rex64",
"data32",
"data16",
"addr32",
"addr16",
true)
3341 .
Cases(
"xacquire",
"xrelease",
true)
3342 .
Cases(
"acquire",
"release", isParsingIntelSyntax())
3345 auto isLockRepeatNtPrefix = [](
StringRef N) {
3347 .
Cases(
"lock",
"rep",
"repe",
"repz",
"repne",
"repnz",
"notrack",
true)
3351 bool CurlyAsEndOfStatement =
false;
3354 while (isLockRepeatNtPrefix(
Name.lower())) {
3375 while (
Name.startswith(
";") ||
Name.startswith(
"\n") ||
3376 Name.startswith(
"#") ||
Name.startswith(
"\t") ||
3377 Name.startswith(
"/")) {
3388 if (PatchedName ==
"data16" && is16BitMode()) {
3389 return Error(NameLoc,
"redundant data16 prefix");
3391 if (PatchedName ==
"data32") {
3393 return Error(NameLoc,
"redundant data32 prefix");
3395 return Error(NameLoc,
"'data32' is not supported in 64-bit mode");
3397 PatchedName =
"data16";
3404 if (Next ==
"callw")
3406 if (Next ==
"ljmpw")
3411 ForcedDataPrefix = X86::Is32Bit;
3419 if (ComparisonPredicate != ~0U && !isParsingIntelSyntax()) {
3421 getParser().getContext());
3450 CurlyAsEndOfStatement =
3451 isParsingIntelSyntax() && isParsingMSInlineAsm() &&
3454 return TokError(
"unexpected token in argument list");
3458 if (ComparisonPredicate != ~0U && isParsingIntelSyntax()) {
3460 getParser().getContext());
3468 else if (CurlyAsEndOfStatement)
3471 getLexer().getTok().getLoc(), 0);
3478 if (IsFp &&
Operands.size() == 1) {
3480 .
Case(
"fsub",
"fsubp")
3481 .
Case(
"fdiv",
"fdivp")
3482 .
Case(
"fsubr",
"fsubrp")
3483 .
Case(
"fdivr",
"fdivrp");
3487 if ((
Name ==
"mov" ||
Name ==
"movw" ||
Name ==
"movl") &&
3495 X86MCRegisterClasses[X86::SEGMENT_REGRegClassID].contains(
3497 (X86MCRegisterClasses[X86::GR16RegClassID].
contains(Op1.
getReg()) ||
3498 X86MCRegisterClasses[X86::GR32RegClassID].
contains(Op1.
getReg()))) {
3500 if (
Name !=
"mov" &&
Name[3] == (is16BitMode() ?
'l' :
'w')) {
3501 Name = is16BitMode() ?
"movw" :
"movl";
3514 if ((
Name ==
"outb" ||
Name ==
"outsb" ||
Name ==
"outw" ||
Name ==
"outsw" ||
3533 bool HadVerifyError =
false;
3536 if (
Name.startswith(
"ins") &&
3541 AddDefaultSrcDestOperands(TmpOperands,
3543 DefaultMemDIOperand(NameLoc));
3544 HadVerifyError = VerifyAndAdjustOperands(
Operands, TmpOperands);
3548 if (
Name.startswith(
"outs") &&
3550 (
Name ==
"outsb" ||
Name ==
"outsw" ||
Name ==
"outsl" ||
3551 Name ==
"outsd" ||
Name ==
"outs")) {
3552 AddDefaultSrcDestOperands(TmpOperands, DefaultMemSIOperand(NameLoc),
3554 HadVerifyError = VerifyAndAdjustOperands(
Operands, TmpOperands);
3560 if (
Name.startswith(
"lods") &&
3562 (
Name ==
"lods" ||
Name ==
"lodsb" ||
Name ==
"lodsw" ||
3563 Name ==
"lodsl" ||
Name ==
"lodsd" ||
Name ==
"lodsq")) {
3564 TmpOperands.push_back(DefaultMemSIOperand(NameLoc));
3565 HadVerifyError = VerifyAndAdjustOperands(
Operands, TmpOperands);
3571 if (
Name.startswith(
"stos") &&
3573 (
Name ==
"stos" ||
Name ==
"stosb" ||
Name ==
"stosw" ||
3574 Name ==
"stosl" ||
Name ==
"stosd" ||
Name ==
"stosq")) {
3575 TmpOperands.push_back(DefaultMemDIOperand(NameLoc));
3576 HadVerifyError = VerifyAndAdjustOperands(
Operands, TmpOperands);
3582 if (
Name.startswith(
"scas") &&
3584 (
Name ==
"scas" ||
Name ==
"scasb" ||
Name ==
"scasw" ||
3585 Name ==
"scasl" ||
Name ==
"scasd" ||
Name ==
"scasq")) {
3586 TmpOperands.push_back(DefaultMemDIOperand(NameLoc));
3587 HadVerifyError = VerifyAndAdjustOperands(
Operands, TmpOperands);
3591 if (
Name.startswith(
"cmps") &&
3593 (
Name ==
"cmps" ||
Name ==
"cmpsb" ||
Name ==
"cmpsw" ||
3594 Name ==
"cmpsl" ||
Name ==
"cmpsd" ||
Name ==
"cmpsq")) {
3595 AddDefaultSrcDestOperands(TmpOperands, DefaultMemDIOperand(NameLoc),
3596 DefaultMemSIOperand(NameLoc));
3597 HadVerifyError = VerifyAndAdjustOperands(
Operands, TmpOperands);
3601 if (((
Name.startswith(
"movs") &&
3602 (
Name ==
"movs" ||
Name ==
"movsb" ||
Name ==
"movsw" ||
3603 Name ==
"movsl" ||
Name ==
"movsd" ||
Name ==
"movsq")) ||
3604 (
Name.startswith(
"smov") &&
3605 (
Name ==
"smov" ||
Name ==
"smovb" ||
Name ==
"smovw" ||
3606 Name ==
"smovl" ||
Name ==
"smovd" ||
Name ==
"smovq"))) &&
3608 if (
Name ==
"movsd" &&
Operands.size() == 1 && !isParsingIntelSyntax())
3610 AddDefaultSrcDestOperands(TmpOperands, DefaultMemSIOperand(NameLoc),
3611 DefaultMemDIOperand(NameLoc));
3612 HadVerifyError = VerifyAndAdjustOperands(
Operands, TmpOperands);
3616 if (HadVerifyError) {
3617 return HadVerifyError;
3625 "size, (R|E)BX will be used for the location");
3640 default:
return false;
3645 if (ForcedDispEncoding == DispEncoding_Disp32) {
3646 Inst.
setOpcode(is16BitMode() ? X86::JMP_2 : X86::JMP_4);
3655 if (ForcedDispEncoding == DispEncoding_Disp32) {
3656 Inst.
setOpcode(is16BitMode() ? X86::JCC_2 : X86::JCC_4);
3661 case X86::VMOVZPQILo2PQIrr:
3662 case X86::VMOVAPDrr:
3663 case X86::VMOVAPDYrr:
3664 case X86::VMOVAPSrr:
3665 case X86::VMOVAPSYrr:
3666 case X86::VMOVDQArr:
3667 case X86::VMOVDQAYrr:
3668 case X86::VMOVDQUrr:
3669 case X86::VMOVDQUYrr:
3670 case X86::VMOVUPDrr:
3671 case X86::VMOVUPDYrr:
3672 case X86::VMOVUPSrr:
3673 case X86::VMOVUPSYrr: {
3676 if (ForcedVEXEncoding == VEXEncoding_VEX3 ||
3684 case X86::VMOVZPQILo2PQIrr: NewOpc = X86::VMOVPQI2QIrr;
break;
3685 case X86::VMOVAPDrr: NewOpc = X86::VMOVAPDrr_REV;
break;
3686 case X86::VMOVAPDYrr: NewOpc = X86::VMOVAPDYrr_REV;
break;
3687 case X86::VMOVAPSrr: NewOpc = X86::VMOVAPSrr_REV;
break;
3688 case X86::VMOVAPSYrr: NewOpc = X86::VMOVAPSYrr_REV;
break;
3689 case X86::VMOVDQArr: NewOpc = X86::VMOVDQArr_REV;
break;
3690 case X86::VMOVDQAYrr: NewOpc = X86::VMOVDQAYrr_REV;
break;
3691 case X86::VMOVDQUrr: NewOpc = X86::VMOVDQUrr_REV;
break;
3692 case X86::VMOVDQUYrr: NewOpc = X86::VMOVDQUYrr_REV;
break;
3693 case X86::VMOVUPDrr: NewOpc = X86::VMOVUPDrr_REV;
break;
3694 case X86::VMOVUPDYrr: NewOpc = X86::VMOVUPDYrr_REV;
break;
3695 case X86::VMOVUPSrr: NewOpc = X86::VMOVUPSrr_REV;
break;
3696 case X86::VMOVUPSYrr: NewOpc = X86::VMOVUPSYrr_REV;
break;
3702 case X86::VMOVSSrr: {
3705 if (ForcedVEXEncoding == VEXEncoding_VEX3 ||
3713 case X86::VMOVSDrr: NewOpc = X86::VMOVSDrr_REV;
break;
3714 case X86::VMOVSSrr: NewOpc = X86::VMOVSSrr_REV;
break;
3719 case X86::RCR8ri:
case X86::RCR16ri:
case X86::RCR32ri:
case X86::RCR64ri:
3720 case X86::RCL8ri:
case X86::RCL16ri:
case X86::RCL32ri:
case X86::RCL64ri:
3721 case X86::ROR8ri:
case X86::ROR16ri:
case X86::ROR32ri:
case X86::ROR64ri:
3722 case X86::ROL8ri:
case X86::ROL16ri:
case X86::ROL32ri:
case X86::ROL64ri:
3723 case X86::SAR8ri:
case X86::SAR16ri:
case X86::SAR32ri:
case X86::SAR64ri:
3724 case X86::SHR8ri:
case X86::SHR16ri:
case X86::SHR32ri:
case X86::SHR64ri:
3725 case X86::SHL8ri:
case X86::SHL16ri:
case X86::SHL32ri:
case X86::SHL64ri: {
3734 case X86::RCR8ri: NewOpc = X86::RCR8r1;
break;
3735 case X86::RCR16ri: NewOpc = X86::RCR16r1;
break;
3736 case X86::RCR32ri: NewOpc = X86::RCR32r1;
break;
3737 case X86::RCR64ri: NewOpc = X86::RCR64r1;
break;
3738 case X86::RCL8ri: NewOpc = X86::RCL8r1;
break;
3739 case X86::RCL16ri: NewOpc = X86::RCL16r1;
break;
3740 case X86::RCL32ri: NewOpc = X86::RCL32r1;
break;
3741 case X86::RCL64ri: NewOpc = X86::RCL64r1;
break;
3742 case X86::ROR8ri: NewOpc = X86::ROR8r1;
break;
3743 case X86::ROR16ri: NewOpc = X86::ROR16r1;
break;
3744 case X86::ROR32ri: NewOpc = X86::ROR32r1;
break;
3745 case X86::ROR64ri: NewOpc = X86::ROR64r1;
break;
3746 case X86::ROL8ri: NewOpc = X86::ROL8r1;
break;
3747 case X86::ROL16ri: NewOpc = X86::ROL16r1;
break;
3748 case X86::ROL32ri: NewOpc = X86::ROL32r1;
break;
3749 case X86::ROL64ri: NewOpc = X86::ROL64r1;
break;
3750 case X86::SAR8ri: NewOpc = X86::SAR8r1;
break;
3751 case X86::SAR16ri: NewOpc = X86::SAR16r1;
break;
3752 case X86::SAR32ri: NewOpc = X86::SAR32r1;
break;
3753 case X86::SAR64ri: NewOpc = X86::SAR64r1;
break;
3754 case X86::SHR8ri: NewOpc = X86::SHR8r1;
break;
3755 case X86::SHR16ri: NewOpc = X86::SHR16r1;
break;
3756 case X86::SHR32ri: NewOpc = X86::SHR32r1;
break;
3757 case X86::SHR64ri: NewOpc = X86::SHR64r1;
break;
3758 case X86::SHL8ri: NewOpc = X86::SHL8r1;
break;
3759 case X86::SHL16ri: NewOpc = X86::SHL16r1;
break;
3760 case X86::SHL32ri: NewOpc = X86::SHL32r1;
break;
3761 case X86::SHL64ri: NewOpc = X86::SHL64r1;
break;
3771 case X86::RCR8mi:
case X86::RCR16mi:
case X86::RCR32mi:
case X86::RCR64mi:
3772 case X86::RCL8mi:
case X86::RCL16mi:
case X86::RCL32mi:
case X86::RCL64mi:
3773 case X86::ROR8mi:
case X86::ROR16mi:
case X86::ROR32mi:
case X86::ROR64mi:
3774 case X86::ROL8mi:
case X86::ROL16mi:
case X86::ROL32mi:
case X86::ROL64mi:
3775 case X86::SAR8mi:
case X86::SAR16mi:
case X86::SAR32mi:
case X86::SAR64mi:
3776 case X86::SHR8mi:
case X86::SHR16mi:
case X86::SHR32mi:
case X86::SHR64mi:
3777 case X86::SHL8mi:
case X86::SHL16mi:
case X86::SHL32mi:
case X86::SHL64mi: {
3787 case X86::RCR8mi: NewOpc = X86::RCR8m1;
break;
3788 case X86::RCR16mi: NewOpc = X86::RCR16m1;
break;
3789 case X86::RCR32mi: NewOpc = X86::RCR32m1;
break;
3790 case X86::RCR64mi: NewOpc = X86::RCR64m1;
break;
3791 case X86::RCL8mi: NewOpc = X86::RCL8m1;
break;
3792 case X86::RCL16mi: NewOpc = X86::RCL16m1;
break;
3793 case X86::RCL32mi: NewOpc = X86::RCL32m1;
break;
3794 case X86::RCL64mi: NewOpc = X86::RCL64m1;
break;
3795 case X86::ROR8mi: NewOpc = X86::ROR8m1;
break;
3796 case X86::ROR16mi: NewOpc = X86::ROR16m1;
break;
3797 case X86::ROR32mi: NewOpc = X86::ROR32m1;
break;
3798 case X86::ROR64mi: NewOpc = X86::ROR64m1;
break;
3799 case X86::ROL8mi: NewOpc = X86::ROL8m1;
break;
3800 case X86::ROL16mi: NewOpc = X86::ROL16m1;
break;
3801 case X86::ROL32mi: NewOpc = X86::ROL32m1;
break;
3802 case X86::ROL64mi: NewOpc = X86::ROL64m1;
break;
3803 case X86::SAR8mi: NewOpc = X86::SAR8m1;
break;
3804 case X86::SAR16mi: NewOpc = X86::SAR16m1;
break;
3805 case X86::SAR32mi: NewOpc = X86::SAR32m1;
break;
3806 case X86::SAR64mi: NewOpc = X86::SAR64m1;
break;
3807 case X86::SHR8mi: NewOpc = X86::SHR8m1;
break;
3808 case X86::SHR16mi: NewOpc = X86::SHR16m1;
break;
3809 case X86::SHR32mi: NewOpc = X86::SHR32m1;
break;
3810 case X86::SHR64mi: NewOpc = X86::SHR64m1;
break;
3811 case X86::SHL8mi: NewOpc = X86::SHL8m1;
break;
3812 case X86::SHL16mi: NewOpc = X86::SHL16m1;
break;
3813 case X86::SHL32mi: NewOpc = X86::SHL32m1;
break;
3814 case X86::SHL64mi: NewOpc = X86::SHL64m1;
break;
3842 case X86::VGATHERDPDYrm:
3843 case X86::VGATHERDPDrm:
3844 case X86::VGATHERDPSYrm:
3845 case X86::VGATHERDPSrm:
3846 case X86::VGATHERQPDYrm:
3847 case X86::VGATHERQPDrm:
3848 case X86::VGATHERQPSYrm:
3849 case X86::VGATHERQPSrm:
3850 case X86::VPGATHERDDYrm:
3851 case X86::VPGATHERDDrm:
3852 case X86::VPGATHERDQYrm:
3853 case X86::VPGATHERDQrm:
3854 case X86::VPGATHERQDYrm:
3855 case X86::VPGATHERQDrm:
3856 case X86::VPGATHERQQYrm:
3857 case X86::VPGATHERQQrm: {
3862 if (Dest ==
Mask || Dest == Index ||
Mask == Index)
3863 return Warning(Ops[0]->getStartLoc(),
"mask, index, and destination "
3864 "registers should be distinct");
3867 case X86::VGATHERDPDZ128rm:
3868 case X86::VGATHERDPDZ256rm:
3869 case X86::VGATHERDPDZrm:
3870 case X86::VGATHERDPSZ128rm:
3871 case X86::VGATHERDPSZ256rm:
3872 case X86::VGATHERDPSZrm:
3873 case X86::VGATHERQPDZ128rm:
3874 case X86::VGATHERQPDZ256rm:
3875 case X86::VGATHERQPDZrm:
3876 case X86::VGATHERQPSZ128rm:
3877 case X86::VGATHERQPSZ256rm:
3878 case X86::VGATHERQPSZrm:
3879 case X86::VPGATHERDDZ128rm:
3880 case X86::VPGATHERDDZ256rm:
3881 case X86::VPGATHERDDZrm:
3882 case X86::VPGATHERDQZ128rm:
3883 case X86::VPGATHERDQZ256rm:
3884 case X86::VPGATHERDQZrm:
3885 case X86::VPGATHERQDZ128rm:
3886 case X86::VPGATHERQDZ256rm:
3887 case X86::VPGATHERQDZrm:
3888 case X86::VPGATHERQQZ128rm:
3889 case X86::VPGATHERQQZ256rm:
3890 case X86::VPGATHERQQZrm: {
3895 return Warning(Ops[0]->getStartLoc(),
"index and destination registers "
3896 "should be distinct");
3899 case X86::V4FMADDPSrm:
3900 case X86::V4FMADDPSrmk:
3901 case X86::V4FMADDPSrmkz:
3902 case X86::V4FMADDSSrm:
3903 case X86::V4FMADDSSrmk:
3904 case X86::V4FMADDSSrmkz:
3905 case X86::V4FNMADDPSrm:
3906 case X86::V4FNMADDPSrmk:
3907 case X86::V4FNMADDPSrmkz:
3908 case X86::V4FNMADDSSrm:
3909 case X86::V4FNMADDSSrmk:
3910 case X86::V4FNMADDSSrmkz:
3911 case X86::VP4DPWSSDSrm:
3912 case X86::VP4DPWSSDSrmk:
3913 case X86::VP4DPWSSDSrmkz:
3914 case X86::VP4DPWSSDrm:
3915 case X86::VP4DPWSSDrmk:
3916 case X86::VP4DPWSSDrmkz: {
3919 unsigned Src2Enc =
MRI->getEncodingValue(Src2);
3920 if (Src2Enc % 4 != 0) {
3922 unsigned GroupStart = (Src2Enc / 4) * 4;
3923 unsigned GroupEnd = GroupStart + 3;
3924 return Warning(Ops[0]->getStartLoc(),
3925 "source register '" +
RegName +
"' implicitly denotes '" +
3932 case X86::VFCMADDCPHZ128m:
3933 case X86::VFCMADDCPHZ256m:
3934 case X86::VFCMADDCPHZm:
3935 case X86::VFCMADDCPHZ128mb:
3936 case X86::VFCMADDCPHZ256mb:
3937 case X86::VFCMADDCPHZmb:
3938 case X86::VFCMADDCPHZ128mbk:
3939 case X86::VFCMADDCPHZ256mbk:
3940 case X86::VFCMADDCPHZmbk:
3941 case X86::VFCMADDCPHZ128mbkz:
3942 case X86::VFCMADDCPHZ256mbkz:
3943 case X86::VFCMADDCPHZmbkz:
3944 case X86::VFCMADDCPHZ128mk:
3945 case X86::VFCMADDCPHZ256mk:
3946 case X86::VFCMADDCPHZmk:
3947 case X86::VFCMADDCPHZ128mkz:
3948 case X86::VFCMADDCPHZ256mkz:
3949 case X86::VFCMADDCPHZmkz:
3950 case X86::VFCMADDCPHZ128r:
3951 case X86::VFCMADDCPHZ256r:
3952 case X86::VFCMADDCPHZr:
3953 case X86::VFCMADDCPHZ128rk:
3954 case X86::VFCMADDCPHZ256rk:
3955 case X86::VFCMADDCPHZrk:
3956 case X86::VFCMADDCPHZ128rkz:
3957 case X86::VFCMADDCPHZ256rkz:
3958 case X86::VFCMADDCPHZrkz:
3959 case X86::VFCMADDCPHZrb:
3960 case X86::VFCMADDCPHZrbk:
3961 case X86::VFCMADDCPHZrbkz:
3962 case X86::VFCMADDCSHZm:
3963 case X86::VFCMADDCSHZmk:
3964 case X86::VFCMADDCSHZmkz:
3965 case X86::VFCMADDCSHZr:
3966 case X86::VFCMADDCSHZrb:
3967 case X86::VFCMADDCSHZrbk:
3968 case X86::VFCMADDCSHZrbkz:
3969 case X86::VFCMADDCSHZrk:
3970 case X86::VFCMADDCSHZrkz:
3971 case X86::VFMADDCPHZ128m:
3972 case X86::VFMADDCPHZ256m:
3973 case X86::VFMADDCPHZm:
3974 case X86::VFMADDCPHZ128mb:
3975 case X86::VFMADDCPHZ256mb:
3976 case X86::VFMADDCPHZmb:
3977 case X86::VFMADDCPHZ128mbk:
3978 case X86::VFMADDCPHZ256mbk:
3979 case X86::VFMADDCPHZmbk:
3980 case X86::VFMADDCPHZ128mbkz:
3981 case X86::VFMADDCPHZ256mbkz:
3982 case X86::VFMADDCPHZmbkz:
3983 case X86::VFMADDCPHZ128mk:
3984 case X86::VFMADDCPHZ256mk:
3985 case X86::VFMADDCPHZmk:
3986 case X86::VFMADDCPHZ128mkz:
3987 case X86::VFMADDCPHZ256mkz:
3988 case X86::VFMADDCPHZmkz:
3989 case X86::VFMADDCPHZ128r:
3990 case X86::VFMADDCPHZ256r:
3991 case X86::VFMADDCPHZr:
3992 case X86::VFMADDCPHZ128rk:
3993 case X86::VFMADDCPHZ256rk:
3994 case X86::VFMADDCPHZrk:
3995 case X86::VFMADDCPHZ128rkz:
3996 case X86::VFMADDCPHZ256rkz:
3997 case X86::VFMADDCPHZrkz:
3998 case X86::VFMADDCPHZrb:
3999 case X86::VFMADDCPHZrbk:
4000 case X86::VFMADDCPHZrbkz:
4001 case X86::VFMADDCSHZm:
4002 case X86::VFMADDCSHZmk:
4003 case X86::VFMADDCSHZmkz:
4004 case X86::VFMADDCSHZr:
4005 case X86::VFMADDCSHZrb:
4006 case X86::VFMADDCSHZrbk:
4007 case X86::VFMADDCSHZrbkz:
4008 case X86::VFMADDCSHZrk:
4009 case X86::VFMADDCSHZrkz: {
4013 return Warning(Ops[0]->getStartLoc(),
"Destination register should be "
4014 "distinct from source registers");
4017 case X86::VFCMULCPHZ128rm:
4018 case X86::VFCMULCPHZ256rm:
4019 case X86::VFCMULCPHZrm:
4020 case X86::VFCMULCPHZ128rmb:
4021 case X86::VFCMULCPHZ256rmb:
4022 case X86::VFCMULCPHZrmb:
4023 case X86::VFCMULCPHZ128rmbk:
4024 case X86::VFCMULCPHZ256rmbk:
4025 case X86::VFCMULCPHZrmbk:
4026 case X86::VFCMULCPHZ128rmbkz:
4027 case X86::VFCMULCPHZ256rmbkz:
4028 case X86::VFCMULCPHZrmbkz:
4029 case X86::VFCMULCPHZ128rmk:
4030 case X86::VFCMULCPHZ256rmk:
4031 case X86::VFCMULCPHZrmk:
4032 case X86::VFCMULCPHZ128rmkz:
4033 case X86::VFCMULCPHZ256rmkz:
4034 case X86::VFCMULCPHZrmkz:
4035 case X86::VFCMULCPHZ128rr:
4036 case X86::VFCMULCPHZ256rr:
4037 case X86::VFCMULCPHZrr:
4038 case X86::VFCMULCPHZ128rrk:
4039 case X86::VFCMULCPHZ256rrk:
4040 case X86::VFCMULCPHZrrk:
4041 case X86::VFCMULCPHZ128rrkz:
4042 case X86::VFCMULCPHZ256rrkz:
4043 case X86::VFCMULCPHZrrkz:
4044 case X86::VFCMULCPHZrrb:
4045 case X86::VFCMULCPHZrrbk:
4046 case X86::VFCMULCPHZrrbkz:
4047 case X86::VFCMULCSHZrm:
4048 case X86::VFCMULCSHZrmk:
4049 case X86::VFCMULCSHZrmkz:
4050 case X86::VFCMULCSHZrr:
4051 case X86::VFCMULCSHZrrb:
4052 case X86::VFCMULCSHZrrbk:
4053 case X86::VFCMULCSHZrrbkz:
4054 case X86::VFCMULCSHZrrk:
4055 case X86::VFCMULCSHZrrkz:
4056 case X86::VFMULCPHZ128rm:
4057 case X86::VFMULCPHZ256rm:
4058 case X86::VFMULCPHZrm:
4059 case X86::VFMULCPHZ128rmb:
4060 case X86::VFMULCPHZ256rmb:
4061 case X86::VFMULCPHZrmb:
4062 case X86::VFMULCPHZ128rmbk:
4063 case X86::VFMULCPHZ256rmbk:
4064 case X86::VFMULCPHZrmbk:
4065 case X86::VFMULCPHZ128rmbkz:
4066 case X86::VFMULCPHZ256rmbkz:
4067 case X86::VFMULCPHZrmbkz:
4068 case X86::VFMULCPHZ128rmk:
4069 case X86::VFMULCPHZ256rmk:
4070 case X86::VFMULCPHZrmk:
4071 case X86::VFMULCPHZ128rmkz:
4072 case X86::VFMULCPHZ256rmkz:
4073 case X86::VFMULCPHZrmkz:
4074 case X86::VFMULCPHZ128rr:
4075 case X86::VFMULCPHZ256rr:
4076 case X86::VFMULCPHZrr:
4077 case X86::VFMULCPHZ128rrk:
4078 case X86::VFMULCPHZ256rrk:
4079 case X86::VFMULCPHZrrk:
4080 case X86::VFMULCPHZ128rrkz:
4081 case X86::VFMULCPHZ256rrkz:
4082 case X86::VFMULCPHZrrkz:
4083 case X86::VFMULCPHZrrb:
4084 case X86::VFMULCPHZrrbk:
4085 case X86::VFMULCPHZrrbkz:
4086 case X86::VFMULCSHZrm:
4087 case X86::VFMULCSHZrmk:
4088 case X86::VFMULCSHZrmkz:
4089 case X86::VFMULCSHZrr:
4090 case X86::VFMULCSHZrrb:
4091 case X86::VFMULCSHZrrbk:
4092 case X86::VFMULCSHZrrbkz:
4093 case X86::VFMULCSHZrrk:
4094 case X86::VFMULCSHZrrkz: {
4098 return Warning(Ops[0]->getStartLoc(),
"Destination register should be "
4099 "distinct from source registers");
4111 for (
unsigned i = 0;
i != NumOps; ++
i) {
4123 if (UsesRex && HReg != X86::NoRegister) {
4125 return Error(Ops[0]->getStartLoc(),
4126 "can't encode '" +
RegName +
"' in an instruction requiring "
4136 void X86AsmParser::emitWarningForSpecialLVIInstruction(
SMLoc Loc) {
4137 Warning(Loc,
"Instruction may be vulnerable to LVI and "
4138 "requires manual mitigation");
4139 Note(
SMLoc(),
"See https://software.intel.com/"
4140 "security-software-guidance/insights/"
4141 "deep-dive-load-value-injection#specialinstructions"
4142 " for more information");
4166 bool Parse32 = is32BitMode() || Code16GCC;
4168 is64BitMode() ? X86::RSP : (Parse32 ?
X86::ESP : X86::SP);
4174 ShlMemOp->addMemOperands(ShlInst, 5);
4187 emitWarningForSpecialLVIInstruction(Inst.
getLoc());
4199 void X86AsmParser::applyLVILoadHardeningMitigation(
MCInst &Inst,
4216 emitWarningForSpecialLVIInstruction(Inst.
getLoc());
4219 }
else if (Opcode == X86::REP_PREFIX || Opcode == X86::REPNE_PREFIX) {
4222 emitWarningForSpecialLVIInstruction(Inst.
getLoc());
4244 getSTI().getFeatureBits()[X86::FeatureLVIControlFlowIntegrity])
4245 applyLVICFIMitigation(Inst, Out);
4250 getSTI().getFeatureBits()[X86::FeatureLVILoadHardening])
4251 applyLVILoadHardeningMitigation(Inst, Out);
4254 bool X86AsmParser::MatchAndEmitInstruction(
SMLoc IDLoc,
unsigned &Opcode,
4257 bool MatchingInlineAsm) {
4258 if (isParsingIntelSyntax())
4267 bool MatchingInlineAsm) {
4272 .
Case(
"finit",
"fninit")
4273 .
Case(
"fsave",
"fnsave")
4274 .
Case(
"fstcw",
"fnstcw")
4275 .
Case(
"fstcww",
"fnstcw")
4276 .
Case(
"fstenv",
"fnstenv")
4277 .
Case(
"fstsw",
"fnstsw")
4278 .
Case(
"fstsww",
"fnstsw")
4279 .
Case(
"fclex",
"fnclex")
4285 if (!MatchingInlineAsm)
4286 emitInstruction(Inst,
Operands, Out);
4291 bool X86AsmParser::ErrorMissingFeature(
SMLoc IDLoc,
4293 bool MatchingInlineAsm) {
4294 assert(MissingFeatures.
any() &&
"Unknown missing feature!");
4297 OS <<
"instruction requires:";
4298 for (
unsigned i = 0,
e = MissingFeatures.
size();
i !=
e; ++
i) {
4299 if (MissingFeatures[
i])
4302 return Error(IDLoc, OS.str(),
SMRange(), MatchingInlineAsm);
4306 unsigned Result = 0;
4309 Result =
Prefix.getPrefix();
4315 unsigned X86AsmParser::checkTargetMatchPredicate(
MCInst &Inst) {
4319 if (ForcedVEXEncoding == VEXEncoding_EVEX &&
4321 return Match_Unsupported;
4323 if ((ForcedVEXEncoding == VEXEncoding_VEX ||
4324 ForcedVEXEncoding == VEXEncoding_VEX2 ||
4325 ForcedVEXEncoding == VEXEncoding_VEX3) &&
4327 return Match_Unsupported;
4331 (ForcedVEXEncoding != VEXEncoding_VEX &&
4332 ForcedVEXEncoding != VEXEncoding_VEX2 &&
4333 ForcedVEXEncoding != VEXEncoding_VEX3))
4334 return Match_Unsupported;
4336 return Match_Success;
4339 bool X86AsmParser::MatchAndEmitATTInstruction(
SMLoc IDLoc,
unsigned &Opcode,
4343 bool MatchingInlineAsm) {
4345 assert((*
Operands[0]).isToken() &&
"Leading operand should always be a mnemonic!");
4350 Out, MatchingInlineAsm);
4358 if (ForcedVEXEncoding == VEXEncoding_VEX)
4360 else if (ForcedVEXEncoding == VEXEncoding_VEX2)
4362 else if (ForcedVEXEncoding == VEXEncoding_VEX3)
4364 else if (ForcedVEXEncoding == VEXEncoding_EVEX)
4368 if (ForcedDispEncoding == DispEncoding_Disp8)
4370 else if (ForcedDispEncoding == DispEncoding_Disp32)
4378 if (ForcedDataPrefix == X86::Is32Bit)
4379 SwitchMode(X86::Is32Bit);
4383 MissingFeatures, MatchingInlineAsm,
4384 isParsingIntelSyntax());
4385 if (ForcedDataPrefix == X86::Is32Bit) {
4386 SwitchMode(X86::Is16Bit);
4387 ForcedDataPrefix = 0;
4389 switch (OriginalError) {
4392 if (!MatchingInlineAsm && validateInstruction(Inst,
Operands))
4397 if (!MatchingInlineAsm)
4398 while (processInstruction(Inst,
Operands))
4402 if (!MatchingInlineAsm)
4403 emitInstruction(Inst,
Operands, Out);
4406 case Match_InvalidImmUnsignedi4: {
4408 if (ErrorLoc ==
SMLoc())
4410 return Error(ErrorLoc,
"immediate must be an integer in range [0, 15]",
4411 EmptyRange, MatchingInlineAsm);
4413 case Match_MissingFeature:
4414 return ErrorMissingFeature(IDLoc, MissingFeatures, MatchingInlineAsm);
4415 case Match_InvalidOperand:
4416 case Match_MnemonicFail:
4417 case Match_Unsupported:
4420 if (
Op.getToken().empty()) {
4421 Error(IDLoc,
"instruction must have size higher than 0", EmptyRange,
4436 Op.setTokenValue(Tmp);
4444 const char *Suffixes =
Base[0] !=
'f' ?
"bwlq" :
"slt\0";
4446 const char *MemSize =
Base[0] !=
'f' ?
"\x08\x10\x20\x40" :
"\x20\x40\x50\0";
4458 bool HasVectorReg =
false;
4463 HasVectorReg =
true;
4464 else if (X86Op->
isMem()) {
4466 assert(
MemOp->Mem.Size == 0 &&
"Memory size always 0 under ATT syntax");
4474 Tmp.back() = Suffixes[
I];
4475 if (
MemOp && HasVectorReg)
4476 MemOp->Mem.Size = MemSize[
I];
4477 Match[
I] = Match_MnemonicFail;
4478 if (
MemOp || !HasVectorReg) {
4480 MatchInstruction(
Operands, Inst, ErrorInfoIgnore, MissingFeatures,
4481 MatchingInlineAsm, isParsingIntelSyntax());
4483 if (Match[
I] == Match_MissingFeature)
4484 ErrorInfoMissingFeatures = MissingFeatures;
4494 unsigned NumSuccessfulMatches =
llvm::count(Match, Match_Success);
4495 if (NumSuccessfulMatches == 1) {
4496 if (!MatchingInlineAsm && validateInstruction(Inst,
Operands))
4501 if (!MatchingInlineAsm)
4502 while (processInstruction(Inst,
Operands))
4506 if (!MatchingInlineAsm)
4507 emitInstruction(Inst,
Operands, Out);
4516 if (NumSuccessfulMatches > 1) {
4518 unsigned NumMatches = 0;
4520 if (Match[
I] == Match_Success)
4521 MatchChars[NumMatches++] = Suffixes[
I];
4525 OS <<
"ambiguous instructions require an explicit suffix (could be ";
4526 for (
unsigned i = 0;
i != NumMatches; ++
i) {
4529 if (
i + 1 == NumMatches)
4531 OS <<
"'" <<
Base << MatchChars[
i] <<
"'";
4534 Error(IDLoc, OS.str(), EmptyRange, MatchingInlineAsm);
4542 if (
llvm::count(Match, Match_MnemonicFail) == 4) {
4543 if (OriginalError == Match_MnemonicFail)
4544 return Error(IDLoc,
"invalid instruction mnemonic '" +
Base +
"'",
4545 Op.getLocRange(), MatchingInlineAsm);
4547 if (OriginalError == Match_Unsupported)
4548 return Error(IDLoc,
"unsupported instruction", EmptyRange,
4551 assert(OriginalError == Match_InvalidOperand &&
"Unexpected error");
4555 return Error(IDLoc,
"too few operands for instruction", EmptyRange,
4562 OperandRange, MatchingInlineAsm);
4566 return Error(IDLoc,
"invalid operand for instruction", EmptyRange,
4572 return Error(IDLoc,
"unsupported instruction", EmptyRange,
4578 if (
llvm::count(Match, Match_MissingFeature) == 1) {
4580 return ErrorMissingFeature(IDLoc, ErrorInfoMissingFeatures,
4586 if (
llvm::count(Match, Match_InvalidOperand) == 1) {
4587 return Error(IDLoc,
"invalid operand for instruction", EmptyRange,
4592 Error(IDLoc,
"unknown use of instruction mnemonic without a size suffix",
4593 EmptyRange, MatchingInlineAsm);
4597 bool X86AsmParser::MatchAndEmitIntelInstruction(
SMLoc IDLoc,
unsigned &Opcode,
4601 bool MatchingInlineAsm) {
4603 assert((*
Operands[0]).isToken() &&
"Leading operand should always be a mnemonic!");
4617 if (ForcedVEXEncoding == VEXEncoding_VEX)
4619 else if (ForcedVEXEncoding == VEXEncoding_VEX2)
4621 else if (ForcedVEXEncoding == VEXEncoding_VEX3)
4623 else if (ForcedVEXEncoding == VEXEncoding_EVEX)
4627 if (ForcedDispEncoding == DispEncoding_Disp8)
4629 else if (ForcedDispEncoding == DispEncoding_Disp32)
4640 UnsizedMemOp = X86Op;
4650 static const char *
const PtrSizedInstrs[] = {
"call",
"jmp",
"push"};
4651 for (
const char *Instr : PtrSizedInstrs) {
4652 if (Mnemonic == Instr) {
4653 UnsizedMemOp->
Mem.Size = getPointerWidth();
4665 if (Mnemonic ==
"push" &&
Operands.size() == 2) {
4667 if (X86Op->
isImm()) {
4669 const auto *
CE = dyn_cast<MCConstantExpr>(X86Op->
getImm());
4670 unsigned Size = getPointerWidth();
4675 Tmp += (is64BitMode())
4677 : (is32BitMode()) ?
"l" : (is16BitMode()) ?
"w" :
" ";
4678 Op.setTokenValue(Tmp);
4681 MissingFeatures, MatchingInlineAsm,
4692 static const unsigned MopSizes[] = {8, 16, 32, 64, 80, 128, 256, 512};
4693 for (
unsigned Size : MopSizes) {
4694 UnsizedMemOp->
Mem.Size =
Size;
4697 unsigned M = MatchInstruction(
Operands, Inst, ErrorInfoIgnore,
4698 MissingFeatures, MatchingInlineAsm,
4699 isParsingIntelSyntax());
4704 if (
Match.back() == Match_MissingFeature)
4705 ErrorInfoMissingFeatures = MissingFeatures;
4709 UnsizedMemOp->
Mem.Size = 0;
4715 if (
Match.empty()) {
4716 Match.push_back(MatchInstruction(
4718 isParsingIntelSyntax()));
4720 if (
Match.back() == Match_MissingFeature)
4721 ErrorInfoMissingFeatures = MissingFeatures;
4726 UnsizedMemOp->
Mem.Size = 0;
4729 if (
Match.back() == Match_MnemonicFail) {
4730 return Error(IDLoc,
"invalid instruction mnemonic '" + Mnemonic +
"'",
4731 Op.getLocRange(), MatchingInlineAsm);
4734 unsigned NumSuccessfulMatches =
llvm::count(Match, Match_Success);
4738 if (UnsizedMemOp && NumSuccessfulMatches > 1 &&
4741 unsigned M = MatchInstruction(
4743 isParsingIntelSyntax());
4744 if (M == Match_Success)
4745 NumSuccessfulMatches = 1;
4757 if (NumSuccessfulMatches == 1) {
4758 if (!MatchingInlineAsm && validateInstruction(Inst,
Operands))
4763 if (!MatchingInlineAsm)
4764 while (processInstruction(Inst,
Operands))
4767 if (!MatchingInlineAsm)
4768 emitInstruction(Inst,
Operands, Out);
4771 }
else if (NumSuccessfulMatches > 1) {
4773 "multiple matches only possible with unsized memory operands");
4775 "ambiguous operand size for instruction '" + Mnemonic +
"\'",
4781 return Error(IDLoc,
"unsupported instruction", EmptyRange,
4787 if (
llvm::count(Match, Match_MissingFeature) == 1) {
4789 return ErrorMissingFeature(IDLoc, ErrorInfoMissingFeatures,
4795 if (
llvm::count(Match, Match_InvalidOperand) == 1) {
4796 return Error(IDLoc,
"invalid operand for instruction", EmptyRange,
4800 if (
llvm::count(Match, Match_InvalidImmUnsignedi4) == 1) {
4802 if (ErrorLoc ==
SMLoc())
4804 return Error(ErrorLoc,
"immediate must be an integer in range [0, 15]",
4805 EmptyRange, MatchingInlineAsm);
4809 return Error(IDLoc,
"unknown instruction mnemonic", EmptyRange,
4813 bool X86AsmParser::OmitRegisterFromClobberLists(
unsigned RegNo) {
4814 return X86MCRegisterClasses[X86::SEGMENT_REGRegClassID].contains(RegNo);
4817 bool X86AsmParser::ParseDirective(
AsmToken DirectiveID) {
4821 return parseDirectiveArch();
4823 return ParseDirectiveCode(IDVal, DirectiveID.
getLoc());
4829 return Error(DirectiveID.
getLoc(),
"'.att_syntax noprefix' is not "
4830 "supported: registers must have a "
4831 "'%' prefix in .att_syntax");
4833 getParser().setAssemblerDialect(0);
4835 }
else if (IDVal.
startswith(
".intel_syntax")) {
4836 getParser().setAssemblerDialect(1);
4841 return Error(DirectiveID.
getLoc(),
"'.intel_syntax prefix' is not "
4842 "supported: registers must not have "
4843 "a '%' prefix in .intel_syntax");
4846 }
else if (IDVal ==
".nops")
4847 return parseDirectiveNops(DirectiveID.
getLoc());
4848 else if (IDVal ==
".even")
4849 return parseDirectiveEven(DirectiveID.
getLoc());
4850 else if (IDVal ==
".cv_fpo_proc")
4851 return parseDirectiveFPOProc(DirectiveID.
getLoc());
4852 else if (IDVal ==
".cv_fpo_setframe")
4853 return parseDirectiveFPOSetFrame(DirectiveID.
getLoc());
4854 else if (IDVal ==
".cv_fpo_pushreg")
4855 return parseDirectiveFPOPushReg(DirectiveID.
getLoc());
4856 else if (IDVal ==
".cv_fpo_stackalloc")
4857 return parseDirectiveFPOStackAlloc(DirectiveID.
getLoc());
4858 else if (IDVal ==
".cv_fpo_stackalign")
4859 return parseDirectiveFPOStackAlign(DirectiveID.
getLoc());
4860 else if (IDVal ==
".cv_fpo_endprologue")
4861 return parseDirectiveFPOEndPrologue(DirectiveID.
getLoc());
4862 else if (IDVal ==
".cv_fpo_endproc")
4863 return parseDirectiveFPOEndProc(DirectiveID.
getLoc());
4864 else if (IDVal ==
".seh_pushreg" ||
4866 return parseDirectiveSEHPushReg(DirectiveID.
getLoc());
4867 else if (IDVal ==
".seh_setframe" ||
4869 return parseDirectiveSEHSetFrame(DirectiveID.
getLoc());
4870 else if (IDVal ==
".seh_savereg" ||
4872 return parseDirectiveSEHSaveReg(DirectiveID.
getLoc());
4873 else if (IDVal ==
".seh_savexmm" ||
4875 return parseDirectiveSEHSaveXMM(DirectiveID.
getLoc());
4876 else if (IDVal ==
".seh_pushframe" ||
4878 return parseDirectiveSEHPushFrame(DirectiveID.
getLoc());
4883 bool X86AsmParser::parseDirectiveArch() {
4885 getParser().parseStringToEndOfStatement();
4891 bool X86AsmParser::parseDirectiveNops(
SMLoc L) {
4892 int64_t NumBytes = 0,
Control = 0;
4893 SMLoc NumBytesLoc, ControlLoc;
4895 NumBytesLoc = getTok().getLoc();
4896 if (getParser().checkForValidSection() ||
4897 getParser().parseAbsoluteExpression(NumBytes))
4901 ControlLoc = getTok().getLoc();
4902 if (getParser().parseAbsoluteExpression(
Control))
4906 "unexpected token in '.nops' directive"))
4909 if (NumBytes <= 0) {
4910 Error(NumBytesLoc,
"'.nops' directive with non-positive size");
4915 Error(ControlLoc,
"'.nops' directive with negative NOP size");
4920 getParser().getStreamer().emitNops(NumBytes,
Control, L, STI);
4927 bool X86AsmParser::parseDirectiveEven(
SMLoc L) {
4933 getStreamer().initSections(
false, getSTI());
4934 Section = getStreamer().getCurrentSectionOnly();
4937 getStreamer().emitCodeAlignment(2, &getSTI(), 0);
4939 getStreamer().emitValueToAlignment(2, 0, 1, 0);
4945 bool X86AsmParser::ParseDirectiveCode(
StringRef IDVal,
SMLoc L) {
4948 if (IDVal ==
".code16") {
4950 if (!is16BitMode()) {
4951 SwitchMode(X86::Is16Bit);
4952 getParser().getStreamer().emitAssemblerFlag(
MCAF_Code16);
4954 }
else if (IDVal ==
".code16gcc") {
4958 if (!is16BitMode()) {
4959 SwitchMode(X86::Is16Bit);
4960 getParser().getStreamer().emitAssemblerFlag(
MCAF_Code16);
4962 }
else if (IDVal ==
".code32") {
4964 if (!is32BitMode()) {
4965 SwitchMode(X86::Is32Bit);
4966 getParser().getStreamer().emitAssemblerFlag(
MCAF_Code32);
4968 }
else if (IDVal ==
".code64") {
4970 if (!is64BitMode()) {
4971 SwitchMode(X86::Is64Bit);
4972 getParser().getStreamer().emitAssemblerFlag(
MCAF_Code64);
4975 Error(L,
"unknown directive " + IDVal);
4983 bool X86AsmParser::parseDirectiveFPOProc(
SMLoc L) {
4988 return Parser.
TokError(
"expected symbol name");
4989 if (Parser.
parseIntToken(ParamsSize,
"expected parameter byte count"))
4992 return Parser.
TokError(
"parameters size out of range");
4995 MCSymbol *ProcSym = getContext().getOrCreateSymbol(ProcName);
4996 return getTargetStreamer().emitFPOProc(ProcSym, ParamsSize, L);
5000 bool X86AsmParser::parseDirectiveFPOSetFrame(
SMLoc L) {
5003 if (ParseRegister(
Reg, DummyLoc, DummyLoc) || parseEOL())
5005 return getTargetStreamer().emitFPOSetFrame(
Reg, L);
5009 bool X86AsmParser::parseDirectiveFPOPushReg(
SMLoc L) {
5012 if (ParseRegister(
Reg, DummyLoc, DummyLoc) || parseEOL())
5014 return getTargetStreamer().emitFPOPushReg(
Reg, L);
5018 bool X86AsmParser::parseDirectiveFPOStackAlloc(
SMLoc L) {
5021 if (Parser.
parseIntToken(Offset,
"expected offset") || parseEOL())
5023 return getTargetStreamer().emitFPOStackAlloc(Offset, L);
5027 bool X86AsmParser::parseDirectiveFPOStackAlign(
SMLoc L) {
5030 if (Parser.
parseIntToken(Offset,
"expected offset") || parseEOL())
5032 return getTargetStreamer().emitFPOStackAlign(Offset, L);
5036 bool X86AsmParser::parseDirectiveFPOEndPrologue(
SMLoc L) {
5040 return getTargetStreamer().emitFPOEndPrologue(L);
5044 bool X86AsmParser::parseDirectiveFPOEndProc(
SMLoc L) {
5048 return getTargetStreamer().emitFPOEndProc(L);
5051 bool X86AsmParser::parseSEHRegisterNumber(
unsigned RegClassID,
5053 SMLoc startLoc = getLexer().getLoc();
5059 if (ParseRegister(RegNo, startLoc, endLoc))
5062 if (!X86MCRegisterClasses[RegClassID].
contains(RegNo)) {
5063 return Error(startLoc,
5064 "register is not supported for use with this directive");
5070 if (getParser().parseAbsoluteExpression(EncodedReg))
5076 for (
MCPhysReg Reg : X86MCRegisterClasses[RegClassID]) {
5077 if (
MRI->getEncodingValue(
Reg) == EncodedReg) {
5083 return Error(startLoc,
5084 "incorrect register number for use with this directive");
5091 bool X86AsmParser::parseDirectiveSEHPushReg(
SMLoc Loc) {
5093 if (parseSEHRegisterNumber(X86::GR64RegClassID,
Reg))
5097 return TokError(
"unexpected token in directive");
5100 getStreamer().EmitWinCFIPushReg(
Reg, Loc);
5104 bool X86AsmParser::parseDirectiveSEHSetFrame(
SMLoc Loc) {
5107 if (parseSEHRegisterNumber(X86::GR64RegClassID,
Reg))
5110 return TokError(
"you must specify a stack pointer offset");
5113 if (getParser().parseAbsoluteExpression(Off))
5117 return TokError(
"unexpected token in directive");
5120 getStreamer().EmitWinCFISetFrame(
Reg, Off, Loc);
5124 bool X86AsmParser::parseDirectiveSEHSaveReg(
SMLoc Loc) {
5127 if (parseSEHRegisterNumber(X86::GR64RegClassID,
Reg))
5130 return TokError(
"you must specify an offset on the stack");
5133 if (getParser().parseAbsoluteExpression(Off))
5137 return TokError(
"unexpected token in directive");
5140 getStreamer().EmitWinCFISaveReg(
Reg, Off, Loc);
5144 bool X86AsmParser::parseDirectiveSEHSaveXMM(
SMLoc Loc) {
5147 if (parseSEHRegisterNumber(X86::VR128XRegClassID,
Reg))
5150 return TokError(
"you must specify an offset on the stack");
5153 if (getParser().parseAbsoluteExpression(Off))
5157 return TokError(
"unexpected token in directive");
5160 getStreamer().EmitWinCFISaveXMM(
Reg, Off, Loc);
5164 bool X86AsmParser::parseDirectiveSEHPushFrame(
SMLoc Loc) {
5168 SMLoc startLoc = getLexer().getLoc();
5170 if (!getParser().parseIdentifier(CodeID)) {
5171 if (CodeID !=
"code")
5172 return Error(startLoc,
"expected @code");
5178 return TokError(
"unexpected token in directive");
5181 getStreamer().EmitWinCFIPushFrame(Code, Loc);
5191 #define GET_REGISTER_MATCHER
5192 #define GET_MATCHER_IMPLEMENTATION
5193 #define GET_SUBTARGET_FEATURE_NAME
5194 #include "X86GenAsmMatcher.inc"