47 "x86-experimental-lvi-inline-asm-hardening",
48 cl::desc(
"Harden inline assembly code that may be vulnerable to Load Value"
49 " Injection (LVI). This feature is experimental."),
cl::Hidden);
52 if (Scale != 1 && Scale != 2 && Scale != 4 && Scale != 8) {
53 ErrMsg =
"scale factor in address must be 1, 2, 4 or 8";
static const char OpPrecedence[] = {

  unsigned ForcedDataPrefix = 0;
  VEXEncoding ForcedVEXEncoding = VEXEncoding_Default;
    DispEncoding_Default,
  DispEncoding ForcedDispEncoding = DispEncoding_Default;
  bool UseApxExtendedReg = false;

  SMLoc consumeToken() {
           "do not have a target streamer");

                            bool matchingInlineAsm, unsigned VariantID = 0) {
      SwitchMode(X86::Is32Bit);
                                MissingFeatures, matchingInlineAsm,
      SwitchMode(X86::Is16Bit);

  enum InfixCalculatorTok {
  enum IntelOperatorKind {
  enum MasmOperatorKind {
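  // Helper that evaluates the arithmetic inside an Intel-syntax operand:
  // operators are pushed in infix order, reordered onto a postfix stack by
  // precedence (OpPrecedence above), and folded to a single immediate by
  // execute(). Only IC_IMM and IC_REGISTER tokens may appear as operands.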
  class InfixCalculator {
    typedef std::pair<InfixCalculatorTok, int64_t> ICToken;

    bool isUnaryOperator(InfixCalculatorTok Op) const {
      return Op == IC_NEG || Op == IC_NOT;

    int64_t popOperand() {
      assert(!PostfixStack.empty() && "Popped an empty stack!");
      if (!(Op.first == IC_IMM || Op.first == IC_REGISTER))

    void pushOperand(InfixCalculatorTok Op, int64_t Val = 0) {
      assert((Op == IC_IMM || Op == IC_REGISTER) && "Unexpected operand!");

    void popOperator() { InfixOperatorStack.pop_back(); }
    void pushOperator(InfixCalculatorTok Op) {
      if (InfixOperatorStack.empty()) {
      unsigned Idx = InfixOperatorStack.size() - 1;
      InfixCalculatorTok StackOp = InfixOperatorStack[Idx];
      if (OpPrecedence[Op] > OpPrecedence[StackOp] || StackOp == IC_LPAREN) {
        unsigned ParenCount = 0;
        if (InfixOperatorStack.empty())
        Idx = InfixOperatorStack.size() - 1;
        StackOp = InfixOperatorStack[Idx];
        if (!(OpPrecedence[StackOp] >= OpPrecedence[Op] || ParenCount))
        if (!ParenCount && StackOp == IC_LPAREN)
        if (StackOp == IC_RPAREN) {
        } else if (StackOp == IC_LPAREN) {
        PostfixStack.push_back(std::make_pair(StackOp, 0));

      while (!InfixOperatorStack.empty()) {
        InfixCalculatorTok StackOp = InfixOperatorStack.pop_back_val();
        if (StackOp != IC_LPAREN && StackOp != IC_RPAREN)
          PostfixStack.push_back(std::make_pair(StackOp, 0));

      if (PostfixStack.empty())

      for (unsigned i = 0, e = PostfixStack.size(); i != e; ++i) {
        ICToken Op = PostfixStack[i];
        if (Op.first == IC_IMM || Op.first == IC_REGISTER) {
        } else if (isUnaryOperator(Op.first)) {
          assert(OperandStack.size() > 0 && "Too few operands.");
          assert(Operand.first == IC_IMM &&
                 "Unary operation with a register!");
            OperandStack.push_back(std::make_pair(IC_IMM, -Operand.second));
            OperandStack.push_back(std::make_pair(IC_IMM, ~Operand.second));
          assert(OperandStack.size() > 1 && "Too few operands.");
            Val = Op1.second + Op2.second;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            Val = Op1.second - Op2.second;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Multiply operation with an immediate and a register!");
            Val = Op1.second * Op2.second;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Divide operation with an immediate and a register!");
            assert(Op2.second != 0 && "Division by zero!");
            Val = Op1.second / Op2.second;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Modulo operation with an immediate and a register!");
            Val = Op1.second % Op2.second;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Or operation with an immediate and a register!");
            Val = Op1.second | Op2.second;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Xor operation with an immediate and a register!");
            Val = Op1.second ^ Op2.second;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "And operation with an immediate and a register!");
            Val = Op1.second & Op2.second;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Left shift operation with an immediate and a register!");
            Val = Op1.second << Op2.second;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Right shift operation with an immediate and a register!");
            Val = Op1.second >> Op2.second;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Equals operation with an immediate and a register!");
            Val = (Op1.second == Op2.second) ? -1 : 0;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Not-equals operation with an immediate and a register!");
            Val = (Op1.second != Op2.second) ? -1 : 0;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Less-than operation with an immediate and a register!");
            Val = (Op1.second < Op2.second) ? -1 : 0;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Less-than-or-equal operation with an immediate and a "
                   "register!");
            Val = (Op1.second <= Op2.second) ? -1 : 0;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Greater-than operation with an immediate and a register!");
            Val = (Op1.second > Op2.second) ? -1 : 0;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Greater-than-or-equal operation with an immediate and a "
                   "register!");
            Val = (Op1.second >= Op2.second) ? -1 : 0;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));

      assert(OperandStack.size() == 1 && "Expected a single result.");
  enum IntelExprState {

  class IntelExprStateMachine {
    IntelExprState State = IES_INIT, PrevState = IES_ERROR;
    unsigned BaseReg = 0, IndexReg = 0, TmpReg = 0, Scale = 0;
    bool MemExpr = false;
    bool BracketUsed = false;
    bool OffsetOperator = false;
    bool AttachToOperandIdx = false;
    SMLoc OffsetOperatorLoc;

      ErrMsg = "cannot use more than one symbol in memory operand";

    IntelExprStateMachine() = default;

    void addImm(int64_t imm) { Imm += imm; }
    short getBracCount() const { return BracCount; }
    bool isMemExpr() const { return MemExpr; }
    bool isBracketUsed() const { return BracketUsed; }
    bool isOffsetOperator() const { return OffsetOperator; }
    SMLoc getOffsetLoc() const { return OffsetOperatorLoc; }
    unsigned getBaseReg() const { return BaseReg; }
    unsigned getIndexReg() const { return IndexReg; }
    unsigned getScale() const { return Scale; }
    StringRef getSymName() const { return SymName; }
    unsigned getElementSize() const { return CurType.ElementSize; }
    unsigned getLength() const { return CurType.Length; }
    int64_t getImm() { return Imm + IC.execute(); }
    bool isValidEndState() const {
      return State == IES_RBRAC || State == IES_INTEGER;
    void setAppendAfterOperand() { AttachToOperandIdx = true; }
    bool isPIC() const { return IsPIC; }
    void setPIC() { IsPIC = true; }
    bool hadError() const { return State == IES_ERROR; }

      if (IsPIC && AttachToOperandIdx)
        ErrMsg = "Don't use 2 or more regs for mem offset in PIC model!";
        ErrMsg = "BaseReg/IndexReg already set!";

      IntelExprState CurrState = State;
          IC.pushOperator(IC_OR);
      PrevState = CurrState;

      IntelExprState CurrState = State;
          IC.pushOperator(IC_XOR);
      PrevState = CurrState;

      IntelExprState CurrState = State;
          IC.pushOperator(IC_AND);
      PrevState = CurrState;

      IntelExprState CurrState = State;
          IC.pushOperator(IC_EQ);
      PrevState = CurrState;

      IntelExprState CurrState = State;
          IC.pushOperator(IC_NE);
      PrevState = CurrState;

      IntelExprState CurrState = State;
          IC.pushOperator(IC_LT);
      PrevState = CurrState;

      IntelExprState CurrState = State;
          IC.pushOperator(IC_LE);
      PrevState = CurrState;

      IntelExprState CurrState = State;
          IC.pushOperator(IC_GT);
      PrevState = CurrState;

      IntelExprState CurrState = State;
          IC.pushOperator(IC_GE);
      PrevState = CurrState;

      IntelExprState CurrState = State;
          IC.pushOperator(IC_LSHIFT);
      PrevState = CurrState;

      IntelExprState CurrState = State;
          IC.pushOperator(IC_RSHIFT);
      PrevState = CurrState;

      IntelExprState CurrState = State;
        IC.pushOperator(IC_PLUS);
        if (CurrState == IES_REGISTER && PrevState != IES_MULTIPLY) {
              return regsUseUpError(ErrMsg);
      PrevState = CurrState;

      IntelExprState CurrState = State;
        if (CurrState == IES_REGISTER || CurrState == IES_RPAREN ||
            CurrState == IES_INTEGER || CurrState == IES_RBRAC ||
            CurrState == IES_OFFSET)
          IC.pushOperator(IC_MINUS);
        else if (PrevState == IES_REGISTER && CurrState == IES_MULTIPLY) {
          ErrMsg = "Scale can't be negative";
          IC.pushOperator(IC_NEG);
        if (CurrState == IES_REGISTER && PrevState != IES_MULTIPLY) {
              return regsUseUpError(ErrMsg);
      PrevState = CurrState;

      IntelExprState CurrState = State;
          IC.pushOperator(IC_NOT);
      PrevState = CurrState;

    bool onRegister(unsigned Reg, StringRef &ErrMsg) {
      IntelExprState CurrState = State;
        State = IES_REGISTER;
        IC.pushOperand(IC_REGISTER);
        if (PrevState == IES_INTEGER) {
            return regsUseUpError(ErrMsg);
          State = IES_REGISTER;
          Scale = IC.popOperand();
          IC.pushOperand(IC_IMM);
      PrevState = CurrState;

      if (ParsingMSInlineAsm)
      if (auto *CE = dyn_cast<MCConstantExpr>(SymRef))
        return onInteger(CE->getValue(), ErrMsg);
        if (setSymRef(SymRef, SymRefName, ErrMsg))
        IC.pushOperand(IC_IMM);
        if (ParsingMSInlineAsm)

    bool onInteger(int64_t TmpInt, StringRef &ErrMsg) {
      IntelExprState CurrState = State;
        if (PrevState == IES_REGISTER && CurrState == IES_MULTIPLY) {
            return regsUseUpError(ErrMsg);
        IC.pushOperand(IC_IMM, TmpInt);
      PrevState = CurrState;

        State = IES_MULTIPLY;
        IC.pushOperator(IC_MULTIPLY);
        IC.pushOperator(IC_DIVIDE);
        IC.pushOperator(IC_MOD);
        IC.pushOperator(IC_PLUS);

      assert(!BracCount && "BracCount should be zero on parsing's start");

      IntelExprState CurrState = State;
        if (BracCount-- != 1) {
          ErrMsg = "unexpected bracket encountered";
        if (CurrState == IES_REGISTER && PrevState != IES_MULTIPLY) {
            return regsUseUpError(ErrMsg);
      PrevState = CurrState;

      IntelExprState CurrState = State;
        IC.pushOperator(IC_LPAREN);
      PrevState = CurrState;

        IC.pushOperator(IC_RPAREN);

                  bool ParsingMSInlineAsm, StringRef &ErrMsg) {
        ErrMsg = "unexpected offset operator expression";
      if (setSymRef(Val, ID, ErrMsg))
      OffsetOperator = true;
      OffsetOperatorLoc = OffsetLoc;
      IC.pushOperand(IC_IMM);
      if (ParsingMSInlineAsm) {
            bool MatchingInlineAsm = false) {
    if (MatchingInlineAsm) {
      if (!getLexer().isAtStartOfStatement())
    return Parser.Error(L, Msg, Range);

                     bool RestoreOnFailure);

  std::unique_ptr<X86Operand> DefaultMemSIOperand(SMLoc Loc);
  std::unique_ptr<X86Operand> DefaultMemDIOperand(SMLoc Loc);
  bool IsSIReg(unsigned Reg);
  unsigned GetSIDIForRegClass(unsigned RegClassID, unsigned Reg, bool IsSIReg);
      std::unique_ptr<llvm::MCParsedAsmOperand> &&Src,
      std::unique_ptr<llvm::MCParsedAsmOperand> &&Dst);
  bool ParseIntelDotOperator(IntelExprStateMachine &SM, SMLoc &End);
  unsigned ParseIntelInlineAsmOperator(unsigned OpKind);
  bool ParseMasmOperator(unsigned OpKind, int64_t &Val);
  bool ParseIntelNamedOperator(StringRef Name, IntelExprStateMachine &SM,
  bool ParseMasmNamedOperator(StringRef Name, IntelExprStateMachine &SM,
  void RewriteIntelExpression(IntelExprStateMachine &SM, SMLoc Start,
  bool ParseIntelExpression(IntelExprStateMachine &SM, SMLoc &End);
  bool ParseIntelInlineAsmIdentifier(const MCExpr *&Val, StringRef &Identifier,
                                     bool IsUnevaluatedOperand, SMLoc &End,
                                     bool IsParsingOffsetOperator = false);
                          IntelExprStateMachine &SM);
  bool ParseMemOperand(unsigned SegReg, const MCExpr *Disp, SMLoc StartLoc,
  bool ParseIntelMemoryOperandSize(unsigned &Size);
  bool CreateMemForMSInlineAsm(unsigned SegReg, const MCExpr *Disp,
                               unsigned BaseReg, unsigned IndexReg,
                               unsigned Scale, bool NonAbsMem, SMLoc Start,

  bool parseDirectiveArch();
  bool parseDirectiveNops(SMLoc L);
  bool parseDirectiveEven(SMLoc L);
  bool parseDirectiveFPOProc(SMLoc L);
  bool parseDirectiveFPOSetFrame(SMLoc L);
  bool parseDirectiveFPOPushReg(SMLoc L);
  bool parseDirectiveFPOStackAlloc(SMLoc L);
  bool parseDirectiveFPOStackAlign(SMLoc L);
  bool parseDirectiveFPOEndPrologue(SMLoc L);
  bool parseDirectiveFPOEndProc(SMLoc L);

  bool parseSEHRegisterNumber(unsigned RegClassID, MCRegister &RegNo);
  bool parseDirectiveSEHPushReg(SMLoc);
  bool parseDirectiveSEHSetFrame(SMLoc);
  bool parseDirectiveSEHSaveReg(SMLoc);
  bool parseDirectiveSEHSaveXMM(SMLoc);
  bool parseDirectiveSEHPushFrame(SMLoc);

  void emitWarningForSpecialLVIInstruction(SMLoc Loc);

                               bool MatchingInlineAsm) override;
                              bool MatchingInlineAsm);

  bool MatchAndEmitATTInstruction(SMLoc IDLoc, unsigned &Opcode,
                                  bool MatchingInlineAsm);
  bool MatchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode,
                                    bool MatchingInlineAsm);

  bool ParseZ(std::unique_ptr<X86Operand> &Z, const SMLoc &StartLoc);

  bool is64BitMode() const {
  bool is32BitMode() const {
  bool is16BitMode() const {
  void SwitchMode(unsigned mode) {
    FeatureBitset AllModes({X86::Is64Bit, X86::Is32Bit, X86::Is16Bit});

  unsigned getPointerWidth() {
    if (is16BitMode()) return 16;
    if (is32BitMode()) return 32;
    if (is64BitMode()) return 64;

  bool isParsingIntelSyntax() {

#define GET_ASSEMBLER_HEADER
#include "X86GenAsmMatcher.inc"

  enum X86MatchResultTy {
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "X86GenAsmMatcher.inc"

                     SMLoc &EndLoc) override;

#define GET_REGISTER_MATCHER
#define GET_SUBTARGET_FEATURE_NAME
#include "X86GenAsmMatcher.inc"
                          unsigned Scale, bool Is64BitMode,

      !(BaseReg == X86::RIP || BaseReg == X86::EIP ||
        X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg) ||
        X86MCRegisterClasses[X86::GR32RegClassID].contains(BaseReg) ||
        X86MCRegisterClasses[X86::GR64RegClassID].contains(BaseReg))) {
    ErrMsg = "invalid base+index expression";

  if (IndexReg != 0 &&
      !(IndexReg == X86::EIZ || IndexReg == X86::RIZ ||
        X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg) ||
        X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg) ||
        X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg) ||
        X86MCRegisterClasses[X86::VR128XRegClassID].contains(IndexReg) ||
        X86MCRegisterClasses[X86::VR256XRegClassID].contains(IndexReg) ||
        X86MCRegisterClasses[X86::VR512RegClassID].contains(IndexReg))) {
    ErrMsg = "invalid base+index expression";

  if (((BaseReg == X86::RIP || BaseReg == X86::EIP) && IndexReg != 0) ||
      IndexReg == X86::EIP || IndexReg == X86::RIP ||
      IndexReg == X86::ESP || IndexReg == X86::RSP) {
    ErrMsg = "invalid base+index expression";

  if (X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg) &&
      (Is64BitMode || (BaseReg != X86::BX && BaseReg != X86::BP &&
                       BaseReg != X86::SI && BaseReg != X86::DI))) {
    ErrMsg = "invalid 16-bit base register";

      X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg)) {
    ErrMsg = "16-bit memory operand may not include only index register";

  if (BaseReg != 0 && IndexReg != 0) {
    if (X86MCRegisterClasses[X86::GR64RegClassID].contains(BaseReg) &&
        (X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg) ||
         X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg) ||
         IndexReg == X86::EIZ)) {
      ErrMsg = "base register is 64-bit, but index register is not";
    if (X86MCRegisterClasses[X86::GR32RegClassID].contains(BaseReg) &&
        (X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg) ||
         X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg) ||
         IndexReg == X86::RIZ)) {
      ErrMsg = "base register is 32-bit, but index register is not";
    if (X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg)) {
      if (X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg) ||
          X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg)) {
        ErrMsg = "base register is 16-bit, but index register is not";
      if ((BaseReg != X86::BX && BaseReg != X86::BP) ||
          (IndexReg != X86::SI && IndexReg != X86::DI)) {
        ErrMsg = "invalid 16-bit base/index register combination";

  if (!Is64BitMode && BaseReg != 0 &&
      (BaseReg == X86::RIP || BaseReg == X86::EIP)) {
    ErrMsg = "IP-relative addressing requires 64-bit mode";

  if (isParsingMSInlineAsm() && isParsingIntelSyntax() &&
      (RegNo == X86::EFLAGS || RegNo == X86::MXCSR))

  if (!is64BitMode()) {
    if (RegNo == X86::RIZ || RegNo == X86::RIP ||
        X86MCRegisterClasses[X86::GR64RegClassID].contains(RegNo) ||
      return Error(StartLoc,
                   "register %" + RegName +
                       " is only available in 64-bit mode",

    UseApxExtendedReg = true;

  if (RegNo == 0 && RegName.startswith("db")) {

  if (isParsingIntelSyntax())
  return Error(StartLoc, "invalid register name", SMRange(StartLoc, EndLoc));
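// Register parsing: MatchRegisterByName handles the common names; the code
// below also accepts %st(N) stack references and, when RestoreOnFailure is
// set, records the consumed tokens so they can be handed back if the parse
// turns out not to be a register after all.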
                                 SMLoc &EndLoc, bool RestoreOnFailure) {
  auto OnFailure = [RestoreOnFailure, &Lexer, &Tokens]() {
    if (RestoreOnFailure) {
      while (!Tokens.empty()) {

  StartLoc = PercentTok.getLoc();

    if (isParsingIntelSyntax())
      return true;
    return Error(StartLoc, "invalid register name",

  if (MatchRegisterByName(RegNo, Tok.getString(), StartLoc, EndLoc)) {

  if (RegNo == X86::ST0) {
      return Error(IntTok.getLoc(), "expected stack index");
    case 0: RegNo = X86::ST0; break;
    case 1: RegNo = X86::ST1; break;
    case 2: RegNo = X86::ST2; break;
    case 3: RegNo = X86::ST3; break;
    case 4: RegNo = X86::ST4; break;
    case 5: RegNo = X86::ST5; break;
    case 6: RegNo = X86::ST6; break;
    case 7: RegNo = X86::ST7; break;
      return Error(IntTok.getLoc(), "invalid stack index");

    if (isParsingIntelSyntax())
      return true;
    return Error(StartLoc, "invalid register name",

  return ParseRegister(Reg, StartLoc, EndLoc, false);

  bool Result = ParseRegister(Reg, StartLoc, EndLoc, true);
  bool PendingErrors = getParser().hasPendingError();
  getParser().clearPendingErrors();

std::unique_ptr<X86Operand> X86AsmParser::DefaultMemSIOperand(SMLoc Loc) {
  bool Parse32 = is32BitMode() || Code16GCC;
  unsigned Basereg = is64BitMode() ? X86::RSI : (Parse32 ? X86::ESI : X86::SI);

std::unique_ptr<X86Operand> X86AsmParser::DefaultMemDIOperand(SMLoc Loc) {
  bool Parse32 = is32BitMode() || Code16GCC;
  unsigned Basereg = is64BitMode() ? X86::RDI : (Parse32 ? X86::EDI : X86::DI);

bool X86AsmParser::IsSIReg(unsigned Reg) {

unsigned X86AsmParser::GetSIDIForRegClass(unsigned RegClassID, unsigned Reg,
  switch (RegClassID) {
  case X86::GR64RegClassID:
    return IsSIReg ? X86::RSI : X86::RDI;
  case X86::GR32RegClassID:
    return IsSIReg ? X86::ESI : X86::EDI;
  case X86::GR16RegClassID:
    return IsSIReg ? X86::SI : X86::DI;

void X86AsmParser::AddDefaultSrcDestOperands(
    std::unique_ptr<llvm::MCParsedAsmOperand> &&Dst) {
  if (isParsingIntelSyntax()) {
    Operands.push_back(std::move(Dst));
    Operands.push_back(std::move(Src));
    Operands.push_back(std::move(Src));
    Operands.push_back(std::move(Dst));
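// For string instructions the explicit memory operands only determine the
// operand size; the hardware always uses (R|E)SI and ES:(R|E)DI. The verifier
// below checks that any registers the user wrote are consistent, rewrites
// them to the defaults, and warns when they differ.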
bool X86AsmParser::VerifyAndAdjustOperands(OperandVector &OrigOperands,
  if (OrigOperands.size() > 1) {
           "Operand size mismatch");

  int RegClassID = -1;
  for (unsigned int i = 0; i < FinalOperands.size(); ++i) {
    if (FinalOp.isReg() &&
    if (FinalOp.isMem()) {
      if (!OrigOp.isMem())
        if (RegClassID != -1 &&
            !X86MCRegisterClasses[RegClassID].contains(OrigReg)) {
                       "mismatching source and destination index registers");
        if (X86MCRegisterClasses[X86::GR64RegClassID].contains(OrigReg))
          RegClassID = X86::GR64RegClassID;
        else if (X86MCRegisterClasses[X86::GR32RegClassID].contains(OrigReg))
          RegClassID = X86::GR32RegClassID;
        else if (X86MCRegisterClasses[X86::GR16RegClassID].contains(OrigReg))
          RegClassID = X86::GR16RegClassID;

        bool IsSI = IsSIReg(FinalReg);
        FinalReg = GetSIDIForRegClass(RegClassID, FinalReg, IsSI);
        if (FinalReg != OrigReg) {
          std::string RegName = IsSI ? "ES:(R|E)SI" : "ES:(R|E)DI";
              "memory operand is only for determining the size, " + RegName +
              " will be used for the location"));

  for (auto &WarningMsg : Warnings) {
    Warning(WarningMsg.first, WarningMsg.second);

  for (unsigned int i = 0; i < FinalOperands.size(); ++i)

  for (unsigned int i = 0; i < FinalOperands.size(); ++i)
    OrigOperands.push_back(std::move(FinalOperands[i]));

  if (isParsingIntelSyntax())

bool X86AsmParser::CreateMemForMSInlineAsm(unsigned SegReg, const MCExpr *Disp,
                                           unsigned BaseReg, unsigned IndexReg,
                                           unsigned Scale, bool NonAbsMem,
  unsigned FrontendSize = 0;
  void *Decl = nullptr;
  bool IsGlobalLV = false;
    FrontendSize = Info.Var.Type * 8;
    Decl = Info.Var.Decl;
    IsGlobalLV = Info.Var.IsGlobalLV;
  if (BaseReg || IndexReg) {
        End, Size, Identifier, Decl, 0,
        BaseReg && IndexReg));
      getPointerWidth(), SegReg, Disp, BaseReg, IndexReg, Scale, Start, End,
      X86::RIP, Identifier, Decl, FrontendSize));
                                           IntelExprStateMachine &SM,
      !getParser().isParsingMasm())
  if (Name.equals_insensitive("not")) {
  } else if (Name.equals_insensitive("or")) {
  } else if (Name.equals_insensitive("shl")) {
  } else if (Name.equals_insensitive("shr")) {
  } else if (Name.equals_insensitive("xor")) {
  } else if (Name.equals_insensitive("and")) {
  } else if (Name.equals_insensitive("mod")) {
  } else if (Name.equals_insensitive("offset")) {
    SMLoc OffsetLoc = getTok().getLoc();
    const MCExpr *Val = nullptr;
    ParseError = ParseIntelOffsetOperator(Val, ID, Info, End);
    SM.onOffset(Val, OffsetLoc, ID, Info, isParsingMSInlineAsm(), ErrMsg);
  if (!Name.equals_insensitive("offset"))
    End = consumeToken();

                                          IntelExprStateMachine &SM,
  if (Name.equals_insensitive("eq")) {
  } else if (Name.equals_insensitive("ne")) {
  } else if (Name.equals_insensitive("lt")) {
  } else if (Name.equals_insensitive("le")) {
  } else if (Name.equals_insensitive("gt")) {
  } else if (Name.equals_insensitive("ge")) {
  End = consumeToken();

                                       IntelExprStateMachine &SM) {
    SM.setAppendAfterOperand();
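// Main Intel-syntax expression parser: walks tokens, feeding registers,
// integers, identifiers, named operators, and brackets/parens into the
// IntelExprStateMachine until a valid end state or an error is reached.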
bool X86AsmParser::ParseIntelExpression(IntelExprStateMachine &SM,
                                        SMLoc &End) {
  if (getContext().getObjectFileInfo()->isPositionIndependent())
  bool UpdateLocLex = true;
      if ((Done = SM.isValidEndState()))
      return Error(Tok.getLoc(), "unknown token in expression");
      return Error(getLexer().getErrLoc(), getLexer().getErr());
      UpdateLocLex = false;
      if (ParseIntelDotOperator(SM, End))
      if ((Done = SM.isValidEndState()))
      return Error(Tok.getLoc(), "unknown token in expression");
      UpdateLocLex = false;
      if (ParseIntelDotOperator(SM, End))
      if ((Done = SM.isValidEndState()))
      return Error(Tok.getLoc(), "unknown token in expression");
      UpdateLocLex = false;
      if (!Val->evaluateAsAbsolute(Res, getStreamer().getAssemblerPtr()))
        return Error(ValueLoc, "expected absolute value");
      if (SM.onInteger(Res, ErrMsg))
        return Error(ValueLoc, ErrMsg);
      UpdateLocLex = false;
      size_t DotOffset = Identifier.find_first_of('.');
      const AsmToken &NextTok = getLexer().peekTok();
        End = consumeToken();
      if (!ParseRegister(Reg, IdentLoc, End, true)) {
        if (SM.onRegister(Reg, ErrMsg))
          return Error(IdentLoc, ErrMsg);
        const std::pair<StringRef, StringRef> IDField =
        if (!Field.empty() &&
            !MatchRegisterByName(Reg, ID, IdentLoc, IDEndLoc)) {
          if (SM.onRegister(Reg, ErrMsg))
            return Error(IdentLoc, ErrMsg);
            return Error(FieldStartLoc, "unknown offset");
          else if (SM.onPlus(ErrMsg))
            return Error(getTok().getLoc(), ErrMsg);
          else if (SM.onInteger(Info.Offset, ErrMsg))
            return Error(IdentLoc, ErrMsg);
          SM.setTypeInfo(Info.Type);
          End = consumeToken();
      bool ParseError = false;
      if (ParseIntelNamedOperator(Identifier, SM, ParseError, End)) {
          ParseMasmNamedOperator(Identifier, SM, ParseError, End)) {
        if (ParseIntelDotOperator(SM, End))
      if (isParsingMSInlineAsm()) {
        if (unsigned OpKind = IdentifyIntelInlineAsmOperator(Identifier)) {
          if (int64_t Val = ParseIntelInlineAsmOperator(OpKind)) {
            if (SM.onInteger(Val, ErrMsg))
              return Error(IdentLoc, ErrMsg);
          return Error(IdentLoc, "expected identifier");
        if (ParseIntelInlineAsmIdentifier(Val, Identifier, Info, false, End))
        else if (SM.onIdentifierExpr(Val, Identifier, Info, FieldInfo.Type,
          return Error(IdentLoc, ErrMsg);
        if (unsigned OpKind = IdentifyMasmOperator(Identifier)) {
          if (ParseMasmOperator(OpKind, Val))
          if (SM.onInteger(Val, ErrMsg))
            return Error(IdentLoc, ErrMsg);
        if (!getParser().lookUpType(Identifier, FieldInfo.Type)) {
            getParser().parseIdentifier(Identifier);
            if (getParser().lookUpField(FieldInfo.Type.Name, Identifier,
              return Error(IdentLoc, "Unable to lookup field reference!",
          if (SM.onInteger(FieldInfo.Offset, ErrMsg))
            return Error(IdentLoc, ErrMsg);
      if (getParser().parsePrimaryExpr(Val, End, &FieldInfo.Type)) {
        return Error(Tok.getLoc(), "Unexpected identifier!");
      } else if (SM.onIdentifierExpr(Val, Identifier, Info, FieldInfo.Type,
        return Error(IdentLoc, ErrMsg);
      SMLoc Loc = getTok().getLoc();
      int64_t IntVal = getTok().getIntVal();
      End = consumeToken();
      UpdateLocLex = false;
        if (IDVal == "f" || IDVal == "b") {
              getContext().getDirectionalLocalSymbol(IntVal, IDVal == "b");
          if (IDVal == "b" && Sym->isUndefined())
            return Error(Loc, "invalid reference to undefined symbol");
          if (SM.onIdentifierExpr(Val, Identifier, Info, Type,
                                  isParsingMSInlineAsm(), ErrMsg))
            return Error(Loc, ErrMsg);
          End = consumeToken();
          if (SM.onInteger(IntVal, ErrMsg))
            return Error(Loc, ErrMsg);
        if (SM.onInteger(IntVal, ErrMsg))
          return Error(Loc, ErrMsg);
      if (SM.onPlus(ErrMsg))
        return Error(getTok().getLoc(), ErrMsg);
      if (SM.onMinus(ErrMsg))
        return Error(getTok().getLoc(), ErrMsg);
      SM.onLShift();
      break;
      SM.onRShift();
      break;
        return Error(Tok.getLoc(), "unexpected bracket encountered");
      tryParseOperandIdx(PrevTK, SM);
      if (SM.onRBrac(ErrMsg)) {
      return Error(Tok.getLoc(), "unknown token in expression");
    if (!Done && UpdateLocLex)
      End = consumeToken();
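// When parsing MS inline asm, the textual Intel expression is rewritten so the
// AsmRewrites machinery can substitute the frontend's view of symbols and
// registers back into the emitted assembly.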
void X86AsmParser::RewriteIntelExpression(IntelExprStateMachine &SM,
  unsigned ExprLen = End.getPointer() - Start.getPointer();
  if (SM.getSym() && !SM.isOffsetOperator()) {
    if (unsigned Len = SymName.data() - Start.getPointer())
    ExprLen = End.getPointer() - (SymName.data() + SymName.size());
    if (!(SM.getBaseReg() || SM.getIndexReg() || SM.getImm())) {
  if (SM.getBaseReg())
  if (SM.getIndexReg())
  if (SM.isOffsetOperator())
    OffsetNameStr = SM.getSymName();
  IntelExpr Expr(BaseRegStr, IndexRegStr, SM.getScale(), OffsetNameStr,
                 SM.getImm(), SM.isMemExpr());
  InstInfo->AsmRewrites->emplace_back(Loc, ExprLen, Expr);

bool X86AsmParser::ParseIntelInlineAsmIdentifier(
    bool IsUnevaluatedOperand, SMLoc &End, bool IsParsingOffsetOperator) {
  assert(isParsingMSInlineAsm() && "Expected to be parsing inline assembly.");
  SemaCallback->LookupInlineAsmIdentifier(LineBuf, Info, IsUnevaluatedOperand);
  } while (End.getPointer() < EndPtr);
         "frontend claimed part of a token?");
    SemaCallback->LookupInlineAsmLabel(Identifier, getSourceManager(),
    assert(InternalName.size() && "We should have an internal name here.");
    if (!IsParsingOffsetOperator)
  MCSymbol *Sym = getContext().getOrCreateSymbol(Identifier);

  const SMLoc consumedToken = consumeToken();
      return Error(Tok.getLoc(), "Expected an identifier after {");
        .Case("rn", X86::STATIC_ROUNDING::TO_NEAREST_INT)
        .Case("rd", X86::STATIC_ROUNDING::TO_NEG_INF)
        .Case("ru", X86::STATIC_ROUNDING::TO_POS_INF)
        .Case("rz", X86::STATIC_ROUNDING::TO_ZERO)
      return Error(Tok.getLoc(), "Invalid rounding mode.");
      return Error(Tok.getLoc(), "Expected - at this point");
      return Error(Tok.getLoc(), "Expected } at this point");
    const MCExpr *RndModeOp =
    return Error(Tok.getLoc(), "Expected } at this point");
  return Error(Tok.getLoc(), "unknown token in expression");

bool X86AsmParser::ParseIntelDotOperator(IntelExprStateMachine &SM,
  } else if ((isParsingMSInlineAsm() || getParser().isParsingMasm()) &&
    TrailingDot = DotDispStr.substr(DotDispStr.size() - 1);
    const std::pair<StringRef, StringRef> BaseMember = DotDispStr.split('.');
    if (getParser().lookUpField(SM.getType(), DotDispStr, Info) &&
        getParser().lookUpField(SM.getSymName(), DotDispStr, Info) &&
        getParser().lookUpField(DotDispStr, Info) &&
         SemaCallback->LookupInlineAsmField(Base, Member, Info.Offset)))
      return Error(Tok.getLoc(), "Unable to lookup field reference!");
    return Error(Tok.getLoc(), "Unexpected token type!");
  const char *DotExprEndLoc = DotDispStr.data() + DotDispStr.size();
  if (!TrailingDot.empty())
  SM.addImm(Info.Offset);
  SM.setTypeInfo(Info.Type);

  SMLoc Start = Lex().getLoc();
  ID = getTok().getString();
  if (!isParsingMSInlineAsm()) {
        getParser().parsePrimaryExpr(Val, End, nullptr))
      return Error(Start, "unexpected token!");
  } else if (ParseIntelInlineAsmIdentifier(Val, ID, Info, false, End, true)) {
    return Error(Start, "unable to lookup expression");
    return Error(Start, "offset operator cannot yet handle constants");

unsigned X86AsmParser::IdentifyIntelInlineAsmOperator(StringRef Name) {
      .Cases("TYPE", "type", IOK_TYPE)
      .Cases("SIZE", "size", IOK_SIZE)
      .Cases("LENGTH", "length", IOK_LENGTH)

unsigned X86AsmParser::ParseIntelInlineAsmOperator(unsigned OpKind) {
  const MCExpr *Val = nullptr;
  if (ParseIntelInlineAsmIdentifier(Val, Identifier, Info,
    Error(Start, "unable to lookup expression");
  case IOK_LENGTH: CVal = Info.Var.Length; break;
  case IOK_SIZE: CVal = Info.Var.Size; break;
  case IOK_TYPE: CVal = Info.Var.Type; break;

unsigned X86AsmParser::IdentifyMasmOperator(StringRef Name) {
      .Case("type", MOK_TYPE)
      .Cases("size", "sizeof", MOK_SIZEOF)
      .Cases("length", "lengthof", MOK_LENGTHOF)

bool X86AsmParser::ParseMasmOperator(unsigned OpKind, int64_t &Val) {
  if (OpKind == MOK_SIZEOF || OpKind == MOK_TYPE) {
    const AsmToken &IDTok = InParens ? getLexer().peekTok() : Parser.getTok();
  IntelExprStateMachine SM;
  if (ParseIntelExpression(SM, End))
    Val = SM.getLength();
    Val = SM.getElementSize();
    return Error(OpLoc, "expression has unknown type", SMRange(Start, End));
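// Maps Intel-syntax size keywords (BYTE, WORD, DWORD, ... ZMMWORD) to a width
// in bits; the "PTR" keyword is expected to follow.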
bool X86AsmParser::ParseIntelMemoryOperandSize(unsigned &Size) {
      .Cases("BYTE", "byte", 8)
      .Cases("WORD", "word", 16)
      .Cases("DWORD", "dword", 32)
      .Cases("FLOAT", "float", 32)
      .Cases("LONG", "long", 32)
      .Cases("FWORD", "fword", 48)
      .Cases("DOUBLE", "double", 64)
      .Cases("QWORD", "qword", 64)
      .Cases("MMWORD", "mmword", 64)
      .Cases("XWORD", "xword", 80)
      .Cases("TBYTE", "tbyte", 80)
      .Cases("XMMWORD", "xmmword", 128)
      .Cases("YMMWORD", "ymmword", 256)
      .Cases("ZMMWORD", "zmmword", 512)
      return Error(Tok.getLoc(), "Expected 'PTR' or 'ptr' token!");

  if (ParseIntelMemoryOperandSize(Size))
    return ParseRoundingModeOp(Start, Operands);
    if (RegNo == X86::RIP)
      return Error(Start, "rip can only be used as a base register");
      return Error(Start, "expected memory operand after 'ptr', "
                          "found register operand instead");
    if (!X86MCRegisterClasses[X86::SEGMENT_REGRegClassID].contains(RegNo))
      return Error(Start, "invalid segment register");
    Start = Lex().getLoc();

  IntelExprStateMachine SM;
  if (ParseIntelExpression(SM, End))
  if (isParsingMSInlineAsm())
    RewriteIntelExpression(SM, Start, Tok.getLoc());
  int64_t Imm = SM.getImm();
  const MCExpr *Disp = SM.getSym();
  if (!SM.isMemExpr() && !RegNo) {
    if (isParsingMSInlineAsm() && SM.isOffsetOperator()) {
          SM.getSymName(), Info.Var.Decl,
          Info.Var.IsGlobalLV));

  unsigned BaseReg = SM.getBaseReg();
  unsigned IndexReg = SM.getIndexReg();
  if (IndexReg && BaseReg == X86::RIP)
  unsigned Scale = SM.getScale();
    Size = SM.getElementSize() << 3;
  if (Scale == 0 && BaseReg != X86::ESP && BaseReg != X86::RSP &&
      (IndexReg == X86::ESP || IndexReg == X86::RSP))
      !(X86MCRegisterClasses[X86::VR128XRegClassID].contains(IndexReg) ||
        X86MCRegisterClasses[X86::VR256XRegClassID].contains(IndexReg) ||
        X86MCRegisterClasses[X86::VR512RegClassID].contains(IndexReg)) &&
      (X86MCRegisterClasses[X86::VR128XRegClassID].contains(BaseReg) ||
       X86MCRegisterClasses[X86::VR256XRegClassID].contains(BaseReg) ||
       X86MCRegisterClasses[X86::VR512RegClassID].contains(BaseReg)))
      X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg))
    return Error(Start, "16-bit addresses cannot have a scale");
  if ((BaseReg == X86::SI || BaseReg == X86::DI) &&
      (IndexReg == X86::BX || IndexReg == X86::BP))
  if ((BaseReg || IndexReg) &&
    return Error(Start, ErrMsg);
  bool IsUnconditionalBranch =
      Name.equals_insensitive("jmp") || Name.equals_insensitive("call");
  if (isParsingMSInlineAsm())
    return CreateMemForMSInlineAsm(RegNo, Disp, BaseReg, IndexReg, Scale,
                                   IsUnconditionalBranch && is64BitMode(),
                                   Start, End, Size, SM.getSymName(),
  unsigned DefaultBaseReg = X86::NoRegister;
  bool MaybeDirectBranchDest = true;
    if (is64BitMode() && SM.getElementSize() > 0) {
      DefaultBaseReg = X86::RIP;
    if (IsUnconditionalBranch) {
        MaybeDirectBranchDest = false;
          DefaultBaseReg = X86::RIP;
  } else if (!BaseReg && !IndexReg && Disp &&
    if (is64BitMode()) {
      if (SM.getSize() == 8) {
        MaybeDirectBranchDest = false;
        DefaultBaseReg = X86::RIP;
      if (SM.getSize() == 4 || SM.getSize() == 2)
        MaybeDirectBranchDest = false;
  } else if (IsUnconditionalBranch) {
    if (!PtrInOperand && SM.isOffsetOperator())
          Start, "`OFFSET` operator cannot be used in an unconditional branch");
    if (PtrInOperand || SM.isBracketUsed())
      MaybeDirectBranchDest = false;

  if ((BaseReg || IndexReg || RegNo || DefaultBaseReg != X86::NoRegister))
        getPointerWidth(), RegNo, Disp, BaseReg, IndexReg, Scale, Start, End,
        0, false, MaybeDirectBranchDest));
        MaybeDirectBranchDest));

  switch (getLexer().getKind()) {
                  "expected immediate expression") ||
        getParser().parseExpression(Val, End) ||
        check(isa<X86MCExpr>(Val), L, "expected immediate expression"))
    return ParseRoundingModeOp(Start, Operands);
    const MCExpr *Expr = nullptr;
    if (auto *RE = dyn_cast<X86MCExpr>(Expr)) {
      Reg = RE->getRegNo();
      if (Reg == X86::EIZ || Reg == X86::RIZ)
            Loc, "%eiz and %riz can only be used as index registers",
      if (Reg == X86::RIP)
        return Error(Loc, "%rip can only be used as a base register",
      if (!X86MCRegisterClasses[X86::SEGMENT_REGRegClassID].contains(Reg))
        return Error(Loc, "invalid segment register");
    return ParseMemOperand(Reg, Expr, Loc, EndLoc, Operands);

bool X86AsmParser::ParseZ(std::unique_ptr<X86Operand> &Z,
                          const SMLoc &StartLoc) {
        (getLexer().getTok().getIdentifier() == "z")))
      return Error(getLexer().getLoc(), "Expected } at this point");
  const SMLoc consumedToken = consumeToken();
    if (getLexer().getTok().getIntVal() != 1)
      return TokError("Expected 1to<NUM> at this point");
      return TokError("Expected 1to<NUM> at this point");
    StringRef BroadcastString = (Prefix + getLexer().getTok().getIdentifier())
      return TokError("Expected 1to<NUM> at this point");
    const char *BroadcastPrimitive =
            .Case("1to2", "{1to2}")
            .Case("1to4", "{1to4}")
            .Case("1to8", "{1to8}")
            .Case("1to16", "{1to16}")
            .Case("1to32", "{1to32}")
    if (!BroadcastPrimitive)
      return TokError("Invalid memory broadcast primitive.");
      return TokError("Expected } at this point");

      std::unique_ptr<X86Operand> Z;
      if (ParseZ(Z, consumedToken))
      SMLoc StartLoc = Z ? consumeToken() : consumedToken;
      if (!parseRegister(RegNo, RegLoc, StartLoc) &&
          X86MCRegisterClasses[X86::VK1RegClassID].contains(RegNo)) {
        if (RegNo == X86::K0)
          return Error(RegLoc, "Register k0 can't be used as write mask");
          return Error(getLexer().getLoc(), "Expected } at this point");
        return Error(getLexer().getLoc(),
                     "Expected an op-mask register at this point");
          if (ParseZ(Z, consumeToken()) || !Z)
            return Error(getLexer().getLoc(),
                         "Expected a {z} mark at this point");
bool X86AsmParser::ParseMemOperand(unsigned SegReg, const MCExpr *Disp,
  auto isAtMemOperand = [this]() {
    auto TokCount = this->getLexer().peekTokens(Buf, true);
    switch (Buf[0].getKind()) {
      if ((TokCount > 1) &&
          (Buf[0].getLoc().getPointer() + 1 == Buf[1].getLoc().getPointer()))
                            Buf[1].getIdentifier().size() + 1);
    MCSymbol *Sym = this->getContext().getOrCreateSymbol(Id);
    if (Sym->isVariable()) {
      auto V = Sym->getVariableValue(false);
      return isa<X86MCExpr>(V);

    if (!isAtMemOperand()) {
      assert(!isa<X86MCExpr>(Disp) && "Expected non-register here.");
          0, 0, 1, StartLoc, EndLoc));

  unsigned BaseReg = 0, IndexReg = 0, Scale = 1;
  SMLoc BaseLoc = getLexer().getLoc();
        check(!isa<X86MCExpr>(E), BaseLoc, "expected register here"))
    BaseReg = cast<X86MCExpr>(E)->getRegNo();
    if (BaseReg == X86::EIZ || BaseReg == X86::RIZ)
      return Error(BaseLoc, "eiz and riz can only be used as index registers",
      if (!isa<X86MCExpr>(E)) {
        if (!E->evaluateAsAbsolute(ScaleVal, getStreamer().getAssemblerPtr()))
          return Error(Loc, "expected absolute expression");
          Warning(Loc, "scale factor without index register is ignored");
        IndexReg = cast<X86MCExpr>(E)->getRegNo();
        if (BaseReg == X86::RIP)
                       "%rip as base register can not have an index register");
        if (IndexReg == X86::RIP)
          return Error(Loc, "%rip is not allowed as an index register");
            return Error(Loc, "expected scale expression");
          if (X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg) &&
            return Error(Loc, "scale factor in 16-bit address must be 1");
            return Error(Loc, ErrMsg);

  if (BaseReg == X86::DX && IndexReg == 0 && Scale == 1 && SegReg == 0 &&
      isa<MCConstantExpr>(Disp) &&
      cast<MCConstantExpr>(Disp)->getValue() == 0) {
    return Error(BaseLoc, ErrMsg);
  if (SegReg || BaseReg || IndexReg)
                                             BaseReg, IndexReg, Scale, StartLoc,

bool X86AsmParser::parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) {
  if (parseRegister(RegNo, StartLoc, EndLoc))
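// Instruction parsing proper: handle the {vex}/{vex2}/{vex3}/{evex}/{disp8}/
// {disp32} pseudo-prefixes, rewrite pseudo-mnemonics such as cmp<cc>ss and
// vpcmp<cc>b into the underlying instruction plus an immediate predicate,
// consume prefix mnemonics like lock/rep, and supply the default SI/DI memory
// operands for the string instructions.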
  ForcedVEXEncoding = VEXEncoding_Default;
  ForcedDispEncoding = DispEncoding_Default;
  UseApxExtendedReg = false;

    if (Prefix == "vex")
      ForcedVEXEncoding = VEXEncoding_VEX;
    else if (Prefix == "vex2")
      ForcedVEXEncoding = VEXEncoding_VEX2;
    else if (Prefix == "vex3")
      ForcedVEXEncoding = VEXEncoding_VEX3;
    else if (Prefix == "evex")
      ForcedVEXEncoding = VEXEncoding_EVEX;
    else if (Prefix == "disp8")
      ForcedDispEncoding = DispEncoding_Disp8;
    else if (Prefix == "disp32")
      ForcedDispEncoding = DispEncoding_Disp32;
      return Error(NameLoc, "unknown prefix");

  if (isParsingMSInlineAsm()) {
    if (Name.equals_insensitive("vex"))
      ForcedVEXEncoding = VEXEncoding_VEX;
    else if (Name.equals_insensitive("vex2"))
      ForcedVEXEncoding = VEXEncoding_VEX2;
    else if (Name.equals_insensitive("vex3"))
      ForcedVEXEncoding = VEXEncoding_VEX3;
    else if (Name.equals_insensitive("evex"))
      ForcedVEXEncoding = VEXEncoding_EVEX;
    if (ForcedVEXEncoding != VEXEncoding_Default) {

  if (Name.consume_back(".d32")) {
    ForcedDispEncoding = DispEncoding_Disp32;
  } else if (Name.consume_back(".d8")) {
    ForcedDispEncoding = DispEncoding_Disp8;

  if (isParsingIntelSyntax() &&
      (PatchedName == "jmp" || PatchedName == "jc" || PatchedName == "jnc" ||
       PatchedName == "jcxz" || PatchedName == "jecxz" ||
                           : NextTok == "short") {
                                 NextTok.size() + 1);

      PatchedName != "setb" && PatchedName != "setnb")
    PatchedName = PatchedName.substr(0, Name.size() - 1);

  unsigned ComparisonPredicate = ~0U;

    bool IsVCMP = PatchedName[0] == 'v';
    unsigned CCIdx = IsVCMP ? 4 : 3;
            PatchedName.slice(CCIdx, PatchedName.size() - 2))
            .Case("eq_oq", 0x00)
            .Case("lt_os", 0x01)
            .Case("le_os", 0x02)
            .Case("unord", 0x03)
            .Case("unord_q", 0x03)
            .Case("neq_uq", 0x04)
            .Case("nlt_us", 0x05)
            .Case("nle_us", 0x06)
            .Case("ord_q", 0x07)
            .Case("eq_uq", 0x08)
            .Case("nge_us", 0x09)
            .Case("ngt_us", 0x0A)
            .Case("false", 0x0B)
            .Case("false_oq", 0x0B)
            .Case("neq_oq", 0x0C)
            .Case("ge_os", 0x0D)
            .Case("gt_os", 0x0E)
            .Case("true_uq", 0x0F)
            .Case("eq_os", 0x10)
            .Case("lt_oq", 0x11)
            .Case("le_oq", 0x12)
            .Case("unord_s", 0x13)
            .Case("neq_us", 0x14)
            .Case("nlt_uq", 0x15)
            .Case("nle_uq", 0x16)
            .Case("ord_s", 0x17)
            .Case("eq_us", 0x18)
            .Case("nge_uq", 0x19)
            .Case("ngt_uq", 0x1A)
            .Case("false_os", 0x1B)
            .Case("neq_os", 0x1C)
            .Case("ge_oq", 0x1D)
            .Case("gt_oq", 0x1E)
            .Case("true_us", 0x1F)
        PatchedName = IsVCMP ? "vcmpss" : "cmpss";
      else if (PatchedName.endswith("sd"))
        PatchedName = IsVCMP ? "vcmpsd" : "cmpsd";
      else if (PatchedName.endswith("ps"))
        PatchedName = IsVCMP ? "vcmpps" : "cmpps";
      else if (PatchedName.endswith("pd"))
        PatchedName = IsVCMP ? "vcmppd" : "cmppd";
      else if (PatchedName.endswith("sh"))
        PatchedName = "vcmpsh";
      else if (PatchedName.endswith("ph"))
        PatchedName = "vcmpph";
      ComparisonPredicate = CC;

      (PatchedName.back() == 'b' || PatchedName.back() == 'w' ||
       PatchedName.back() == 'd' || PatchedName.back() == 'q')) {
    unsigned SuffixSize = PatchedName.drop_back().back() == 'u' ? 2 : 1;
            PatchedName.slice(5, PatchedName.size() - SuffixSize))
    if (CC != ~0U && (CC != 0 || SuffixSize == 2)) {
      switch (PatchedName.back()) {
      case 'b': PatchedName = SuffixSize == 2 ? "vpcmpub" : "vpcmpb"; break;
      case 'w': PatchedName = SuffixSize == 2 ? "vpcmpuw" : "vpcmpw"; break;
      case 'd': PatchedName = SuffixSize == 2 ? "vpcmpud" : "vpcmpd"; break;
      case 'q': PatchedName = SuffixSize == 2 ? "vpcmpuq" : "vpcmpq"; break;
      ComparisonPredicate = CC;

      (PatchedName.back() == 'b' || PatchedName.back() == 'w' ||
       PatchedName.back() == 'd' || PatchedName.back() == 'q')) {
    unsigned SuffixSize = PatchedName.drop_back().back() == 'u' ? 2 : 1;
            PatchedName.slice(5, PatchedName.size() - SuffixSize))
      switch (PatchedName.back()) {
      case 'b': PatchedName = SuffixSize == 2 ? "vpcomub" : "vpcomb"; break;
      case 'w': PatchedName = SuffixSize == 2 ? "vpcomuw" : "vpcomw"; break;
      case 'd': PatchedName = SuffixSize == 2 ? "vpcomud" : "vpcomd"; break;
      case 'q': PatchedName = SuffixSize == 2 ? "vpcomuq" : "vpcomq"; break;
      ComparisonPredicate = CC;

          .Cases("cs", "ds", "es", "fs", "gs", "ss", true)
          .Cases("rex64", "data32", "data16", "addr32", "addr16", true)
          .Cases("xacquire", "xrelease", true)
          .Cases("acquire", "release", isParsingIntelSyntax())

  auto isLockRepeatNtPrefix = [](StringRef N) {
        .Cases("lock", "rep", "repe", "repz", "repne", "repnz", "notrack", true)

  bool CurlyAsEndOfStatement = false;

  while (isLockRepeatNtPrefix(Name.lower())) {

  while (Name.startswith(";") || Name.startswith("\n") ||
         Name.startswith("#") || Name.startswith("\t") ||
         Name.startswith("/")) {

  if (PatchedName == "data16" && is16BitMode()) {
    return Error(NameLoc, "redundant data16 prefix");
  if (PatchedName == "data32") {
      return Error(NameLoc, "redundant data32 prefix");
      return Error(NameLoc, "'data32' is not supported in 64-bit mode");
    PatchedName = "data16";
      if (Next == "callw")
      if (Next == "ljmpw")
        ForcedDataPrefix = X86::Is32Bit;

  if (ComparisonPredicate != ~0U && !isParsingIntelSyntax()) {
                                             getParser().getContext());

    CurlyAsEndOfStatement =
        isParsingIntelSyntax() && isParsingMSInlineAsm() &&
      return TokError("unexpected token in argument list");

  if (ComparisonPredicate != ~0U && isParsingIntelSyntax()) {
                                             getParser().getContext());

  else if (CurlyAsEndOfStatement)
                                        getLexer().getTok().getLoc(), 0);

  if (IsFp && Operands.size() == 1) {
        .Case("fsub", "fsubp")
        .Case("fdiv", "fdivp")
        .Case("fsubr", "fsubrp")
        .Case("fdivr", "fdivrp");

  if ((Name == "mov" || Name == "movw" || Name == "movl") &&
      X86MCRegisterClasses[X86::SEGMENT_REGRegClassID].contains(
      (X86MCRegisterClasses[X86::GR16RegClassID].contains(Op1.getReg()) ||
       X86MCRegisterClasses[X86::GR32RegClassID].contains(Op1.getReg()))) {
    if (Name != "mov" && Name[3] == (is16BitMode() ? 'l' : 'w')) {
      Name = is16BitMode() ? "movw" : "movl";

  if ((Name == "outb" || Name == "outsb" || Name == "outw" || Name == "outsw" ||

  bool HadVerifyError = false;

  if (Name.startswith("ins") &&
    AddDefaultSrcDestOperands(TmpOperands,
                              DefaultMemDIOperand(NameLoc));
    HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);

  if (Name.startswith("outs") &&
      (Name == "outsb" || Name == "outsw" || Name == "outsl" ||
       Name == "outsd" || Name == "outs")) {
    AddDefaultSrcDestOperands(TmpOperands, DefaultMemSIOperand(NameLoc),
    HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);

  if (Name.startswith("lods") &&
      (Name == "lods" || Name == "lodsb" || Name == "lodsw" ||
       Name == "lodsl" || Name == "lodsd" || Name == "lodsq")) {
    TmpOperands.push_back(DefaultMemSIOperand(NameLoc));
    HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);

  if (Name.startswith("stos") &&
      (Name == "stos" || Name == "stosb" || Name == "stosw" ||
       Name == "stosl" || Name == "stosd" || Name == "stosq")) {
    TmpOperands.push_back(DefaultMemDIOperand(NameLoc));
    HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);

  if (Name.startswith("scas") &&
      (Name == "scas" || Name == "scasb" || Name == "scasw" ||
       Name == "scasl" || Name == "scasd" || Name == "scasq")) {
    TmpOperands.push_back(DefaultMemDIOperand(NameLoc));
    HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);

  if (Name.startswith("cmps") &&
      (Name == "cmps" || Name == "cmpsb" || Name == "cmpsw" ||
       Name == "cmpsl" || Name == "cmpsd" || Name == "cmpsq")) {
    AddDefaultSrcDestOperands(TmpOperands, DefaultMemDIOperand(NameLoc),
                              DefaultMemSIOperand(NameLoc));
    HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);

  if (((Name.startswith("movs") &&
        (Name == "movs" || Name == "movsb" || Name == "movsw" ||
         Name == "movsl" || Name == "movsd" || Name == "movsq")) ||
       (Name.startswith("smov") &&
        (Name == "smov" || Name == "smovb" || Name == "smovw" ||
         Name == "smovl" || Name == "smovd" || Name == "smovq"))) &&
    if (Name == "movsd" && Operands.size() == 1 && !isParsingIntelSyntax())
    AddDefaultSrcDestOperands(TmpOperands, DefaultMemSIOperand(NameLoc),
                              DefaultMemDIOperand(NameLoc));
    HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);

  if (HadVerifyError) {
    return HadVerifyError;

           "size, (R|E)BX will be used for the location");

  if (ForcedVEXEncoding != VEXEncoding_VEX3 &&

  default:
    return false;
    if (ForcedDispEncoding == DispEncoding_Disp32) {
      Inst.setOpcode(is16BitMode() ? X86::JMP_2 : X86::JMP_4);
    if (ForcedDispEncoding == DispEncoding_Disp32) {
      Inst.setOpcode(is16BitMode() ? X86::JCC_2 : X86::JCC_4);
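// Target-specific semantic checks performed after matching: source/destination
// distinctness for gathers and the 4-operand FMA forms, the AH/BH/CH/DH-vs-REX
// restriction, and the RIP-relative requirement for prefetchit0/prefetchit1.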
  using namespace X86;
    return Warning(Ops[0]->getStartLoc(), "Destination register should be "
                                          "distinct from source registers");
    return Warning(Ops[0]->getStartLoc(), "Destination register should be "
                                          "distinct from source registers");
  } else if (isV4FMADDPS(Opcode) || isV4FMADDSS(Opcode) ||
    unsigned Src2Enc = MRI->getEncodingValue(Src2);
    if (Src2Enc % 4 != 0) {
      unsigned GroupStart = (Src2Enc / 4) * 4;
      unsigned GroupEnd = GroupStart + 3;
      return Warning(Ops[0]->getStartLoc(),
                     "source register '" + RegName + "' implicitly denotes '" +
  } else if (isVGATHERDPD(Opcode) || isVGATHERDPS(Opcode) ||
    unsigned Index = MRI->getEncodingValue(
      return Warning(Ops[0]->getStartLoc(), "index and destination registers "
                                            "should be distinct");
    unsigned Index = MRI->getEncodingValue(
    if (Dest == Mask || Dest == Index || Mask == Index)
      return Warning(Ops[0]->getStartLoc(), "mask, index, and destination "
                                            "registers should be distinct");

  for (unsigned i = 0; i != NumOps; ++i) {
    if (Reg == X86::AH || Reg == X86::BH || Reg == X86::CH || Reg == X86::DH)
  if (UsesRex && HReg != X86::NoRegister) {
    return Error(Ops[0]->getStartLoc(),
                 "can't encode '" + RegName + "' in an instruction requiring "

  if ((Opcode == X86::PREFETCHIT0 || Opcode == X86::PREFETCHIT1)) {
          Ops[0]->getStartLoc(),
                             : "'prefetchit1'")) +
              " only supports RIP-relative address");
void X86AsmParser::emitWarningForSpecialLVIInstruction(SMLoc Loc) {
  Warning(Loc, "Instruction may be vulnerable to LVI and "
               "requires manual mitigation");
  Note(SMLoc(), "See https://software.intel.com/"
                "security-software-guidance/insights/"
                "deep-dive-load-value-injection#specialinstructions"
                " for more information");

  bool Parse32 = is32BitMode() || Code16GCC;
      is64BitMode() ? X86::RSP : (Parse32 ? X86::ESP : X86::SP);
  ShlMemOp->addMemOperands(ShlInst, 5);
    emitWarningForSpecialLVIInstruction(Inst.getLoc());

void X86AsmParser::applyLVILoadHardeningMitigation(MCInst &Inst,
    emitWarningForSpecialLVIInstruction(Inst.getLoc());
  } else if (Opcode == X86::REP_PREFIX || Opcode == X86::REPNE_PREFIX) {
    emitWarningForSpecialLVIInstruction(Inst.getLoc());

      getSTI().hasFeature(X86::FeatureLVIControlFlowIntegrity))
    applyLVICFIMitigation(Inst, Out);
      getSTI().hasFeature(X86::FeatureLVILoadHardening))
    applyLVILoadHardeningMitigation(Inst, Out);

bool X86AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                           bool MatchingInlineAsm) {
  if (isParsingIntelSyntax())

                                     bool MatchingInlineAsm) {
          .Case("finit", "fninit")
          .Case("fsave", "fnsave")
          .Case("fstcw", "fnstcw")
          .Case("fstcww", "fnstcw")
          .Case("fstenv", "fnstenv")
          .Case("fstsw", "fnstsw")
          .Case("fstsww", "fnstsw")
          .Case("fclex", "fnclex")
  if (!MatchingInlineAsm)
    emitInstruction(Inst, Operands, Out);

bool X86AsmParser::ErrorMissingFeature(SMLoc IDLoc,
                                       bool MatchingInlineAsm) {
  assert(MissingFeatures.any() && "Unknown missing feature!");
  OS << "instruction requires:";
  for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
    if (MissingFeatures[i])

  unsigned Result = 0;
    if (Prefix.isPrefix()) {
      Result = Prefix.getPrefix();

unsigned X86AsmParser::checkTargetMatchPredicate(MCInst &Inst) {
    return Match_Unsupported;

  if (ForcedVEXEncoding == VEXEncoding_EVEX &&
    return Match_Unsupported;

  if ((ForcedVEXEncoding == VEXEncoding_VEX ||
       ForcedVEXEncoding == VEXEncoding_VEX2 ||
       ForcedVEXEncoding == VEXEncoding_VEX3) &&
    return Match_Unsupported;

      (ForcedVEXEncoding != VEXEncoding_VEX &&
       ForcedVEXEncoding != VEXEncoding_VEX2 &&
       ForcedVEXEncoding != VEXEncoding_VEX3))
    return Match_Unsupported;

  return Match_Success;
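// AT&T matching: if the bare mnemonic does not match and carries no size
// suffix, retry with each of the b/w/l/q (or FP s/l/t) suffixes appended and
// report an ambiguity when more than one of them succeeds.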
3994bool X86AsmParser::MatchAndEmitATTInstruction(
SMLoc IDLoc,
unsigned &
Opcode,
3998 bool MatchingInlineAsm) {
4000 assert((*
Operands[0]).isToken() &&
"Leading operand should always be a mnemonic!");
4001 SMRange EmptyRange = std::nullopt;
4005 Out, MatchingInlineAsm);
4013 if (ForcedVEXEncoding == VEXEncoding_VEX)
4015 else if (ForcedVEXEncoding == VEXEncoding_VEX2)
4017 else if (ForcedVEXEncoding == VEXEncoding_VEX3)
4019 else if (ForcedVEXEncoding == VEXEncoding_EVEX)
4023 if (ForcedDispEncoding == DispEncoding_Disp8)
4025 else if (ForcedDispEncoding == DispEncoding_Disp32)
4033 if (ForcedDataPrefix == X86::Is32Bit)
4034 SwitchMode(X86::Is32Bit);
4038 MissingFeatures, MatchingInlineAsm,
4039 isParsingIntelSyntax());
4040 if (ForcedDataPrefix == X86::Is32Bit) {
4041 SwitchMode(X86::Is16Bit);
4042 ForcedDataPrefix = 0;
4044 switch (OriginalError) {
4047 if (!MatchingInlineAsm && validateInstruction(Inst,
Operands))
4052 if (!MatchingInlineAsm)
4053 while (processInstruction(Inst,
Operands))
4057 if (!MatchingInlineAsm)
4058 emitInstruction(Inst,
Operands, Out);
4061 case Match_InvalidImmUnsignedi4: {
4063 if (ErrorLoc ==
SMLoc())
4065 return Error(ErrorLoc,
"immediate must be an integer in range [0, 15]",
4066 EmptyRange, MatchingInlineAsm);
4068 case Match_MissingFeature:
4069 return ErrorMissingFeature(IDLoc, MissingFeatures, MatchingInlineAsm);
4070 case Match_InvalidOperand:
4071 case Match_MnemonicFail:
4072 case Match_Unsupported:
4075 if (
Op.getToken().empty()) {
4076 Error(IDLoc,
"instruction must have size higher than 0", EmptyRange,
4091 Op.setTokenValue(Tmp);
4099 const char *Suffixes =
Base[0] !=
'f' ?
"bwlq" :
"slt\0";
4101 const char *MemSize =
Base[0] !=
'f' ?
"\x08\x10\x20\x40" :
"\x20\x40\x50\0";
4113 bool HasVectorReg =
false;
4118 HasVectorReg =
true;
4119 else if (X86Op->
isMem()) {
4121 assert(
MemOp->Mem.Size == 0 &&
"Memory size always 0 under ATT syntax");
4128 for (
unsigned I = 0,
E = std::size(
Match);
I !=
E; ++
I) {
4129 Tmp.
back() = Suffixes[
I];
4130 if (
MemOp && HasVectorReg)
4131 MemOp->Mem.Size = MemSize[
I];
4132 Match[
I] = Match_MnemonicFail;
4133 if (
MemOp || !HasVectorReg) {
4135 MatchInstruction(
Operands, Inst, ErrorInfoIgnore, MissingFeatures,
4136 MatchingInlineAsm, isParsingIntelSyntax());
4138 if (
Match[
I] == Match_MissingFeature)
4139 ErrorInfoMissingFeatures = MissingFeatures;
4150 if (NumSuccessfulMatches == 1) {
4151 if (!MatchingInlineAsm && validateInstruction(Inst,
Operands))
4156 if (!MatchingInlineAsm)
4157 while (processInstruction(Inst,
Operands))
4161 if (!MatchingInlineAsm)
4162 emitInstruction(Inst,
Operands, Out);
4171 if (NumSuccessfulMatches > 1) {
4173 unsigned NumMatches = 0;
4174 for (
unsigned I = 0,
E = std::size(
Match);
I !=
E; ++
I)
4175 if (
Match[
I] == Match_Success)
4176 MatchChars[NumMatches++] = Suffixes[
I];
4180 OS <<
"ambiguous instructions require an explicit suffix (could be ";
4181 for (
unsigned i = 0; i != NumMatches; ++i) {
4184 if (i + 1 == NumMatches)
4186 OS <<
"'" <<
Base << MatchChars[i] <<
"'";
4189 Error(IDLoc,
OS.str(), EmptyRange, MatchingInlineAsm);
  // None of the suffixed variants matched. If every candidate reported an
  // invalid mnemonic, the original mnemonic itself was bad.
  if (llvm::count(Match, (unsigned)Match_MnemonicFail) == 4) {
    if (OriginalError == Match_MnemonicFail)
      return Error(IDLoc, "invalid instruction mnemonic '" + Base + "'",
                   Op.getLocRange(), MatchingInlineAsm);

    if (OriginalError == Match_Unsupported)
      return Error(IDLoc, "unsupported instruction", EmptyRange,
                   MatchingInlineAsm);

    assert(OriginalError == Match_InvalidOperand && "Unexpected error");
    // Recover location info for the offending operand if we know which it was.
    if (ErrorInfo != ~0ULL) {
      if (ErrorInfo >= Operands.size())
        return Error(IDLoc, "too few operands for instruction", EmptyRange,
                     MatchingInlineAsm);

      X86Operand &Operand = (X86Operand &)*Operands[ErrorInfo];
      if (Operand.getStartLoc().isValid()) {
        SMRange OperandRange = Operand.getLocRange();
        return Error(Operand.getStartLoc(), "invalid operand for instruction",
                     OperandRange, MatchingInlineAsm);
      }
    }
    return Error(IDLoc, "invalid operand for instruction", EmptyRange,
                 MatchingInlineAsm);
  }

  // If exactly one candidate failed as unsupported, missing-feature, or
  // invalid-operand, forward that more specific diagnostic.
  if (llvm::count(Match, (unsigned)Match_Unsupported) == 1)
    return Error(IDLoc, "unsupported instruction", EmptyRange,
                 MatchingInlineAsm);
  if (llvm::count(Match, (unsigned)Match_MissingFeature) == 1) {
    ErrorInfo = Match_MissingFeature;
    return ErrorMissingFeature(IDLoc, ErrorInfoMissingFeatures,
                               MatchingInlineAsm);
  }
  if (llvm::count(Match, (unsigned)Match_InvalidOperand) == 1)
    return Error(IDLoc, "invalid operand for instruction", EmptyRange,
                 MatchingInlineAsm);

  // Otherwise, fall back to a generic diagnostic.
  Error(IDLoc, "unknown use of instruction mnemonic without a size suffix",
        EmptyRange, MatchingInlineAsm);
  return true;
}
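
// The Intel-syntax counterpart below cannot rely on mnemonic suffixes: operand
// sizes normally come from "byte ptr"/"dword ptr" qualifiers, so when a memory
// operand is left unsized the matcher is retried with each plausible size.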
bool X86AsmParser::MatchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode,
                                                OperandVector &Operands,
                                                MCStreamer &Out, uint64_t &ErrorInfo,
                                                bool MatchingInlineAsm) {
  assert((*Operands[0]).isToken() && "Leading operand should always be a mnemonic!");
  StringRef Mnemonic = (static_cast<X86Operand &>(*Operands[0])).getToken();
  SMRange EmptyRange = std::nullopt;
  unsigned Prefixes = getPrefixes(Operands);

  // First, handle aliases that expand to multiple instructions.
  MatchFPUWaitAlias(IDLoc, static_cast<X86Operand &>(*Operands[0]), Operands,
                    Out, MatchingInlineAsm);
  X86Operand &Op = static_cast<X86Operand &>(*Operands[0]);

  MCInst Inst;
  if (ForcedVEXEncoding == VEXEncoding_VEX)
    Prefixes |= X86::IP_USE_VEX;
  else if (ForcedVEXEncoding == VEXEncoding_VEX2)
    Prefixes |= X86::IP_USE_VEX2;
  else if (ForcedVEXEncoding == VEXEncoding_VEX3)
    Prefixes |= X86::IP_USE_VEX3;
  else if (ForcedVEXEncoding == VEXEncoding_EVEX)
    Prefixes |= X86::IP_USE_EVEX;
  // Set encoded flags for {disp8} and {disp32}.
  if (ForcedDispEncoding == DispEncoding_Disp8)
    Prefixes |= X86::IP_USE_DISP8;
  else if (ForcedDispEncoding == DispEncoding_Disp32)
    Prefixes |= X86::IP_USE_DISP32;
  if (Prefixes)
    Inst.setFlags(Prefixes);

  // Find one unsized memory operand, if present.
  X86Operand *UnsizedMemOp = nullptr;
  for (const auto &Op : Operands) {
    X86Operand *X86Op = static_cast<X86Operand *>(Op.get());
    if (X86Op->isMemUnsized()) {
      UnsizedMemOp = X86Op;
      // Only one memory operand is allowed; stop at the first unsized one.
      break;
    }
  }

  // Allow some instructions to have implicitly pointer-sized operands, for
  // compatibility with gas.
  if (UnsizedMemOp) {
    static const char *const PtrSizedInstrs[] = {"call", "jmp", "push"};
    for (const char *Instr : PtrSizedInstrs) {
      if (Mnemonic == Instr) {
        UnsizedMemOp->Mem.Size = getPointerWidth();
        break;
      }
    }
  }

  SmallVector<unsigned, 8> Match;
  FeatureBitset ErrorInfoMissingFeatures;
  FeatureBitset MissingFeatures;

  // If a push immediate has an ambiguous size, pick the suffix that matches
  // the current pointer width.
  if (Mnemonic == "push" && Operands.size() == 2) {
    auto *X86Op = static_cast<X86Operand *>(Operands[1].get());
    if (X86Op->isImm()) {
      // If it's not a constant, fall through and let the code below handle it.
      const auto *CE = dyn_cast<MCConstantExpr>(X86Op->getImm());
      unsigned Size = getPointerWidth();
      if (CE &&
          (isIntN(Size, CE->getValue()) || isUIntN(Size, CE->getValue()))) {
        SmallString<16> Tmp;
        Tmp += Mnemonic;
        Tmp += (is64BitMode())   ? "q"
               : (is32BitMode()) ? "l"
               : (is16BitMode()) ? "w"
                                 : " ";
        Op.setTokenValue(Tmp);
        // Do the match in ATT mode to allow explicit suffix usage.
        Match.push_back(MatchInstruction(Operands, Inst, ErrorInfo,
                                         MissingFeatures, MatchingInlineAsm,
                                         false /*isParsingIntelSyntax()*/));
        Op.setTokenValue(Mnemonic);
      }
    }
  }
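  // For example, "push 42" assembles as pushq/pushl/pushw depending on whether
  // the parser is in 64-, 32-, or 16-bit mode, matching gas behaviour.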
  // If an unsized memory operand is present, try to match with each memory
  // operand size; in Intel assembly the size is not part of the mnemonic.
  if (UnsizedMemOp && UnsizedMemOp->isMemUnsized()) {
    static const unsigned MopSizes[] = {8, 16, 32, 64, 80, 128, 256, 512};
    for (unsigned Size : MopSizes) {
      UnsizedMemOp->Mem.Size = Size;
      uint64_t ErrorInfoIgnore;
      unsigned LastOpcode = Inst.getOpcode();
      unsigned M = MatchInstruction(Operands, Inst, ErrorInfoIgnore,
                                    MissingFeatures, MatchingInlineAsm,
                                    isParsingIntelSyntax());
      if (Match.empty() || LastOpcode != Inst.getOpcode())
        Match.push_back(M);
      // If this returned as a missing feature failure, remember that.
      if (Match.back() == Match_MissingFeature)
        ErrorInfoMissingFeatures = MissingFeatures;
    }
    // Restore the size of the unsized memory operand.
    UnsizedMemOp->Mem.Size = 0;
  }

  // If nothing matched yet, try once more with the operand left unsized.
  if (Match.empty()) {
    Match.push_back(MatchInstruction(
        Operands, Inst, ErrorInfo, MissingFeatures, MatchingInlineAsm,
        isParsingIntelSyntax()));
    if (Match.back() == Match_MissingFeature)
      ErrorInfoMissingFeatures = MissingFeatures;
  }
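  // Example: for "inc [rax]" the matcher is retried with 8-, 16-, 32- and
  // 64-bit (and wider) operand sizes; if more than one width matches, the
  // "ambiguous operand size" diagnostic below fires and the user must write
  // e.g. "inc dword ptr [rax]".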
  // If it's a bad mnemonic, all results will be the same.
  if (Match.back() == Match_MnemonicFail) {
    return Error(IDLoc, "invalid instruction mnemonic '" + Mnemonic + "'",
                 Op.getLocRange(), MatchingInlineAsm);
  }

  unsigned NumSuccessfulMatches = llvm::count(Match, (unsigned)Match_Success);

  // If matching was ambiguous but the frontend supplied a size (inline asm),
  // retry with that size to disambiguate.
  if (UnsizedMemOp && NumSuccessfulMatches > 1 &&
      UnsizedMemOp->getMemFrontendSize()) {
    UnsizedMemOp->Mem.Size = UnsizedMemOp->getMemFrontendSize();
    unsigned M = MatchInstruction(
        Operands, Inst, ErrorInfo, MissingFeatures, MatchingInlineAsm,
        isParsingIntelSyntax());
    if (M == Match_Success)
      NumSuccessfulMatches = 1;
  }
  if (NumSuccessfulMatches == 1) {
    if (!MatchingInlineAsm && validateInstruction(Inst, Operands))
      return true;
    // Some instructions need post-processing to, for example, tweak which
    // encoding is selected. Loop on it while changes happen so the individual
    // transformations can chain off each other.
    if (!MatchingInlineAsm)
      while (processInstruction(Inst, Operands))
        ;
    Inst.setLoc(IDLoc);
    if (!MatchingInlineAsm)
      emitInstruction(Inst, Operands, Out);
    Opcode = Inst.getOpcode();
    return false;
  } else if (NumSuccessfulMatches > 1) {
    assert(UnsizedMemOp &&
           "multiple matches only possible with unsized memory operands");
    return Error(UnsizedMemOp->getStartLoc(),
                 "ambiguous operand size for instruction '" + Mnemonic + "\'",
                 UnsizedMemOp->getLocRange());
  }
  // Forward the most specific failure we saw among the candidate matches.
  if (llvm::count(Match, (unsigned)Match_Unsupported) == 1)
    return Error(IDLoc, "unsupported instruction", EmptyRange,
                 MatchingInlineAsm);
  if (llvm::count(Match, (unsigned)Match_MissingFeature) == 1) {
    ErrorInfo = Match_MissingFeature;
    return ErrorMissingFeature(IDLoc, ErrorInfoMissingFeatures,
                               MatchingInlineAsm);
  }
  if (llvm::count(Match, (unsigned)Match_InvalidOperand) == 1)
    return Error(IDLoc, "invalid operand for instruction", EmptyRange,
                 MatchingInlineAsm);
  if (llvm::count(Match, (unsigned)Match_InvalidImmUnsignedi4) == 1) {
    SMLoc ErrorLoc = ((X86Operand &)*Operands[ErrorInfo]).getStartLoc();
    if (ErrorLoc == SMLoc())
      ErrorLoc = IDLoc;
    return Error(ErrorLoc, "immediate must be an integer in range [0, 15]",
                 EmptyRange, MatchingInlineAsm);
  }

  // Otherwise, report a generic failure.
  return Error(IDLoc, "unknown instruction mnemonic", EmptyRange,
               MatchingInlineAsm);
}
bool X86AsmParser::OmitRegisterFromClobberLists(unsigned RegNo) {
  return X86MCRegisterClasses[X86::SEGMENT_REGRegClassID].contains(RegNo);
}
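
// Directive parsing: ParseDirective dispatches on the directive name and
// forwards to the dedicated parsers below (.code16/.code32/.code64, .nops,
// .even, the CodeView .cv_fpo_* family and the Win64 .seh_* family).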
bool X86AsmParser::ParseDirective(AsmToken DirectiveID) {
  MCAsmParser &Parser = getParser();
  StringRef IDVal = DirectiveID.getIdentifier();
  if (IDVal.startswith(".arch"))
    return parseDirectiveArch();
  if (IDVal.startswith(".code"))
    return ParseDirectiveCode(IDVal, DirectiveID.getLoc());
  else if (IDVal.startswith(".att_syntax")) {
    if (getLexer().isNot(AsmToken::EndOfStatement)) {
      if (Parser.getTok().getString() == "prefix")
        Parser.Lex();
      else if (Parser.getTok().getString() == "noprefix")
        return Error(DirectiveID.getLoc(), "'.att_syntax noprefix' is not "
                                           "supported: registers must have a "
                                           "'%' prefix in .att_syntax");
    }
    getParser().setAssemblerDialect(0);
    return false;
  } else if (IDVal.startswith(".intel_syntax")) {
    getParser().setAssemblerDialect(1);
    if (getLexer().isNot(AsmToken::EndOfStatement)) {
      if (Parser.getTok().getString() == "noprefix")
        Parser.Lex();
      else if (Parser.getTok().getString() == "prefix")
        return Error(DirectiveID.getLoc(), "'.intel_syntax prefix' is not "
                                           "supported: registers must not have "
                                           "a '%' prefix in .intel_syntax");
    }
    return false;
  } else if (IDVal == ".nops")
    return parseDirectiveNops(DirectiveID.getLoc());
  else if (IDVal == ".even")
    return parseDirectiveEven(DirectiveID.getLoc());
  else if (IDVal == ".cv_fpo_proc")
    return parseDirectiveFPOProc(DirectiveID.getLoc());
  else if (IDVal == ".cv_fpo_setframe")
    return parseDirectiveFPOSetFrame(DirectiveID.getLoc());
  else if (IDVal == ".cv_fpo_pushreg")
    return parseDirectiveFPOPushReg(DirectiveID.getLoc());
  else if (IDVal == ".cv_fpo_stackalloc")
    return parseDirectiveFPOStackAlloc(DirectiveID.getLoc());
  else if (IDVal == ".cv_fpo_stackalign")
    return parseDirectiveFPOStackAlign(DirectiveID.getLoc());
  else if (IDVal == ".cv_fpo_endprologue")
    return parseDirectiveFPOEndPrologue(DirectiveID.getLoc());
  else if (IDVal == ".cv_fpo_endproc")
    return parseDirectiveFPOEndProc(DirectiveID.getLoc());
  else if (IDVal == ".seh_pushreg" ||
           (Parser.isParsingMasm() && IDVal.equals_insensitive(".pushreg")))
    return parseDirectiveSEHPushReg(DirectiveID.getLoc());
  else if (IDVal == ".seh_setframe" ||
           (Parser.isParsingMasm() && IDVal.equals_insensitive(".setframe")))
    return parseDirectiveSEHSetFrame(DirectiveID.getLoc());
  else if (IDVal == ".seh_savereg" ||
           (Parser.isParsingMasm() && IDVal.equals_insensitive(".savereg")))
    return parseDirectiveSEHSaveReg(DirectiveID.getLoc());
  else if (IDVal == ".seh_savexmm" ||
           (Parser.isParsingMasm() && IDVal.equals_insensitive(".savexmm128")))
    return parseDirectiveSEHSaveXMM(DirectiveID.getLoc());
  else if (IDVal == ".seh_pushframe" ||
           (Parser.isParsingMasm() && IDVal.equals_insensitive(".pushframe")))
    return parseDirectiveSEHPushFrame(DirectiveID.getLoc());

  return true;
}
bool X86AsmParser::parseDirectiveArch() {
  // Ignore .arch for now.
  getParser().parseStringToEndOfStatement();
  return false;
}
/// parseDirectiveNops
///  ::= .nops size[, control]
bool X86AsmParser::parseDirectiveNops(SMLoc L) {
  int64_t NumBytes = 0, Control = 0;
  SMLoc NumBytesLoc, ControlLoc;
  const MCSubtargetInfo &STI = getSTI();
  NumBytesLoc = getTok().getLoc();
  if (getParser().checkForValidSection() ||
      getParser().parseAbsoluteExpression(NumBytes))
    return true;

  if (parseOptionalToken(AsmToken::Comma)) {
    ControlLoc = getTok().getLoc();
    if (getParser().parseAbsoluteExpression(Control))
      return true;
  }
  if (getParser().parseEOL())
    return true;

  if (NumBytes <= 0) {
    Error(NumBytesLoc, "'.nops' directive with non-positive size");
    return false;
  }
  if (Control < 0) {
    Error(ControlLoc, "'.nops' directive with negative NOP size");
    return false;
  }

  // Emit the requested padding.
  getParser().getStreamer().emitNops(NumBytes, Control, L, STI);
  return false;
}
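
// Usage example:
//   .nops 16, 4    # emit 16 bytes of padding, no single NOP longer than 4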
/// parseDirectiveEven
///  ::= .even
bool X86AsmParser::parseDirectiveEven(SMLoc L) {
  if (parseEOL())
    return false;

  const MCSection *Section = getStreamer().getCurrentSectionOnly();
  if (!Section) {
    getStreamer().initSections(false, getSTI());
    Section = getStreamer().getCurrentSectionOnly();
  }
  if (Section->useCodeAlign())
    getStreamer().emitCodeAlignment(Align(2), &getSTI(), 0);
  else
    getStreamer().emitValueToAlignment(Align(2), 0, 1, 0);
  return false;
}
/// ParseDirectiveCode
///  ::= .code16 | .code16gcc | .code32 | .code64
bool X86AsmParser::ParseDirectiveCode(StringRef IDVal, SMLoc L) {
  MCAsmParser &Parser = getParser();
  Code16GCC = false;
  if (IDVal == ".code16") {
    Parser.Lex();
    if (!is16BitMode()) {
      SwitchMode(X86::Is16Bit);
      getParser().getStreamer().emitAssemblerFlag(MCAF_Code16);
    }
  } else if (IDVal == ".code16gcc") {
    // .code16gcc parses as if in 32-bit mode, but emits code in 16-bit mode.
    Parser.Lex();
    Code16GCC = true;
    if (!is16BitMode()) {
      SwitchMode(X86::Is16Bit);
      getParser().getStreamer().emitAssemblerFlag(MCAF_Code16);
    }
  } else if (IDVal == ".code32") {
    Parser.Lex();
    if (!is32BitMode()) {
      SwitchMode(X86::Is32Bit);
      getParser().getStreamer().emitAssemblerFlag(MCAF_Code32);
    }
  } else if (IDVal == ".code64") {
    Parser.Lex();
    if (!is64BitMode()) {
      SwitchMode(X86::Is64Bit);
      getParser().getStreamer().emitAssemblerFlag(MCAF_Code64);
    }
  } else {
    Error(L, "unknown directive " + IDVal);
    return false;
  }

  return false;
}
// .cv_fpo_proc foo
bool X86AsmParser::parseDirectiveFPOProc(SMLoc L) {
  MCAsmParser &Parser = getParser();
  StringRef ProcName;
  int64_t ParamsSize;
  if (Parser.parseIdentifier(ProcName))
    return Parser.TokError("expected symbol name");
  if (Parser.parseIntToken(ParamsSize, "expected parameter byte count"))
    return true;
  if (!isUIntN(32, ParamsSize))
    return Parser.TokError("parameters size out of range");
  if (parseEOL())
    return true;
  MCSymbol *ProcSym = getContext().getOrCreateSymbol(ProcName);
  return getTargetStreamer().emitFPOProc(ProcSym, ParamsSize, L);
}
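
// Usage example (CodeView frame data for a function with 8 bytes of params;
// the symbol name is chosen only for illustration):
//   .cv_fpo_proc _my_func 8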
// .cv_fpo_setframe ebp
bool X86AsmParser::parseDirectiveFPOSetFrame(SMLoc L) {
  MCRegister Reg;
  SMLoc DummyLoc;
  if (parseRegister(Reg, DummyLoc, DummyLoc) || parseEOL())
    return true;
  return getTargetStreamer().emitFPOSetFrame(Reg, L);
}

// .cv_fpo_pushreg ebx
bool X86AsmParser::parseDirectiveFPOPushReg(SMLoc L) {
  MCRegister Reg;
  SMLoc DummyLoc;
  if (parseRegister(Reg, DummyLoc, DummyLoc) || parseEOL())
    return true;
  return getTargetStreamer().emitFPOPushReg(Reg, L);
}

// .cv_fpo_stackalloc 20
bool X86AsmParser::parseDirectiveFPOStackAlloc(SMLoc L) {
  int64_t Offset;
  if (parseIntToken(Offset, "expected offset") || parseEOL())
    return true;
  return getTargetStreamer().emitFPOStackAlloc(Offset, L);
}

// .cv_fpo_stackalign 8
bool X86AsmParser::parseDirectiveFPOStackAlign(SMLoc L) {
  int64_t Offset;
  if (parseIntToken(Offset, "expected offset") || parseEOL())
    return true;
  return getTargetStreamer().emitFPOStackAlign(Offset, L);
}

bool X86AsmParser::parseDirectiveFPOEndPrologue(SMLoc L) {
  if (parseEOL())
    return true;
  return getTargetStreamer().emitFPOEndPrologue(L);
}

bool X86AsmParser::parseDirectiveFPOEndProc(SMLoc L) {
  if (parseEOL())
    return true;
  return getTargetStreamer().emitFPOEndProc(L);
}
bool X86AsmParser::parseSEHRegisterNumber(unsigned RegClassID,
                                          MCRegister &RegNo) {
  SMLoc startLoc = getLexer().getLoc();
  const MCRegisterInfo *MRI = getContext().getRegisterInfo();

  // Try parsing the argument as a register name first.
  if (getLexer().getTok().isNot(AsmToken::Integer)) {
    SMLoc endLoc;
    if (parseRegister(RegNo, startLoc, endLoc))
      return true;
    if (!X86MCRegisterClasses[RegClassID].contains(RegNo))
      return Error(startLoc,
                   "register is not supported for use with this directive");
  } else {
    // Otherwise accept an integer matching the register's encoding; map it
    // back to the LLVM register number.
    int64_t EncodedReg;
    if (getParser().parseAbsoluteExpression(EncodedReg))
      return true;
    RegNo = 0;
    for (MCPhysReg Reg : X86MCRegisterClasses[RegClassID]) {
      if (MRI->getEncodingValue(Reg) == EncodedReg) {
        RegNo = Reg;
        break;
      }
    }
    if (RegNo == 0)
      return Error(startLoc,
                   "incorrect register number for use with this directive");
  }
  return false;
}
bool X86AsmParser::parseDirectiveSEHPushReg(SMLoc Loc) {
  MCRegister Reg;
  if (parseSEHRegisterNumber(X86::GR64RegClassID, Reg))
    return true;
  if (getLexer().isNot(AsmToken::EndOfStatement))
    return TokError("expected end of directive");

  getParser().Lex();
  getStreamer().emitWinCFIPushReg(Reg, Loc);
  return false;
}

bool X86AsmParser::parseDirectiveSEHSetFrame(SMLoc Loc) {
  MCRegister Reg;
  int64_t Off;
  if (parseSEHRegisterNumber(X86::GR64RegClassID, Reg))
    return true;
  if (getLexer().isNot(AsmToken::Comma))
    return TokError("you must specify a stack pointer offset");

  getParser().Lex();
  if (getParser().parseAbsoluteExpression(Off))
    return true;
  if (getLexer().isNot(AsmToken::EndOfStatement))
    return TokError("expected end of directive");

  getParser().Lex();
  getStreamer().emitWinCFISetFrame(Reg, Off, Loc);
  return false;
}

bool X86AsmParser::parseDirectiveSEHSaveReg(SMLoc Loc) {
  MCRegister Reg;
  int64_t Off;
  if (parseSEHRegisterNumber(X86::GR64RegClassID, Reg))
    return true;
  if (getLexer().isNot(AsmToken::Comma))
    return TokError("you must specify an offset on the stack");

  getParser().Lex();
  if (getParser().parseAbsoluteExpression(Off))
    return true;
  if (getLexer().isNot(AsmToken::EndOfStatement))
    return TokError("expected end of directive");

  getParser().Lex();
  getStreamer().emitWinCFISaveReg(Reg, Off, Loc);
  return false;
}

bool X86AsmParser::parseDirectiveSEHSaveXMM(SMLoc Loc) {
  MCRegister Reg;
  int64_t Off;
  if (parseSEHRegisterNumber(X86::VR128XRegClassID, Reg))
    return true;
  if (getLexer().isNot(AsmToken::Comma))
    return TokError("you must specify an offset on the stack");

  getParser().Lex();
  if (getParser().parseAbsoluteExpression(Off))
    return true;
  if (getLexer().isNot(AsmToken::EndOfStatement))
    return TokError("expected end of directive");

  getParser().Lex();
  getStreamer().emitWinCFISaveXMM(Reg, Off, Loc);
  return false;
}

bool X86AsmParser::parseDirectiveSEHPushFrame(SMLoc Loc) {
  bool Code = false;
  StringRef CodeID;
  if (getLexer().is(AsmToken::At)) {
    SMLoc startLoc = getLexer().getLoc();
    getParser().Lex();
    if (!getParser().parseIdentifier(CodeID)) {
      if (CodeID != "code")
        return Error(startLoc, "expected @code");
      Code = true;
    }
  }
  if (getLexer().isNot(AsmToken::EndOfStatement))
    return TokError("expected end of directive");

  getParser().Lex();
  getStreamer().emitWinCFIPushFrame(Code, Loc);
  return false;
}
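
// Example: ".seh_pushframe @code" records that a machine frame including an
// error code was pushed before the prologue, mirroring MASM's .PUSHFRAME.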
#define GET_MATCHER_IMPLEMENTATION
#include "X86GenAsmMatcher.inc"