enum RegisterKind { IS_UNKNOWN, IS_VGPR, IS_SGPR, IS_AGPR, IS_TTMP, IS_SPECIAL };
  SMLoc StartLoc, EndLoc;
  const AMDGPUAsmParser *AsmParser;

  AMDGPUOperand(KindTy Kind_, const AMDGPUAsmParser *AsmParser_)
      : Kind(Kind_), AsmParser(AsmParser_) {}

  using Ptr = std::unique_ptr<AMDGPUOperand>;
  bool hasFPModifiers() const { return Abs || Neg; }
  bool hasIntModifiers() const { return Sext; }
  bool hasModifiers() const { return hasFPModifiers() || hasIntModifiers(); }

  int64_t getFPModifiersOperand() const {
    int64_t Operand = 0;
    Operand |= Abs ? SISrcMods::ABS : 0u;
    Operand |= Neg ? SISrcMods::NEG : 0u;
    return Operand;
  }

  int64_t getIntModifiersOperand() const {
    int64_t Operand = 0;
    Operand |= Sext ? SISrcMods::SEXT : 0u;
    return Operand;
  }

  int64_t getModifiersOperand() const {
    assert(!(hasFPModifiers() && hasIntModifiers())
           && "fp and int modifiers should not be used simultaneously");
    if (hasFPModifiers())
      return getFPModifiersOperand();
    if (hasIntModifiers())
      return getIntModifiersOperand();
    return 0;
  }
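  // Illustration (added for this excerpt): for an operand written as
  // neg(abs(v0)) both Abs and Neg are set, so getModifiersOperand() yields
  // SISrcMods::ABS | SISrcMods::NEG, which becomes the separate "modifiers"
  // immediate emitted ahead of the source operand in the MCInst.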
  enum ImmKindTy {
    ImmKindTyNone,
    ImmKindTyLiteral,
    ImmKindTyMandatoryLiteral,
    ImmKindTyConst,
  };

  mutable ImmKindTy Kind;
  bool isToken() const override { return Kind == Token; }

  bool isSymbolRefExpr() const {
    return isExpr() && Expr && isa<MCSymbolRefExpr>(Expr);
  }

  bool isImm() const override {
    return Kind == Immediate;
  }

  void setImmKindNone() const {
    Imm.Kind = ImmKindTyNone;
  }

  void setImmKindLiteral() const {
    Imm.Kind = ImmKindTyLiteral;
  }

  void setImmKindMandatoryLiteral() const {
    Imm.Kind = ImmKindTyMandatoryLiteral;
  }

  void setImmKindConst() const {
    Imm.Kind = ImmKindTyConst;
  }

  bool IsImmKindLiteral() const {
    return isImm() && Imm.Kind == ImmKindTyLiteral;
  }

  bool IsImmKindMandatoryLiteral() const {
    return isImm() && Imm.Kind == ImmKindTyMandatoryLiteral;
  }

  bool isImmKindConst() const {
    return isImm() && Imm.Kind == ImmKindTyConst;
  }

  bool isInlinableImm(MVT type) const;
  bool isLiteralImm(MVT type) const;
  bool isRegKind() const {
    return Kind == Register;
  }

  bool isReg() const override {
    return isRegKind() && !hasModifiers();
  }

  bool isRegOrInline(unsigned RCID, MVT type) const {
    return isRegClass(RCID) || isInlinableImm(type);
  }

  bool isRegOrImmWithInputMods(unsigned RCID, MVT type) const {
    return isRegOrInline(RCID, type) || isLiteralImm(type);
  }
  bool isRegOrImmWithInt16InputMods() const {
    return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::i16);
  }

  bool isRegOrImmWithIntT16InputMods() const {
    return isRegOrImmWithInputMods(AMDGPU::VS_16RegClassID, MVT::i16);
  }

  bool isRegOrImmWithInt32InputMods() const {
    return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::i32);
  }

  bool isRegOrInlineImmWithInt16InputMods() const {
    return isRegOrInline(AMDGPU::VS_32RegClassID, MVT::i16);
  }

  bool isRegOrInlineImmWithInt32InputMods() const {
    return isRegOrInline(AMDGPU::VS_32RegClassID, MVT::i32);
  }

  bool isRegOrImmWithInt64InputMods() const {
    return isRegOrImmWithInputMods(AMDGPU::VS_64RegClassID, MVT::i64);
  }

  bool isRegOrImmWithFP16InputMods() const {
    return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::f16);
  }

  bool isRegOrImmWithFPT16InputMods() const {
    return isRegOrImmWithInputMods(AMDGPU::VS_16RegClassID, MVT::f16);
  }

  bool isRegOrImmWithFP32InputMods() const {
    return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::f32);
  }

  bool isRegOrImmWithFP64InputMods() const {
    return isRegOrImmWithInputMods(AMDGPU::VS_64RegClassID, MVT::f64);
  }

  template <bool IsFake16> bool isRegOrInlineImmWithFP16InputMods() const {
    return isRegOrInline(
        IsFake16 ? AMDGPU::VS_32RegClassID : AMDGPU::VS_16RegClassID, MVT::f16);
  }

  bool isRegOrInlineImmWithFP32InputMods() const {
    return isRegOrInline(AMDGPU::VS_32RegClassID, MVT::f32);
  }
  bool isPackedFP16InputMods() const {
    return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::v2f16);
  }

  bool isVReg() const {
    return isRegClass(AMDGPU::VGPR_32RegClassID) ||
           isRegClass(AMDGPU::VReg_64RegClassID) ||
           isRegClass(AMDGPU::VReg_96RegClassID) ||
           isRegClass(AMDGPU::VReg_128RegClassID) ||
           isRegClass(AMDGPU::VReg_160RegClassID) ||
           isRegClass(AMDGPU::VReg_192RegClassID) ||
           isRegClass(AMDGPU::VReg_256RegClassID) ||
           isRegClass(AMDGPU::VReg_512RegClassID) ||
           isRegClass(AMDGPU::VReg_1024RegClassID);
  }

  bool isVReg32() const {
    return isRegClass(AMDGPU::VGPR_32RegClassID);
  }

  bool isVReg32OrOff() const {
    return isOff() || isVReg32();
  }

  bool isNull() const {
    return isRegKind() && getReg() == AMDGPU::SGPR_NULL;
  }

  bool isVRegWithInputMods() const;
  template <bool IsFake16> bool isT16VRegWithInputMods() const;

  bool isSDWAOperand(MVT type) const;
  bool isSDWAFP16Operand() const;
  bool isSDWAFP32Operand() const;
  bool isSDWAInt16Operand() const;
  bool isSDWAInt32Operand() const;
  bool isImmTy(ImmTy ImmT) const {
    return isImm() && Imm.Type == ImmT;
  }

  template <ImmTy Ty> bool isImmTy() const { return isImmTy(Ty); }

  bool isImmLiteral() const { return isImmTy(ImmTyNone); }

  bool isImmModifier() const {
    return isImm() && Imm.Type != ImmTyNone;
  }

  bool isOModSI() const { return isImmTy(ImmTyOModSI); }
  bool isDim() const { return isImmTy(ImmTyDim); }
  bool isR128A16() const { return isImmTy(ImmTyR128A16); }
  bool isOff() const { return isImmTy(ImmTyOff); }
  bool isExpTgt() const { return isImmTy(ImmTyExpTgt); }
  bool isOffen() const { return isImmTy(ImmTyOffen); }
  bool isIdxen() const { return isImmTy(ImmTyIdxen); }
  bool isAddr64() const { return isImmTy(ImmTyAddr64); }
  bool isSMEMOffsetMod() const { return isImmTy(ImmTySMEMOffsetMod); }
  bool isFlatOffset() const { return isImmTy(ImmTyOffset) || isImmTy(ImmTyInstOffset); }
  bool isGDS() const { return isImmTy(ImmTyGDS); }
  bool isLDS() const { return isImmTy(ImmTyLDS); }
  bool isCPol() const { return isImmTy(ImmTyCPol); }
  bool isIndexKey8bit() const { return isImmTy(ImmTyIndexKey8bit); }
  bool isIndexKey16bit() const { return isImmTy(ImmTyIndexKey16bit); }
  bool isTFE() const { return isImmTy(ImmTyTFE); }
  bool isFORMAT() const { return isImmTy(ImmTyFORMAT) && isUInt<7>(getImm()); }
  bool isDppFI() const { return isImmTy(ImmTyDppFI); }
  bool isSDWADstSel() const { return isImmTy(ImmTySDWADstSel); }
  bool isSDWASrc0Sel() const { return isImmTy(ImmTySDWASrc0Sel); }
  bool isSDWASrc1Sel() const { return isImmTy(ImmTySDWASrc1Sel); }
  bool isSDWADstUnused() const { return isImmTy(ImmTySDWADstUnused); }
  bool isInterpSlot() const { return isImmTy(ImmTyInterpSlot); }
  bool isInterpAttr() const { return isImmTy(ImmTyInterpAttr); }
  bool isInterpAttrChan() const { return isImmTy(ImmTyInterpAttrChan); }
  bool isOpSel() const { return isImmTy(ImmTyOpSel); }
  bool isOpSelHi() const { return isImmTy(ImmTyOpSelHi); }
  bool isNegLo() const { return isImmTy(ImmTyNegLo); }
  bool isNegHi() const { return isImmTy(ImmTyNegHi); }
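  // Naming convention for the source predicates below (summary comment added
  // for this excerpt, not from the original file): "SCSrc" = SGPR or inline
  // constant, "SSrc" additionally accepts a literal or expression, "VCSrc" =
  // VGPR/SGPR or inline constant, "VSrc" additionally accepts a literal, and
  // "VISrc"/"AISrc" = VGPR/AGPR or inline constant only; the type suffix
  // names the expected operand width and format.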
  bool isRegOrImm() const {
    return isReg() || isImm();
  }

  bool isRegClass(unsigned RCID) const;

  bool isRegOrInlineNoMods(unsigned RCID, MVT type) const {
    return isRegOrInline(RCID, type) && !hasModifiers();
  }

  bool isSCSrcB16() const {
    return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::i16);
  }

  bool isSCSrcV2B16() const {
    return isSCSrcB16();
  }

  bool isSCSrc_b32() const {
    return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::i32);
  }

  bool isSCSrc_b64() const {
    return isRegOrInlineNoMods(AMDGPU::SReg_64RegClassID, MVT::i64);
  }

  bool isBoolReg() const;

  bool isSCSrcF16() const {
    return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::f16);
  }

  bool isSCSrcV2F16() const {
    return isSCSrcF16();
  }

  bool isSCSrcF32() const {
    return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::f32);
  }

  bool isSCSrcF64() const {
    return isRegOrInlineNoMods(AMDGPU::SReg_64RegClassID, MVT::f64);
  }

  bool isSSrc_b32() const {
    return isSCSrc_b32() || isLiteralImm(MVT::i32) || isExpr();
  }

  bool isSSrc_b16() const { return isSCSrcB16() || isLiteralImm(MVT::i16); }

  bool isSSrcV2B16() const {
    llvm_unreachable("cannot happen");
    return isSSrc_b16();
  }

  bool isSSrc_b64() const {
    return isSCSrc_b64() || isLiteralImm(MVT::i64);
  }

  bool isSSrc_f32() const {
    return isSCSrc_b32() || isLiteralImm(MVT::f32) || isExpr();
  }

  bool isSSrcF64() const { return isSCSrc_b64() || isLiteralImm(MVT::f64); }

  bool isSSrc_bf16() const { return isSCSrcB16() || isLiteralImm(MVT::bf16); }

  bool isSSrc_f16() const { return isSCSrcB16() || isLiteralImm(MVT::f16); }

  bool isSSrcV2F16() const {
    llvm_unreachable("cannot happen");
    return isSSrc_f16();
  }

  bool isSSrcV2FP32() const {
    llvm_unreachable("cannot happen");
    return isSSrc_f32();
  }

  bool isSCSrcV2FP32() const {
    llvm_unreachable("cannot happen");
    return isSCSrcF32();
  }

  bool isSSrcV2INT32() const {
    llvm_unreachable("cannot happen");
    return isSSrc_b32();
  }

  bool isSCSrcV2INT32() const {
    llvm_unreachable("cannot happen");
    return isSCSrc_b32();
  }

  bool isSSrcOrLds_b32() const {
    return isRegOrInlineNoMods(AMDGPU::SRegOrLds_32RegClassID, MVT::i32) ||
           isLiteralImm(MVT::i32) || isExpr();
  }
  bool isVCSrc_b32() const {
    return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::i32);
  }

  bool isVCSrcB64() const {
    return isRegOrInlineNoMods(AMDGPU::VS_64RegClassID, MVT::i64);
  }

  bool isVCSrcTB16() const {
    return isRegOrInlineNoMods(AMDGPU::VS_16RegClassID, MVT::i16);
  }

  bool isVCSrcTB16_Lo128() const {
    return isRegOrInlineNoMods(AMDGPU::VS_16_Lo128RegClassID, MVT::i16);
  }

  bool isVCSrcFake16B16_Lo128() const {
    return isRegOrInlineNoMods(AMDGPU::VS_32_Lo128RegClassID, MVT::i16);
  }

  bool isVCSrc_b16() const {
    return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::i16);
  }

  bool isVCSrc_v2b16() const { return isVCSrc_b16(); }

  bool isVCSrc_f32() const {
    return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::f32);
  }

  bool isVCSrcF64() const {
    return isRegOrInlineNoMods(AMDGPU::VS_64RegClassID, MVT::f64);
  }

  bool isVCSrcTBF16() const {
    return isRegOrInlineNoMods(AMDGPU::VS_16RegClassID, MVT::bf16);
  }

  bool isVCSrcTF16() const {
    return isRegOrInlineNoMods(AMDGPU::VS_16RegClassID, MVT::f16);
  }

  bool isVCSrcTBF16_Lo128() const {
    return isRegOrInlineNoMods(AMDGPU::VS_16_Lo128RegClassID, MVT::bf16);
  }

  bool isVCSrcTF16_Lo128() const {
    return isRegOrInlineNoMods(AMDGPU::VS_16_Lo128RegClassID, MVT::f16);
  }

  bool isVCSrcFake16BF16_Lo128() const {
    return isRegOrInlineNoMods(AMDGPU::VS_32_Lo128RegClassID, MVT::bf16);
  }

  bool isVCSrcFake16F16_Lo128() const {
    return isRegOrInlineNoMods(AMDGPU::VS_32_Lo128RegClassID, MVT::f16);
  }

  bool isVCSrc_bf16() const {
    return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::bf16);
  }

  bool isVCSrc_f16() const {
    return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::f16);
  }

  bool isVCSrc_v2bf16() const { return isVCSrc_bf16(); }

  bool isVCSrc_v2f16() const { return isVCSrc_f16(); }

  bool isVSrc_b32() const {
    return isVCSrc_f32() || isLiteralImm(MVT::i32) || isExpr();
  }

  bool isVSrc_b64() const { return isVCSrcF64() || isLiteralImm(MVT::i64); }

  bool isVSrcT_b16() const { return isVCSrcTB16() || isLiteralImm(MVT::i16); }

  bool isVSrcT_b16_Lo128() const {
    return isVCSrcTB16_Lo128() || isLiteralImm(MVT::i16);
  }

  bool isVSrcFake16_b16_Lo128() const {
    return isVCSrcFake16B16_Lo128() || isLiteralImm(MVT::i16);
  }

  bool isVSrc_b16() const { return isVCSrc_b16() || isLiteralImm(MVT::i16); }

  bool isVSrc_v2b16() const { return isVSrc_b16() || isLiteralImm(MVT::v2i16); }

  bool isVCSrcV2FP32() const {
    return isVCSrcF64();
  }

  bool isVSrc_v2f32() const { return isVSrc_f64() || isLiteralImm(MVT::v2f32); }

  bool isVCSrcV2INT32() const {
    return isVCSrcB64();
  }

  bool isVSrc_v2b32() const { return isVSrc_b64() || isLiteralImm(MVT::v2i32); }

  bool isVSrc_f32() const {
    return isVCSrc_f32() || isLiteralImm(MVT::f32) || isExpr();
  }

  bool isVSrc_f64() const { return isVCSrcF64() || isLiteralImm(MVT::f64); }

  bool isVSrcT_bf16() const { return isVCSrcTBF16() || isLiteralImm(MVT::bf16); }

  bool isVSrcT_f16() const { return isVCSrcTF16() || isLiteralImm(MVT::f16); }

  bool isVSrcT_bf16_Lo128() const {
    return isVCSrcTBF16_Lo128() || isLiteralImm(MVT::bf16);
  }

  bool isVSrcT_f16_Lo128() const {
    return isVCSrcTF16_Lo128() || isLiteralImm(MVT::f16);
  }

  bool isVSrcFake16_bf16_Lo128() const {
    return isVCSrcFake16BF16_Lo128() || isLiteralImm(MVT::bf16);
  }

  bool isVSrcFake16_f16_Lo128() const {
    return isVCSrcFake16F16_Lo128() || isLiteralImm(MVT::f16);
  }

  bool isVSrc_bf16() const { return isVCSrc_bf16() || isLiteralImm(MVT::bf16); }

  bool isVSrc_f16() const { return isVCSrc_f16() || isLiteralImm(MVT::f16); }

  bool isVSrc_v2bf16() const {
    return isVSrc_bf16() || isLiteralImm(MVT::v2bf16);
  }

  bool isVSrc_v2f16() const { return isVSrc_f16() || isLiteralImm(MVT::v2f16); }
  bool isVISrcB32() const {
    return isRegOrInlineNoMods(AMDGPU::VGPR_32RegClassID, MVT::i32);
  }

  bool isVISrcB16() const {
    return isRegOrInlineNoMods(AMDGPU::VGPR_32RegClassID, MVT::i16);
  }

  bool isVISrcV2B16() const {
    return isVISrcB16();
  }

  bool isVISrcF32() const {
    return isRegOrInlineNoMods(AMDGPU::VGPR_32RegClassID, MVT::f32);
  }

  bool isVISrcF16() const {
    return isRegOrInlineNoMods(AMDGPU::VGPR_32RegClassID, MVT::f16);
  }

  bool isVISrcV2F16() const {
    return isVISrcF16() || isVISrcB32();
  }

  bool isVISrc_64_bf16() const {
    return isRegOrInlineNoMods(AMDGPU::VReg_64RegClassID, MVT::bf16);
  }

  bool isVISrc_64_f16() const {
    return isRegOrInlineNoMods(AMDGPU::VReg_64RegClassID, MVT::f16);
  }

  bool isVISrc_64_b32() const {
    return isRegOrInlineNoMods(AMDGPU::VReg_64RegClassID, MVT::i32);
  }

  bool isVISrc_64B64() const {
    return isRegOrInlineNoMods(AMDGPU::VReg_64RegClassID, MVT::i64);
  }

  bool isVISrc_64_f64() const {
    return isRegOrInlineNoMods(AMDGPU::VReg_64RegClassID, MVT::f64);
  }

  bool isVISrc_64V2FP32() const {
    return isRegOrInlineNoMods(AMDGPU::VReg_64RegClassID, MVT::f32);
  }

  bool isVISrc_64V2INT32() const {
    return isRegOrInlineNoMods(AMDGPU::VReg_64RegClassID, MVT::i32);
  }

  bool isVISrc_256_b32() const {
    return isRegOrInlineNoMods(AMDGPU::VReg_256RegClassID, MVT::i32);
  }

  bool isVISrc_256_f32() const {
    return isRegOrInlineNoMods(AMDGPU::VReg_256RegClassID, MVT::f32);
  }

  bool isVISrc_256B64() const {
    return isRegOrInlineNoMods(AMDGPU::VReg_256RegClassID, MVT::i64);
  }

  bool isVISrc_256_f64() const {
    return isRegOrInlineNoMods(AMDGPU::VReg_256RegClassID, MVT::f64);
  }

  bool isVISrc_128B16() const {
    return isRegOrInlineNoMods(AMDGPU::VReg_128RegClassID, MVT::i16);
  }

  bool isVISrc_128V2B16() const {
    return isVISrc_128B16();
  }

  bool isVISrc_128_b32() const {
    return isRegOrInlineNoMods(AMDGPU::VReg_128RegClassID, MVT::i32);
  }

  bool isVISrc_128_f32() const {
    return isRegOrInlineNoMods(AMDGPU::VReg_128RegClassID, MVT::f32);
  }

  bool isVISrc_256V2FP32() const {
    return isRegOrInlineNoMods(AMDGPU::VReg_256RegClassID, MVT::f32);
  }

  bool isVISrc_256V2INT32() const {
    return isRegOrInlineNoMods(AMDGPU::VReg_256RegClassID, MVT::i32);
  }

  bool isVISrc_512_b32() const {
    return isRegOrInlineNoMods(AMDGPU::VReg_512RegClassID, MVT::i32);
  }

  bool isVISrc_512B16() const {
    return isRegOrInlineNoMods(AMDGPU::VReg_512RegClassID, MVT::i16);
  }

  bool isVISrc_512V2B16() const {
    return isVISrc_512B16();
  }

  bool isVISrc_512_f32() const {
    return isRegOrInlineNoMods(AMDGPU::VReg_512RegClassID, MVT::f32);
  }

  bool isVISrc_512F16() const {
    return isRegOrInlineNoMods(AMDGPU::VReg_512RegClassID, MVT::f16);
  }

  bool isVISrc_512V2F16() const {
    return isVISrc_512F16() || isVISrc_512_b32();
  }

  bool isVISrc_1024_b32() const {
    return isRegOrInlineNoMods(AMDGPU::VReg_1024RegClassID, MVT::i32);
  }

  bool isVISrc_1024B16() const {
    return isRegOrInlineNoMods(AMDGPU::VReg_1024RegClassID, MVT::i16);
  }

  bool isVISrc_1024V2B16() const {
    return isVISrc_1024B16();
  }

  bool isVISrc_1024_f32() const {
    return isRegOrInlineNoMods(AMDGPU::VReg_1024RegClassID, MVT::f32);
  }

  bool isVISrc_1024F16() const {
    return isRegOrInlineNoMods(AMDGPU::VReg_1024RegClassID, MVT::f16);
  }

  bool isVISrc_1024V2F16() const {
    return isVISrc_1024F16() || isVISrc_1024_b32();
  }

  bool isAISrcB32() const {
    return isRegOrInlineNoMods(AMDGPU::AGPR_32RegClassID, MVT::i32);
  }

  bool isAISrcB16() const {
    return isRegOrInlineNoMods(AMDGPU::AGPR_32RegClassID, MVT::i16);
  }

  bool isAISrcV2B16() const {
    return isAISrcB16();
  }

  bool isAISrcF32() const {
    return isRegOrInlineNoMods(AMDGPU::AGPR_32RegClassID, MVT::f32);
  }

  bool isAISrcF16() const {
    return isRegOrInlineNoMods(AMDGPU::AGPR_32RegClassID, MVT::f16);
  }

  bool isAISrcV2F16() const {
    return isAISrcF16() || isAISrcB32();
  }

  bool isAISrc_64B64() const {
    return isRegOrInlineNoMods(AMDGPU::AReg_64RegClassID, MVT::i64);
  }

  bool isAISrc_64_f64() const {
    return isRegOrInlineNoMods(AMDGPU::AReg_64RegClassID, MVT::f64);
  }

  bool isAISrc_128_b32() const {
    return isRegOrInlineNoMods(AMDGPU::AReg_128RegClassID, MVT::i32);
  }

  bool isAISrc_128B16() const {
    return isRegOrInlineNoMods(AMDGPU::AReg_128RegClassID, MVT::i16);
  }

  bool isAISrc_128V2B16() const {
    return isAISrc_128B16();
  }

  bool isAISrc_128_f32() const {
    return isRegOrInlineNoMods(AMDGPU::AReg_128RegClassID, MVT::f32);
  }

  bool isAISrc_128F16() const {
    return isRegOrInlineNoMods(AMDGPU::AReg_128RegClassID, MVT::f16);
  }

  bool isAISrc_128V2F16() const {
    return isAISrc_128F16() || isAISrc_128_b32();
  }

  bool isVISrc_128_bf16() const {
    return isRegOrInlineNoMods(AMDGPU::VReg_128RegClassID, MVT::bf16);
  }

  bool isVISrc_128_f16() const {
    return isRegOrInlineNoMods(AMDGPU::VReg_128RegClassID, MVT::f16);
  }

  bool isVISrc_128V2F16() const {
    return isVISrc_128_f16() || isVISrc_128_b32();
  }

  bool isAISrc_256B64() const {
    return isRegOrInlineNoMods(AMDGPU::AReg_256RegClassID, MVT::i64);
  }

  bool isAISrc_256_f64() const {
    return isRegOrInlineNoMods(AMDGPU::AReg_256RegClassID, MVT::f64);
  }

  bool isAISrc_512_b32() const {
    return isRegOrInlineNoMods(AMDGPU::AReg_512RegClassID, MVT::i32);
  }

  bool isAISrc_512B16() const {
    return isRegOrInlineNoMods(AMDGPU::AReg_512RegClassID, MVT::i16);
  }

  bool isAISrc_512V2B16() const {
    return isAISrc_512B16();
  }

  bool isAISrc_512_f32() const {
    return isRegOrInlineNoMods(AMDGPU::AReg_512RegClassID, MVT::f32);
  }

  bool isAISrc_512F16() const {
    return isRegOrInlineNoMods(AMDGPU::AReg_512RegClassID, MVT::f16);
  }

  bool isAISrc_512V2F16() const {
    return isAISrc_512F16() || isAISrc_512_b32();
  }

  bool isAISrc_1024_b32() const {
    return isRegOrInlineNoMods(AMDGPU::AReg_1024RegClassID, MVT::i32);
  }

  bool isAISrc_1024B16() const {
    return isRegOrInlineNoMods(AMDGPU::AReg_1024RegClassID, MVT::i16);
  }

  bool isAISrc_1024V2B16() const {
    return isAISrc_1024B16();
  }

  bool isAISrc_1024_f32() const {
    return isRegOrInlineNoMods(AMDGPU::AReg_1024RegClassID, MVT::f32);
  }

  bool isAISrc_1024F16() const {
    return isRegOrInlineNoMods(AMDGPU::AReg_1024RegClassID, MVT::f16);
  }

  bool isAISrc_1024V2F16() const {
    return isAISrc_1024F16() || isAISrc_1024_b32();
  }
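  // Note added for this excerpt: the _64/_128/_256/_512/_1024 infixes above
  // select VGPR (VReg_*) or AGPR (AReg_*) tuple register classes of that bit
  // width, and the V2B16/V2F16 packed variants reuse the scalar predicates
  // because a packed literal still occupies a single 32-bit encoding slot.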
  bool isKImmFP32() const {
    return isLiteralImm(MVT::f32);
  }

  bool isKImmFP16() const {
    return isLiteralImm(MVT::f16);
  }

  bool isMem() const override {
    return false;
  }

  bool isExpr() const {
    return Kind == Expression;
  }

  bool isSOPPBrTarget() const { return isExpr() || isImm(); }

  bool isSWaitCnt() const;
  bool isDepCtr() const;
  bool isSDelayALU() const;
  bool isHwreg() const;
  bool isSendMsg() const;
  bool isSplitBarrier() const;
  bool isSwizzle() const;
  bool isSMRDOffset8() const;
  bool isSMEMOffset() const;
  bool isSMRDLiteralOffset() const;
  bool isDPPCtrl() const;
  bool isGPRIdxMode() const;
  bool isS16Imm() const;
  bool isU16Imm() const;
  bool isEndpgm() const;

  auto getPredicate(std::function<bool(const AMDGPUOperand &Op)> P) const {
    return [=](){ return P(*this); };
  }
  int64_t getImm() const {
    assert(isImm());
    return Imm.Val;
  }

  void setImm(int64_t Val) {
    assert(isImm());
    Imm.Val = Val;
  }

  ImmTy getImmTy() const {
    assert(isImm());
    return Imm.Type;
  }

  SMLoc getStartLoc() const override {
    return StartLoc;
  }

  SMLoc getEndLoc() const override {
    return EndLoc;
  }

  SMRange getLocRange() const {
    return SMRange(StartLoc, EndLoc);
  }

  Modifiers getModifiers() const {
    assert(isRegKind() || isImmTy(ImmTyNone));
    return isRegKind() ? Reg.Mods : Imm.Mods;
  }

  void setModifiers(Modifiers Mods) {
    assert(isRegKind() || isImmTy(ImmTyNone));
    if (isRegKind())
      Reg.Mods = Mods;
    else
      Imm.Mods = Mods;
  }

  bool hasModifiers() const {
    return getModifiers().hasModifiers();
  }

  bool hasFPModifiers() const {
    return getModifiers().hasFPModifiers();
  }

  bool hasIntModifiers() const {
    return getModifiers().hasIntModifiers();
  }
  void addImmOperands(MCInst &Inst, unsigned N,
                      bool ApplyModifiers = true) const;

  void addLiteralImmOperand(MCInst &Inst, int64_t Val,
                            bool ApplyModifiers) const;

  void addRegOperands(MCInst &Inst, unsigned N) const;

  void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
    if (isRegKind())
      addRegOperands(Inst, N);
    else
      addImmOperands(Inst, N);
  }

  void addRegOrImmWithInputModsOperands(MCInst &Inst, unsigned N) const {
    Modifiers Mods = getModifiers();
    Inst.addOperand(MCOperand::createImm(Mods.getModifiersOperand()));
    if (isRegKind()) {
      addRegOperands(Inst, N);
    } else {
      addImmOperands(Inst, N, false);
    }
  }

  void addRegOrImmWithFPInputModsOperands(MCInst &Inst, unsigned N) const {
    assert(!hasIntModifiers());
    addRegOrImmWithInputModsOperands(Inst, N);
  }

  void addRegOrImmWithIntInputModsOperands(MCInst &Inst, unsigned N) const {
    assert(!hasFPModifiers());
    addRegOrImmWithInputModsOperands(Inst, N);
  }

  void addRegWithInputModsOperands(MCInst &Inst, unsigned N) const {
    Modifiers Mods = getModifiers();
    Inst.addOperand(MCOperand::createImm(Mods.getModifiersOperand()));
    addRegOperands(Inst, N);
  }

  void addRegWithFPInputModsOperands(MCInst &Inst, unsigned N) const {
    assert(!hasIntModifiers());
    addRegWithInputModsOperands(Inst, N);
  }

  void addRegWithIntInputModsOperands(MCInst &Inst, unsigned N) const {
    assert(!hasFPModifiers());
    addRegWithInputModsOperands(Inst, N);
  }
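  // Illustration (added for this excerpt, not from the original source): for
  // "v_add_f32 v0, -|v1|, v2" the src0 modifiers are emitted first as an
  // immediate (SISrcMods::NEG | SISrcMods::ABS) and then the v1 register
  // operand itself, matching the src0_modifiers/src0 operand pairs in the
  // instruction definitions.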
  static void printImmTy(raw_ostream& OS, ImmTy Type) {
    // clang-format off
    switch (Type) {
    case ImmTyNone: OS << "None"; break;
    case ImmTyGDS: OS << "GDS"; break;
    case ImmTyLDS: OS << "LDS"; break;
    case ImmTyOffen: OS << "Offen"; break;
    case ImmTyIdxen: OS << "Idxen"; break;
    case ImmTyAddr64: OS << "Addr64"; break;
    case ImmTyOffset: OS << "Offset"; break;
    case ImmTyInstOffset: OS << "InstOffset"; break;
    case ImmTyOffset0: OS << "Offset0"; break;
    case ImmTyOffset1: OS << "Offset1"; break;
    case ImmTySMEMOffsetMod: OS << "SMEMOffsetMod"; break;
    case ImmTyCPol: OS << "CPol"; break;
    case ImmTyIndexKey8bit: OS << "index_key"; break;
    case ImmTyIndexKey16bit: OS << "index_key"; break;
    case ImmTyTFE: OS << "TFE"; break;
    case ImmTyD16: OS << "D16"; break;
    case ImmTyFORMAT: OS << "FORMAT"; break;
    case ImmTyClamp: OS << "Clamp"; break;
    case ImmTyOModSI: OS << "OModSI"; break;
    case ImmTyDPP8: OS << "DPP8"; break;
    case ImmTyDppCtrl: OS << "DppCtrl"; break;
    case ImmTyDppRowMask: OS << "DppRowMask"; break;
    case ImmTyDppBankMask: OS << "DppBankMask"; break;
    case ImmTyDppBoundCtrl: OS << "DppBoundCtrl"; break;
    case ImmTyDppFI: OS << "DppFI"; break;
    case ImmTySDWADstSel: OS << "SDWADstSel"; break;
    case ImmTySDWASrc0Sel: OS << "SDWASrc0Sel"; break;
    case ImmTySDWASrc1Sel: OS << "SDWASrc1Sel"; break;
    case ImmTySDWADstUnused: OS << "SDWADstUnused"; break;
    case ImmTyDMask: OS << "DMask"; break;
    case ImmTyDim: OS << "Dim"; break;
    case ImmTyUNorm: OS << "UNorm"; break;
    case ImmTyDA: OS << "DA"; break;
    case ImmTyR128A16: OS << "R128A16"; break;
    case ImmTyA16: OS << "A16"; break;
    case ImmTyLWE: OS << "LWE"; break;
    case ImmTyOff: OS << "Off"; break;
    case ImmTyExpTgt: OS << "ExpTgt"; break;
    case ImmTyExpCompr: OS << "ExpCompr"; break;
    case ImmTyExpVM: OS << "ExpVM"; break;
    case ImmTyHwreg: OS << "Hwreg"; break;
    case ImmTySendMsg: OS << "SendMsg"; break;
    case ImmTyInterpSlot: OS << "InterpSlot"; break;
    case ImmTyInterpAttr: OS << "InterpAttr"; break;
    case ImmTyInterpAttrChan: OS << "InterpAttrChan"; break;
    case ImmTyOpSel: OS << "OpSel"; break;
    case ImmTyOpSelHi: OS << "OpSelHi"; break;
    case ImmTyNegLo: OS << "NegLo"; break;
    case ImmTyNegHi: OS << "NegHi"; break;
    case ImmTySwizzle: OS << "Swizzle"; break;
    case ImmTyGprIdxMode: OS << "GprIdxMode"; break;
    case ImmTyHigh: OS << "High"; break;
    case ImmTyBLGP: OS << "BLGP"; break;
    case ImmTyCBSZ: OS << "CBSZ"; break;
    case ImmTyABID: OS << "ABID"; break;
    case ImmTyEndpgm: OS << "Endpgm"; break;
    case ImmTyWaitVDST: OS << "WaitVDST"; break;
    case ImmTyWaitEXP: OS << "WaitEXP"; break;
    case ImmTyWaitVAVDst: OS << "WaitVAVDst"; break;
    case ImmTyWaitVMVSrc: OS << "WaitVMVSrc"; break;
    case ImmTyByteSel: OS << "ByteSel"; break;
    }
    // clang-format on
  }
  void print(raw_ostream &OS) const override {
    switch (Kind) {
    case Register:
      OS << "<register " << AMDGPUInstPrinter::getRegisterName(getReg())
         << " mods: " << Reg.Mods << '>';
      break;
    case Immediate:
      OS << '<' << getImm();
      if (getImmTy() != ImmTyNone) {
        OS << " type: "; printImmTy(OS, getImmTy());
      }
      OS << " mods: " << Imm.Mods << '>';
      break;
    case Token:
      OS << '\'' << getToken() << '\'';
      break;
    case Expression:
      OS << "<expr " << *Expr << '>';
      break;
    }
  }
  static AMDGPUOperand::Ptr CreateImm(const AMDGPUAsmParser *AsmParser,
                                      int64_t Val, SMLoc Loc,
                                      ImmTy Type = ImmTyNone,
                                      bool IsFPImm = false) {
    auto Op = std::make_unique<AMDGPUOperand>(Immediate, AsmParser);
    Op->Imm.Val = Val;
    Op->Imm.IsFPImm = IsFPImm;
    Op->Imm.Kind = ImmKindTyNone;
    Op->Imm.Type = Type;
    Op->Imm.Mods = Modifiers();
    Op->StartLoc = Loc;
    Op->EndLoc = Loc;
    return Op;
  }
  static AMDGPUOperand::Ptr CreateToken(const AMDGPUAsmParser *AsmParser,
                                        StringRef Str, SMLoc Loc,
                                        bool HasExplicitEncodingSize = true) {
    auto Res = std::make_unique<AMDGPUOperand>(Token, AsmParser);
    Res->Tok.Data = Str.data();
    Res->Tok.Length = Str.size();
    Res->StartLoc = Loc;
    Res->EndLoc = Loc;
    return Res;
  }
  static AMDGPUOperand::Ptr CreateReg(const AMDGPUAsmParser *AsmParser,
                                      unsigned RegNo, SMLoc S, SMLoc E) {
    auto Op = std::make_unique<AMDGPUOperand>(Register, AsmParser);
    Op->Reg.RegNo = RegNo;
    Op->Reg.Mods = Modifiers();
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
  static AMDGPUOperand::Ptr CreateExpr(const AMDGPUAsmParser *AsmParser,
                                       const class MCExpr *Expr, SMLoc S) {
    auto Op = std::make_unique<AMDGPUOperand>(Expression, AsmParser);
    Op->Expr = Expr;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods) {
  OS << "abs:" << Mods.Abs << " neg: " << Mods.Neg << " sext:" << Mods.Sext;
  return OS;
}
class KernelScopeInfo {
  int SgprIndexUnusedMin = -1;
  int VgprIndexUnusedMin = -1;
  int AgprIndexUnusedMin = -1;
  MCContext *Ctx = nullptr;
  MCSubtargetInfo const *MSTI = nullptr;

  void usesSgprAt(int i) {
    if (i >= SgprIndexUnusedMin) {
      SgprIndexUnusedMin = ++i;
      if (Ctx) {
        MCSymbol* const Sym = Ctx->getOrCreateSymbol(Twine(".kernel.sgpr_count"));
        Sym->setVariableValue(MCConstantExpr::create(SgprIndexUnusedMin, *Ctx));
      }
    }
  }

  void usesVgprAt(int i) {
    if (i >= VgprIndexUnusedMin) {
      VgprIndexUnusedMin = ++i;
      if (Ctx) {
        MCSymbol* const Sym = Ctx->getOrCreateSymbol(Twine(".kernel.vgpr_count"));
        int totalVGPR = getTotalNumVGPRs(isGFX90A(*MSTI), AgprIndexUnusedMin,
                                         VgprIndexUnusedMin);
        Sym->setVariableValue(MCConstantExpr::create(totalVGPR, *Ctx));
      }
    }
  }

  void usesAgprAt(int i) {
    if (i >= AgprIndexUnusedMin) {
      AgprIndexUnusedMin = ++i;
      if (Ctx) {
        MCSymbol* const Sym = Ctx->getOrCreateSymbol(Twine(".kernel.agpr_count"));
        Sym->setVariableValue(MCConstantExpr::create(AgprIndexUnusedMin, *Ctx));

        // Also refresh vgpr_count: the number of physical registers consumed
        // depends on both the VGPR and AGPR high-water marks.
        MCSymbol* const vSym = Ctx->getOrCreateSymbol(Twine(".kernel.vgpr_count"));
        int totalVGPR = getTotalNumVGPRs(isGFX90A(*MSTI), AgprIndexUnusedMin,
                                         VgprIndexUnusedMin);
        vSym->setVariableValue(MCConstantExpr::create(totalVGPR, *Ctx));
      }
    }
  }

public:
  KernelScopeInfo() = default;

  void initialize(MCContext &Context) {
    Ctx = &Context;
    MSTI = Ctx->getSubtargetInfo();

    usesSgprAt(SgprIndexUnusedMin = -1);
    usesVgprAt(VgprIndexUnusedMin = -1);
    if (hasMAIInsts(*MSTI))
      usesAgprAt(AgprIndexUnusedMin = -1);
  }

  void usesRegister(RegisterKind RegKind, unsigned DwordRegIndex,
                    unsigned RegWidth) {
    switch (RegKind) {
    case IS_SGPR:
      usesSgprAt(DwordRegIndex + divideCeil(RegWidth, 32) - 1);
      break;
    case IS_AGPR:
      usesAgprAt(DwordRegIndex + divideCeil(RegWidth, 32) - 1);
      break;
    case IS_VGPR:
      usesVgprAt(DwordRegIndex + divideCeil(RegWidth, 32) - 1);
      break;
    default:
      break;
    }
  }
};
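// Example (added for this excerpt): parsing "v[10:11]" calls
// usesRegister(IS_VGPR, 10, 64); usesVgprAt(10 + divideCeil(64, 32) - 1)
// then raises VgprIndexUnusedMin to 12, so .kernel.vgpr_count resolves to 12.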
  unsigned ForcedEncodingSize = 0;
  bool ForcedDPP = false;
  bool ForcedSDWA = false;
  KernelScopeInfo KernelScope;

#define GET_ASSEMBLER_HEADER
#include "AMDGPUGenAsmMatcher.inc"
  void createConstantSymbol(StringRef Id, int64_t Val);

  bool ParseAsAbsoluteExpression(uint32_t &Ret);
  bool calculateGPRBlocks(const FeatureBitset &Features, const MCExpr *VCCUsed,
                          const MCExpr *FlatScrUsed, bool XNACKUsed,
                          std::optional<bool> EnableWavefrontSize32,
                          const MCExpr *NextFreeVGPR, SMRange VGPRRange,
                          const MCExpr *NextFreeSGPR, SMRange SGPRRange,
                          const MCExpr *&VGPRBlocks, const MCExpr *&SGPRBlocks);
  bool ParseDirectiveAMDGCNTarget();
  bool ParseDirectiveAMDHSACodeObjectVersion();
  bool ParseDirectiveAMDHSAKernel();
  bool ParseDirectiveAMDKernelCodeT();
  bool ParseDirectiveAMDGPUHsaKernel();
  bool ParseDirectiveISAVersion();
  bool ParseDirectiveHSAMetadata();
  bool ParseDirectivePALMetadataBegin();
  bool ParseDirectivePALMetadata();
  bool ParseDirectiveAMDGPULDS();

  /// Common code to parse out a block of text (typically YAML) between start
  /// and end directives.
  bool ParseToEndDirective(const char *AssemblerDirectiveBegin,
                           const char *AssemblerDirectiveEnd,
                           std::string &CollectString);

  bool AddNextRegisterToList(unsigned &Reg, unsigned &RegWidth,
                             RegisterKind RegKind, unsigned Reg1, SMLoc Loc);
  bool ParseAMDGPURegister(RegisterKind &RegKind, unsigned &Reg,
                           unsigned &RegNum, unsigned &RegWidth,
                           bool RestoreOnFailure = false);
  bool ParseAMDGPURegister(RegisterKind &RegKind, unsigned &Reg,
                           unsigned &RegNum, unsigned &RegWidth,
                           SmallVectorImpl<AsmToken> &Tokens);
  unsigned ParseRegularReg(RegisterKind &RegKind, unsigned &RegNum,
                           unsigned &RegWidth,
                           SmallVectorImpl<AsmToken> &Tokens);
  unsigned ParseSpecialReg(RegisterKind &RegKind, unsigned &RegNum,
                           unsigned &RegWidth,
                           SmallVectorImpl<AsmToken> &Tokens);
  unsigned ParseRegList(RegisterKind &RegKind, unsigned &RegNum,
                        unsigned &RegWidth, SmallVectorImpl<AsmToken> &Tokens);
  bool ParseRegRange(unsigned &Num, unsigned &Width);
  unsigned getRegularReg(RegisterKind RegKind, unsigned RegNum, unsigned SubReg,
                         unsigned RegWidth, SMLoc Loc);

  std::optional<StringRef> getGprCountSymbolName(RegisterKind RegKind);
  void initializeGprCountSymbol(RegisterKind RegKind);
  bool updateGprCountSymbols(RegisterKind RegKind, unsigned DwordRegIndex,
                             unsigned RegWidth);

  enum OperandMode {
    OperandMode_Default,
    OperandMode_NSA,
  };

  using OptionalImmIndexMap = std::map<AMDGPUOperand::ImmTy, unsigned>;
    if (getFeatureBits().none()) {
      // Set default features.
      copySTI().ToggleFeature("southern-islands");
    }

    FeatureBitset FB = getFeatureBits();
    if (!FB[AMDGPU::FeatureWavefrontSize64] &&
        !FB[AMDGPU::FeatureWavefrontSize32]) {
      // If there is no default wave size, set wave32 as the default.
      copySTI().ToggleFeature(AMDGPU::FeatureWavefrontSize32);
    }

    AMDGPU::IsaVersion ISA = AMDGPU::getIsaVersion(getSTI().getCPU());
    if (ISA.Major >= 6 && isHsaAbi(getSTI())) {
      createConstantSymbol(".amdgcn.gfx_generation_number", ISA.Major);
      createConstantSymbol(".amdgcn.gfx_generation_minor", ISA.Minor);
      createConstantSymbol(".amdgcn.gfx_generation_stepping", ISA.Stepping);
    } else {
      createConstantSymbol(".option.machine_version_major", ISA.Major);
      createConstantSymbol(".option.machine_version_minor", ISA.Minor);
      createConstantSymbol(".option.machine_version_stepping", ISA.Stepping);
    }

    if (ISA.Major >= 6 && isHsaAbi(getSTI())) {
      initializeGprCountSymbol(IS_VGPR);
      initializeGprCountSymbol(IS_SGPR);
    } else
      KernelScope.initialize(getContext());

    for (auto [Symbol, Code] : UCVersionDefines)
      createConstantSymbol(Symbol, Code);

    createConstantSymbol("UC_VERSION_W64_BIT", 0x2000);
    createConstantSymbol("UC_VERSION_W32_BIT", 0x4000);
    createConstantSymbol("UC_VERSION_MDP_BIT", 0x8000);
  bool hasInv2PiInlineImm() const {
    return getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm];
  }

  bool hasFlatOffsets() const {
    return getFeatureBits()[AMDGPU::FeatureFlatInstOffsets];
  }

  bool hasArchitectedFlatScratch() const {
    return getFeatureBits()[AMDGPU::FeatureArchitectedFlatScratch];
  }

  bool hasSGPR102_SGPR103() const {
    return !isVI() && !isGFX9();
  }

  bool hasSGPR104_SGPR105() const { return isGFX10Plus(); }

  bool hasIntClamp() const {
    return getFeatureBits()[AMDGPU::FeatureIntClamp];
  }

  bool hasPartialNSAEncoding() const {
    return getFeatureBits()[AMDGPU::FeaturePartialNSAEncoding];
  }
  void setForcedEncodingSize(unsigned Size) { ForcedEncodingSize = Size; }
  void setForcedDPP(bool ForceDPP_) { ForcedDPP = ForceDPP_; }
  void setForcedSDWA(bool ForceSDWA_) { ForcedSDWA = ForceSDWA_; }

  unsigned getForcedEncodingSize() const { return ForcedEncodingSize; }
  bool isForcedVOP3() const { return ForcedEncodingSize == 64; }
  bool isForcedDPP() const { return ForcedDPP; }
  bool isForcedSDWA() const { return ForcedSDWA; }

  ArrayRef<unsigned> getMatchedVariants() const;
  StringRef getMatchedVariantName() const;

  std::unique_ptr<AMDGPUOperand> parseRegister(bool RestoreOnFailure = false);
  bool ParseRegister(MCRegister &RegNo, SMLoc &StartLoc, SMLoc &EndLoc,
                     bool RestoreOnFailure);
  bool parseRegister(MCRegister &Reg, SMLoc &StartLoc,
                     SMLoc &EndLoc) override;
  ParseStatus tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
                               SMLoc &EndLoc) override;
  unsigned checkTargetMatchPredicate(MCInst &Inst) override;
  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;
  bool matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  ParseStatus parseOperand(OperandVector &Operands, StringRef Mnemonic,
                           OperandMode Mode = OperandMode_Default);
  ParseStatus parseIntWithPrefix(const char *Prefix, int64_t &Int);
  ParseStatus parseIntWithPrefix(
      const char *Prefix, OperandVector &Operands,
      AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone,
      std::function<bool(int64_t &)> ConvertResult = nullptr);
  ParseStatus parseOperandArrayWithPrefix(
      const char *Prefix, OperandVector &Operands,
      AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone,
      bool (*ConvertResult)(int64_t &) = nullptr);
  ParseStatus parseNamedBit(
      StringRef Name, OperandVector &Operands,
      AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone);
  ParseStatus parseStringOrIntWithPrefix(OperandVector &Operands,
                                         StringRef Name,
                                         ArrayRef<const char *> Ids,
                                         AMDGPUOperand::ImmTy Type);

  bool isModifier();
  bool isOperandModifier(const AsmToken &Token, const AsmToken &NextToken) const;
  bool isRegOrOperandModifier(const AsmToken &Token, const AsmToken &NextToken) const;
  bool isNamedOperandModifier(const AsmToken &Token, const AsmToken &NextToken) const;
  bool isOpcodeModifierWithVal(const AsmToken &Token, const AsmToken &NextToken) const;
  bool parseSP3NegModifier();
  ParseStatus parseImm(OperandVector &Operands, bool HasSP3AbsModifier = false,
                       bool HasLit = false);
  ParseStatus parseReg(OperandVector &Operands);
  ParseStatus parseRegOrImm(OperandVector &Operands, bool HasSP3AbsMod = false,
                            bool HasLit = false);
  ParseStatus parseRegOrImmWithFPInputMods(OperandVector &Operands,
                                           bool AllowImm = true);
  ParseStatus parseRegOrImmWithIntInputMods(OperandVector &Operands,
                                            bool AllowImm = true);
  ParseStatus tryParseIndexKey(OperandVector &Operands,
                               AMDGPUOperand::ImmTy ImmTy);

  ParseStatus parseSymbolicOrNumericFormat(int64_t &Format);
  bool tryParseFmt(const char *Pref, int64_t MaxVal, int64_t &Val);
  bool matchDfmtNfmt(int64_t &Dfmt, int64_t &Nfmt, StringRef FormatStr, SMLoc Loc);

  bool parseCnt(int64_t &IntVal);
  bool parseDepCtr(int64_t &IntVal, unsigned &Mask);
  bool parseDelay(int64_t &Delay);
  struct OperandInfoTy {
    SMLoc Loc;
    int64_t Val;
    bool IsSymbolic = false;
    bool IsDefined = false;

    OperandInfoTy(int64_t Val) : Val(Val) {}
  };

  struct StructuredOpField : OperandInfoTy {
    StringLiteral Id;
    StringLiteral Desc;
    unsigned Width;
    bool IsDefined = false;

    StructuredOpField(StringLiteral Id, StringLiteral Desc, unsigned Width,
                      int64_t Default)
        : OperandInfoTy(Default), Id(Id), Desc(Desc), Width(Width) {}
    virtual ~StructuredOpField() = default;

    bool Error(AMDGPUAsmParser &Parser, const Twine &Err) const {
      Parser.Error(Loc, "invalid " + Desc + ": " + Err);
      return false;
    }

    virtual bool validate(AMDGPUAsmParser &Parser) const {
      if (IsSymbolic && Val == OPR_ID_UNSUPPORTED)
        return Error(Parser, "not supported on this GPU");
      if (!isUIntN(Width, Val))
        return Error(Parser, "only " + Twine(Width) + "-bit values are legal");
      return true;
    }
  };

  bool parseSendMsgBody(OperandInfoTy &Msg, OperandInfoTy &Op,
                        OperandInfoTy &Stream);
  bool validateSendMsg(const OperandInfoTy &Msg,
                       const OperandInfoTy &Op,
                       const OperandInfoTy &Stream);
  bool parseHwregBody(OperandInfoTy &HwReg, OperandInfoTy &Offset,
                      OperandInfoTy &Width);
  SMLoc getOperandLoc(std::function<bool(const AMDGPUOperand&)> Test,
                      const OperandVector &Operands) const;
  SMLoc getLitLoc(const OperandVector &Operands,
                  bool SearchMandatoryLiterals = false) const;

  bool validateSOPLiteral(const MCInst &Inst) const;
  bool validateVOPDRegBankConstraints(const MCInst &Inst,
                                      const OperandVector &Operands);
  bool validateIntClampSupported(const MCInst &Inst);
  bool validateMIMGAtomicDMask(const MCInst &Inst);
  bool validateMIMGGatherDMask(const MCInst &Inst);
  bool validateMIMGDataSize(const MCInst &Inst, const SMLoc &IDLoc);
  bool validateMIMGAddrSize(const MCInst &Inst, const SMLoc &IDLoc);
  bool validateMIMGD16(const MCInst &Inst);
  bool validateMIMGMSAA(const MCInst &Inst);
  bool validateOpSel(const MCInst &Inst);
  bool validateVccOperand(unsigned Reg) const;
  bool validateAGPRLdSt(const MCInst &Inst) const;
  bool validateVGPRAlign(const MCInst &Inst) const;
  bool validateDivScale(const MCInst &Inst);
  bool validateCoherencyBits(const MCInst &Inst, const OperandVector &Operands,
                             const SMLoc &IDLoc);
  bool validateTHAndScopeBits(const MCInst &Inst, const OperandVector &Operands,
                              const unsigned CPol);
  std::optional<StringRef> validateLdsDirect(const MCInst &Inst);
  unsigned getConstantBusLimit(unsigned Opcode) const;
  bool usesConstantBus(const MCInst &Inst, unsigned OpIdx);
  bool isInlineConstant(const MCInst &Inst, unsigned OpIdx) const;
  unsigned findImplicitSGPRReadInVOP(const MCInst &Inst) const;
  AsmToken peekToken(bool ShouldSkipSpace = true);
  SMLoc getLoc() const;

  void onBeginOfFile() override;
  bool parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) override;
  bool parseSwizzleOperand(int64_t &Op,
                           const unsigned MinVal,
                           const unsigned MaxVal,
                           const StringRef ErrMsg,
                           SMLoc &Loc);
  bool parseSwizzleOperands(const unsigned OpNum, int64_t* Op,
                            const unsigned MinVal,
                            const unsigned MaxVal,
                            const StringRef ErrMsg);
  bool parseSwizzleOffset(int64_t &Imm);
  bool parseSwizzleMacro(int64_t &Imm);
  bool parseSwizzleQuadPerm(int64_t &Imm);
  bool parseSwizzleBitmaskPerm(int64_t &Imm);
  bool parseSwizzleBroadcast(int64_t &Imm);
  bool parseSwizzleSwap(int64_t &Imm);
  bool parseSwizzleReverse(int64_t &Imm);

  int64_t parseGPRIdxMacro();

  void cvtVOP3(MCInst &Inst, const OperandVector &Operands,
               OptionalImmIndexMap &OptionalIdx);
  void cvtVOP3P(MCInst &Inst, const OperandVector &Operands,
                OptionalImmIndexMap &OptionalIdx);

  bool parseDimId(unsigned &Encoding);
  bool convertDppBoundCtrl(int64_t &BoundCtrl);
  int64_t parseDPPCtrlSel(StringRef Ctrl);
  int64_t parseDPPCtrlPerm();
  void cvtDPP(MCInst &Inst, const OperandVector &Operands,
              bool IsDPP8 = false);
  ParseStatus parseSDWASel(OperandVector &Operands, StringRef Prefix,
                           AMDGPUOperand::ImmTy Type);
  void cvtSDWA(MCInst &Inst, const OperandVector &Operands,
               uint64_t BasicInstType,
               bool SkipDstVcc = false,
               bool SkipSrcVcc = false);
static const fltSemantics *getFltSemantics(unsigned Size) {
  switch (Size) {
  case 4:
    return &APFloat::IEEEsingle();
  case 8:
    return &APFloat::IEEEdouble();
  case 2:
    return &APFloat::IEEEhalf();
  default:
    llvm_unreachable("unsupported fp type");
  }
}

static const fltSemantics *getOpFltSemantics(uint8_t OperandType) {
  switch (OperandType) {
  case AMDGPU::OPERAND_REG_IMM_INT32:
  case AMDGPU::OPERAND_REG_IMM_FP32:
  // ... (other 32-bit and packed 32-bit operand types)
    return &APFloat::IEEEsingle();
  case AMDGPU::OPERAND_REG_IMM_INT64:
  case AMDGPU::OPERAND_REG_IMM_FP64:
  // ... (other 64-bit operand types)
    return &APFloat::IEEEdouble();
  case AMDGPU::OPERAND_REG_IMM_INT16:
  case AMDGPU::OPERAND_REG_IMM_FP16:
  // ... (other 16-bit and packed 16-bit operand types)
    return &APFloat::IEEEhalf();
  case AMDGPU::OPERAND_REG_IMM_BF16:
  // ... (other bf16 operand types)
    return &APFloat::BFloat();
  default:
    llvm_unreachable("unsupported fp type");
  }
}

static bool canLosslesslyConvertToFPType(APFloat &FPLiteral, MVT VT) {
  bool Lost;

  // Convert literal to single precision
  APFloat::opStatus Status = FPLiteral.convert(*getFltSemantics(VT),
                                               APFloat::rmNearestTiesToEven,
                                               &Lost);
  // We allow precision lost but not overflow or underflow
  if (Status != APFloat::opOK &&
      Lost &&
      ((Status & APFloat::opOverflow) != 0 ||
       (Status & APFloat::opUnderflow) != 0)) {
    return false;
  }
  return true;
}
bool AMDGPUOperand::isInlinableImm(MVT type) const {
  // This is a hack to enable named inline values like shared_base with both
  // 32-bit and 64-bit operands. Note that these values are defined as 32-bit
  // operands only.
  if (isInlineValue())
    return true;

  if (!isImmTy(ImmTyNone)) {
    // Only plain immediates are inlinable (e.g. "clamp" attribute is not)
    return false;
  }

  APInt Literal(64, Imm.Val);

  if (Imm.IsFPImm) { // We got fp literal token
    if (type == MVT::f64 || type == MVT::i64) { // Expected 64-bit operand
      return AMDGPU::isInlinableLiteral64(Imm.Val,
                                          AsmParser->hasInv2PiInlineImm());
    }

    APFloat FPLiteral(APFloat::IEEEdouble(), APInt(64, Imm.Val));
    if (!canLosslesslyConvertToFPType(FPLiteral, type))
      return false;

    if (type.getScalarSizeInBits() == 16) {
      bool Lost = false;
      FPLiteral.convert(*getFltSemantics(2),
                        APFloat::rmNearestTiesToEven, &Lost);
      // ... (the 16-bit inline-constant check is elided in this excerpt)
    }

    // Check if single precision literal is inlinable
    uint32_t ImmVal = FPLiteral.bitcastToAPInt().getZExtValue();
    return AMDGPU::isInlinableLiteral32(static_cast<int32_t>(ImmVal),
                                        AsmParser->hasInv2PiInlineImm());
  }

  // We got int literal token.
  if (type == MVT::f64 || type == MVT::i64) { // Expected 64-bit operand
    return AMDGPU::isInlinableLiteral64(Imm.Val,
                                        AsmParser->hasInv2PiInlineImm());
  }

  if (type.getScalarSizeInBits() == 16) {
    return isInlineableLiteralOp16(
        static_cast<int16_t>(Literal.getLoBits(16).getSExtValue()),
        type, AsmParser->hasInv2PiInlineImm());
  }

  return AMDGPU::isInlinableLiteral32(
      static_cast<int32_t>(Literal.getLoBits(32).getZExtValue()),
      AsmParser->hasInv2PiInlineImm());
}
bool AMDGPUOperand::isLiteralImm(MVT type) const {
  // Check that this immediate can be added as literal
  if (!isImmTy(ImmTyNone))
    return false;

  if (!Imm.IsFPImm) {
    // We got int literal token.

    if (type == MVT::f64 && hasFPModifiers()) {
      // Cannot apply fp modifiers to int literals preserving the same
      // semantics for VOP1/2/C and VOP3 because of integer truncation.
      // To avoid ambiguity, these cases are disabled.
      return false;
    }

    // ... (integer literal range checks elided in this excerpt)
  }

  // We got fp literal token.
  if (type == MVT::f64) { // Expected 64-bit fp operand
    // The low 64 bits of the literal are zeroed out, but such literals
    // are still accepted.
    return true;
  }

  if (type == MVT::i64) {
    // fp literals are not allowed in 64-bit integer instructions.
    return false;
  }

  // A literal used with a packed operand applies to the low half only, so
  // check it against the corresponding scalar type.
  MVT ExpectedType = (type == MVT::v2f16) ? MVT::f16
                   : (type == MVT::v2i16) ? MVT::f32
                   : (type == MVT::v2f32) ? MVT::f32
                   : type;

  APFloat FPLiteral(APFloat::IEEEdouble(), APInt(64, Imm.Val));
  return canLosslesslyConvertToFPType(FPLiteral, ExpectedType);
}
bool AMDGPUOperand::isRegClass(unsigned RCID) const {
  return isRegKind() && AsmParser->getMRI()->getRegClass(RCID).contains(getReg());
}

bool AMDGPUOperand::isVRegWithInputMods() const {
  return isRegClass(AMDGPU::VGPR_32RegClassID) ||
         // GFX90A allows DPP on 64-bit operands.
         (isRegClass(AMDGPU::VReg_64RegClassID) &&
          AsmParser->getFeatureBits()[AMDGPU::FeatureDPALU_DPP]);
}

template <bool IsFake16> bool AMDGPUOperand::isT16VRegWithInputMods() const {
  return isRegClass(IsFake16 ? AMDGPU::VGPR_32_Lo128RegClassID
                             : AMDGPU::VGPR_16_Lo128RegClassID);
}

bool AMDGPUOperand::isSDWAOperand(MVT type) const {
  if (AsmParser->isVI())
    return isVReg32();
  if (AsmParser->isGFX9Plus())
    return isRegClass(AMDGPU::VS_32RegClassID) || isInlinableImm(type);
  return false;
}

bool AMDGPUOperand::isSDWAFP16Operand() const {
  return isSDWAOperand(MVT::f16);
}

bool AMDGPUOperand::isSDWAFP32Operand() const {
  return isSDWAOperand(MVT::f32);
}

bool AMDGPUOperand::isSDWAInt16Operand() const {
  return isSDWAOperand(MVT::i16);
}

bool AMDGPUOperand::isSDWAInt32Operand() const {
  return isSDWAOperand(MVT::i32);
}

bool AMDGPUOperand::isBoolReg() const {
  auto FB = AsmParser->getFeatureBits();
  return isReg() && ((FB[AMDGPU::FeatureWavefrontSize64] && isSCSrc_b64()) ||
                     (FB[AMDGPU::FeatureWavefrontSize32] && isSCSrc_b32()));
}
uint64_t AMDGPUOperand::applyInputFPModifiers(uint64_t Val, unsigned Size) const
{
  assert(isImmTy(ImmTyNone) && Imm.Mods.hasFPModifiers());
  assert(Size == 2 || Size == 4 || Size == 8);

  const uint64_t FpSignMask = (1ULL << (Size * 8 - 1));

  if (Imm.Mods.Abs)
    Val &= ~FpSignMask;
  if (Imm.Mods.Neg)
    Val ^= FpSignMask;

  return Val;
}

void AMDGPUOperand::addImmOperands(MCInst &Inst, unsigned N,
                                   bool ApplyModifiers) const {
  if (isExpr()) {
    Inst.addOperand(MCOperand::createExpr(Expr));
    return;
  }

  if (AMDGPU::isSISrcOperand(AsmParser->getMII()->get(Inst.getOpcode()),
                             Inst.getNumOperands())) {
    addLiteralImmOperand(Inst, Imm.Val,
                         ApplyModifiers &
                         isImmTy(ImmTyNone) && Imm.Mods.hasFPModifiers());
  } else {
    assert(!isImmTy(ImmTyNone) || !hasModifiers());
    Inst.addOperand(MCOperand::createImm(Imm.Val));
    setImmKindNone();
  }
}
void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val,
                                         bool ApplyModifiers) const {
  const auto& InstDesc = AsmParser->getMII()->get(Inst.getOpcode());
  auto OpNum = Inst.getNumOperands();

  if (ApplyModifiers) {
    const unsigned Size = Imm.IsFPImm ? sizeof(double)
                                      : AMDGPU::getOperandSize(InstDesc, OpNum);
    Val = applyInputFPModifiers(Val, Size);
  }

  APInt Literal(64, Val);
  uint8_t OpTy = InstDesc.operands()[OpNum].OperandType;

  if (Imm.IsFPImm) { // We got fp literal token
    // 64-bit operands: try to inline the f64 literal first.
    if (AMDGPU::isInlinableLiteral64(Literal.getZExtValue(),
                                     AsmParser->hasInv2PiInlineImm())) {
      Inst.addOperand(MCOperand::createImm(Literal.getZExtValue()));
      setImmKindConst();
      return;
    }

    // Non-inlineable f64: only the high 32 bits can be encoded.
    if (Literal.getLoBits(32) != 0) {
      const_cast<AMDGPUAsmParser *>(AsmParser)->Warning(Inst.getLoc(),
        "Can't encode literal as exact 64-bit floating-point operand. "
        "Low 32-bits will be set to zero");
      Val &= 0xffffffff00000000u;
    }

    Inst.addOperand(MCOperand::createImm(Val));
    setImmKindLiteral();
    return;

    // For narrower operand types the double literal is converted first:
    //   bool lost;
    //   APFloat FPLiteral(APFloat::IEEEdouble(), Literal);
    //   FPLiteral.convert(*getOpFltSemantics(OpTy),
    //                     APFloat::rmNearestTiesToEven, &lost);
    //   uint64_t ImmVal = FPLiteral.bitcastToAPInt().getZExtValue();
    // then emitted via setImmKindMandatoryLiteral() for KIMM operands and
    // setImmKindLiteral() otherwise. A special case recognizes the 1/(2*pi)
    // bit pattern (Literal == 0x3fc45f306725feed) when
    // AsmParser->hasInv2PiInlineImm() is set. (Details elided in this
    // excerpt.)
  }

  // We got int literal token.
  // Per operand type: inline the value when the corresponding
  // AMDGPU::isInlinableLiteral*(Val, AsmParser->hasInv2PiInlineImm()) check
  // allows it, otherwise emit a 32-bit literal and call setImmKindLiteral();
  // KIMM operands always call setImmKindMandatoryLiteral(). (Per-type switch
  // elided in this excerpt.)
}
void AMDGPUOperand::addRegOperands(MCInst &Inst, unsigned N) const {
  Inst.addOperand(MCOperand::createReg(AMDGPU::getMCReg(getReg(),
                                                        AsmParser->getSTI())));
}

bool AMDGPUOperand::isInlineValue() const {
  return isRegKind() && ::isInlineValue(getReg());
}

//===----------------------------------------------------------------------===//
// AsmParser
//===----------------------------------------------------------------------===//

void AMDGPUAsmParser::createConstantSymbol(StringRef Id, int64_t Val) {
  MCSymbol *Sym = getContext().getOrCreateSymbol(Id);
  Sym->setVariableValue(MCConstantExpr::create(Val, getContext()));
}
static int getRegClass(RegisterKind Is, unsigned RegWidth) {
  if (Is == IS_VGPR) {
    switch (RegWidth) {
    default: return -1;
    case 32:   return AMDGPU::VGPR_32RegClassID;
    case 64:   return AMDGPU::VReg_64RegClassID;
    case 96:   return AMDGPU::VReg_96RegClassID;
    case 128:  return AMDGPU::VReg_128RegClassID;
    case 160:  return AMDGPU::VReg_160RegClassID;
    case 192:  return AMDGPU::VReg_192RegClassID;
    case 224:  return AMDGPU::VReg_224RegClassID;
    case 256:  return AMDGPU::VReg_256RegClassID;
    case 288:  return AMDGPU::VReg_288RegClassID;
    case 320:  return AMDGPU::VReg_320RegClassID;
    case 352:  return AMDGPU::VReg_352RegClassID;
    case 384:  return AMDGPU::VReg_384RegClassID;
    case 512:  return AMDGPU::VReg_512RegClassID;
    case 1024: return AMDGPU::VReg_1024RegClassID;
    }
  } else if (Is == IS_TTMP) {
    switch (RegWidth) {
    default: return -1;
    case 32:  return AMDGPU::TTMP_32RegClassID;
    case 64:  return AMDGPU::TTMP_64RegClassID;
    case 128: return AMDGPU::TTMP_128RegClassID;
    case 256: return AMDGPU::TTMP_256RegClassID;
    case 512: return AMDGPU::TTMP_512RegClassID;
    }
  } else if (Is == IS_SGPR) {
    switch (RegWidth) {
    default: return -1;
    case 32:  return AMDGPU::SGPR_32RegClassID;
    case 64:  return AMDGPU::SGPR_64RegClassID;
    case 96:  return AMDGPU::SGPR_96RegClassID;
    case 128: return AMDGPU::SGPR_128RegClassID;
    case 160: return AMDGPU::SGPR_160RegClassID;
    case 192: return AMDGPU::SGPR_192RegClassID;
    case 224: return AMDGPU::SGPR_224RegClassID;
    case 256: return AMDGPU::SGPR_256RegClassID;
    case 288: return AMDGPU::SGPR_288RegClassID;
    case 320: return AMDGPU::SGPR_320RegClassID;
    case 352: return AMDGPU::SGPR_352RegClassID;
    case 384: return AMDGPU::SGPR_384RegClassID;
    case 512: return AMDGPU::SGPR_512RegClassID;
    }
  } else if (Is == IS_AGPR) {
    switch (RegWidth) {
    default: return -1;
    case 32:   return AMDGPU::AGPR_32RegClassID;
    case 64:   return AMDGPU::AReg_64RegClassID;
    case 96:   return AMDGPU::AReg_96RegClassID;
    case 128:  return AMDGPU::AReg_128RegClassID;
    case 160:  return AMDGPU::AReg_160RegClassID;
    case 192:  return AMDGPU::AReg_192RegClassID;
    case 224:  return AMDGPU::AReg_224RegClassID;
    case 256:  return AMDGPU::AReg_256RegClassID;
    case 288:  return AMDGPU::AReg_288RegClassID;
    case 320:  return AMDGPU::AReg_320RegClassID;
    case 352:  return AMDGPU::AReg_352RegClassID;
    case 384:  return AMDGPU::AReg_384RegClassID;
    case 512:  return AMDGPU::AReg_512RegClassID;
    case 1024: return AMDGPU::AReg_1024RegClassID;
    }
  }
  return -1;
}
static unsigned getSpecialRegForName(StringRef RegName) {
  return StringSwitch<unsigned>(RegName)
    .Case("exec", AMDGPU::EXEC)
    .Case("vcc", AMDGPU::VCC)
    .Case("flat_scratch", AMDGPU::FLAT_SCR)
    .Case("xnack_mask", AMDGPU::XNACK_MASK)
    .Case("shared_base", AMDGPU::SRC_SHARED_BASE)
    .Case("src_shared_base", AMDGPU::SRC_SHARED_BASE)
    .Case("shared_limit", AMDGPU::SRC_SHARED_LIMIT)
    .Case("src_shared_limit", AMDGPU::SRC_SHARED_LIMIT)
    .Case("private_base", AMDGPU::SRC_PRIVATE_BASE)
    .Case("src_private_base", AMDGPU::SRC_PRIVATE_BASE)
    .Case("private_limit", AMDGPU::SRC_PRIVATE_LIMIT)
    .Case("src_private_limit", AMDGPU::SRC_PRIVATE_LIMIT)
    .Case("pops_exiting_wave_id", AMDGPU::SRC_POPS_EXITING_WAVE_ID)
    .Case("src_pops_exiting_wave_id", AMDGPU::SRC_POPS_EXITING_WAVE_ID)
    .Case("lds_direct", AMDGPU::LDS_DIRECT)
    .Case("src_lds_direct", AMDGPU::LDS_DIRECT)
    .Case("m0", AMDGPU::M0)
    .Case("vccz", AMDGPU::SRC_VCCZ)
    .Case("src_vccz", AMDGPU::SRC_VCCZ)
    .Case("execz", AMDGPU::SRC_EXECZ)
    .Case("src_execz", AMDGPU::SRC_EXECZ)
    .Case("scc", AMDGPU::SRC_SCC)
    .Case("src_scc", AMDGPU::SRC_SCC)
    .Case("tba", AMDGPU::TBA)
    .Case("tma", AMDGPU::TMA)
    .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
    .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
    .Case("xnack_mask_lo", AMDGPU::XNACK_MASK_LO)
    .Case("xnack_mask_hi", AMDGPU::XNACK_MASK_HI)
    .Case("vcc_lo", AMDGPU::VCC_LO)
    .Case("vcc_hi", AMDGPU::VCC_HI)
    .Case("exec_lo", AMDGPU::EXEC_LO)
    .Case("exec_hi", AMDGPU::EXEC_HI)
    .Case("tma_lo", AMDGPU::TMA_LO)
    .Case("tma_hi", AMDGPU::TMA_HI)
    .Case("tba_lo", AMDGPU::TBA_LO)
    .Case("tba_hi", AMDGPU::TBA_HI)
    .Case("pc", AMDGPU::PC_REG)
    .Case("null", AMDGPU::SGPR_NULL)
    .Default(AMDGPU::NoRegister);
}
bool AMDGPUAsmParser::ParseRegister(MCRegister &RegNo, SMLoc &StartLoc,
                                    SMLoc &EndLoc, bool RestoreOnFailure) {
  auto R = parseRegister();
  if (!R)
    return true;
  assert(R->isReg());
  RegNo = R->getReg();
  StartLoc = R->getStartLoc();
  EndLoc = R->getEndLoc();
  return false;
}

bool AMDGPUAsmParser::parseRegister(MCRegister &Reg, SMLoc &StartLoc,
                                    SMLoc &EndLoc) {
  return ParseRegister(Reg, StartLoc, EndLoc, /*RestoreOnFailure=*/false);
}

ParseStatus AMDGPUAsmParser::tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
                                              SMLoc &EndLoc) {
  bool Result = ParseRegister(Reg, StartLoc, EndLoc, /*RestoreOnFailure=*/true);
  bool PendingErrors = getParser().hasPendingError();
  getParser().clearPendingErrors();
  if (PendingErrors)
    return ParseStatus::Failure;
  if (Result)
    return ParseStatus::NoMatch;
  return ParseStatus::Success;
}
bool AMDGPUAsmParser::AddNextRegisterToList(unsigned &Reg, unsigned &RegWidth,
                                            RegisterKind RegKind, unsigned Reg1,
                                            SMLoc Loc) {
  switch (RegKind) {
  case IS_SPECIAL:
    if (Reg == AMDGPU::EXEC_LO && Reg1 == AMDGPU::EXEC_HI) {
      Reg = AMDGPU::EXEC; RegWidth = 64; return true;
    }
    if (Reg == AMDGPU::FLAT_SCR_LO && Reg1 == AMDGPU::FLAT_SCR_HI) {
      Reg = AMDGPU::FLAT_SCR; RegWidth = 64; return true;
    }
    if (Reg == AMDGPU::XNACK_MASK_LO && Reg1 == AMDGPU::XNACK_MASK_HI) {
      Reg = AMDGPU::XNACK_MASK; RegWidth = 64; return true;
    }
    if (Reg == AMDGPU::VCC_LO && Reg1 == AMDGPU::VCC_HI) {
      Reg = AMDGPU::VCC; RegWidth = 64; return true;
    }
    if (Reg == AMDGPU::TBA_LO && Reg1 == AMDGPU::TBA_HI) {
      Reg = AMDGPU::TBA; RegWidth = 64; return true;
    }
    if (Reg == AMDGPU::TMA_LO && Reg1 == AMDGPU::TMA_HI) {
      Reg = AMDGPU::TMA; RegWidth = 64; return true;
    }
    Error(Loc, "register does not fit in the list");
    return false;
  case IS_VGPR:
  case IS_SGPR:
  case IS_AGPR:
  case IS_TTMP:
    if (Reg1 != Reg + RegWidth / 32) {
      Error(Loc, "registers in a list must have consecutive indices");
      return false;
    }
    RegWidth += 32;
    return true;
  default:
    llvm_unreachable("unexpected register kind");
  }
}
static constexpr RegInfo RegularRegisters[] = {
  {{"v"},    IS_VGPR},
  {{"s"},    IS_SGPR},
  {{"ttmp"}, IS_TTMP},
  {{"acc"},  IS_AGPR},
  {{"a"},    IS_AGPR},
};

static bool isRegularReg(RegisterKind Kind) {
  return Kind == IS_VGPR ||
         Kind == IS_SGPR ||
         Kind == IS_TTMP ||
         Kind == IS_AGPR;
}

static const RegInfo* getRegularRegInfo(StringRef Str) {
  for (const RegInfo &Reg : RegularRegisters)
    if (Str.starts_with(Reg.Name))
      return &Reg;
  return nullptr;
}

static bool getRegNum(StringRef Str, unsigned& Num) {
  return !Str.getAsInteger(10, Num);
}

bool
AMDGPUAsmParser::isRegister(const AsmToken &Token,
                            const AsmToken &NextToken) const {
  // A register-like identifier is either a special register name or a
  // regular-register prefix followed by an index suffix or a '[lo:hi]' range.
  // (Full token checks elided in this excerpt.)
  StringRef RegSuffix;
  // ...
  if (!RegSuffix.empty()) {
    // ...
  }
  // ...
  return false;
}

bool
AMDGPUAsmParser::isRegister()
{
  return isRegister(getToken(), peekToken());
}
unsigned AMDGPUAsmParser::getRegularReg(RegisterKind RegKind, unsigned RegNum,
                                        unsigned SubReg, unsigned RegWidth,
                                        SMLoc Loc) {
  assert(isRegularReg(RegKind));

  unsigned AlignSize = 1;
  if (RegKind == IS_SGPR || RegKind == IS_TTMP) {
    // SGPR and TTMP registers must be aligned.
    // Max required alignment is 4 dwords.
    AlignSize = std::min(divideCeil(RegWidth, 32), 4u);
  }

  if (RegNum % AlignSize != 0) {
    Error(Loc, "invalid register alignment");
    return AMDGPU::NoRegister;
  }

  unsigned RegIdx = RegNum / AlignSize;
  int RCID = getRegClass(RegKind, RegWidth);
  if (RCID == -1) {
    Error(Loc, "invalid or unsupported register size");
    return AMDGPU::NoRegister;
  }

  const MCRegisterInfo *TRI = getContext().getRegisterInfo();
  const MCRegisterClass RC = TRI->getRegClass(RCID);
  if (RegIdx >= RC.getNumRegs()) {
    Error(Loc, "register index is out of range");
    return AMDGPU::NoRegister;
  }

  unsigned Reg = RC.getRegister(RegIdx);

  if (SubReg) {
    Reg = TRI->getSubReg(Reg, SubReg);
    assert(Reg && "Invalid subregister!");
  }

  return Reg;
}
bool AMDGPUAsmParser::ParseRegRange(unsigned &Num, unsigned &RegWidth) {
  int64_t RegLo, RegHi;
  if (!skipToken(AsmToken::LBrac, "missing register index"))
    return false;

  SMLoc FirstIdxLoc = getLoc();
  SMLoc SecondIdxLoc;

  if (!parseExpr(RegLo))
    return false;

  if (trySkipToken(AsmToken::Colon)) {
    SecondIdxLoc = getLoc();
    if (!parseExpr(RegHi))
      return false;
  } else {
    RegHi = RegLo;
  }

  if (!skipToken(AsmToken::RBrac, "expected a closing square bracket"))
    return false;

  if (!isUInt<32>(RegLo)) {
    Error(FirstIdxLoc, "invalid register index");
    return false;
  }

  if (!isUInt<32>(RegHi)) {
    Error(SecondIdxLoc, "invalid register index");
    return false;
  }

  if (RegLo > RegHi) {
    Error(FirstIdxLoc, "first register index should not exceed second index");
    return false;
  }

  Num = static_cast<unsigned>(RegLo);
  RegWidth = 32 * ((RegHi - RegLo) + 1);
  return true;
}
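// Example (added for this excerpt): "s[4:7]" parses to RegLo = 4, RegHi = 7,
// giving Num = 4 and RegWidth = 32 * ((7 - 4) + 1) = 128, i.e. an aligned
// 4-dword SGPR tuple.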
unsigned AMDGPUAsmParser::ParseSpecialReg(RegisterKind &RegKind,
                                          unsigned &RegNum, unsigned &RegWidth,
                                          SmallVectorImpl<AsmToken> &Tokens) {
  unsigned Reg = getSpecialRegForName(getTokenStr());
  if (Reg) {
    RegNum = 0;
    RegWidth = 32;
    RegKind = IS_SPECIAL;
    Tokens.push_back(getToken());
    lex(); // skip register name
  }
  return Reg;
}

unsigned AMDGPUAsmParser::ParseRegularReg(RegisterKind &RegKind,
                                          unsigned &RegNum, unsigned &RegWidth,
                                          SmallVectorImpl<AsmToken> &Tokens) {
  StringRef RegName = getTokenStr();
  auto Loc = getLoc();

  const RegInfo *RI = getRegularRegInfo(RegName);
  if (!RI) {
    Error(Loc, "invalid register name");
    return AMDGPU::NoRegister;
  }

  Tokens.push_back(getToken());
  lex(); // skip register name

  RegKind = RI->Kind;
  StringRef RegSuffix = RegName.substr(RI->Name.size());
  unsigned SubReg = NoSubRegister;
  if (!RegSuffix.empty()) {
    // Single 32-bit register: vXX.
    if (!getRegNum(RegSuffix, RegNum)) {
      Error(Loc, "invalid register index");
      return AMDGPU::NoRegister;
    }
    RegWidth = 32;
  } else {
    // Range of registers: v[XX:YY]. ":YY" is optional.
    if (!ParseRegRange(RegNum, RegWidth))
      return AMDGPU::NoRegister;
  }

  return getRegularReg(RegKind, RegNum, SubReg, RegWidth, Loc);
}
unsigned AMDGPUAsmParser::ParseRegList(RegisterKind &RegKind, unsigned &RegNum,
                                       unsigned &RegWidth,
                                       SmallVectorImpl<AsmToken> &Tokens) {
  unsigned Reg = AMDGPU::NoRegister;
  auto ListLoc = getLoc();

  if (!skipToken(AsmToken::LBrac,
                 "expected a register or a list of registers")) {
    return AMDGPU::NoRegister;
  }

  // List of consecutive registers, e.g.: [s0,s1,s2,s3]

  auto Loc = getLoc();
  if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth))
    return AMDGPU::NoRegister;
  if (RegWidth != 32) {
    Error(Loc, "expected a single 32-bit register");
    return AMDGPU::NoRegister;
  }

  while (trySkipToken(AsmToken::Comma)) {
    RegisterKind NextRegKind;
    unsigned NextReg, NextRegNum, NextRegWidth;
    Loc = getLoc();

    if (!ParseAMDGPURegister(NextRegKind, NextReg,
                             NextRegNum, NextRegWidth,
                             Tokens)) {
      return AMDGPU::NoRegister;
    }
    if (NextRegWidth != 32) {
      Error(Loc, "expected a single 32-bit register");
      return AMDGPU::NoRegister;
    }
    if (NextRegKind != RegKind) {
      Error(Loc, "registers in a list must be of the same kind");
      return AMDGPU::NoRegister;
    }
    if (!AddNextRegisterToList(Reg, RegWidth, RegKind, NextReg, Loc))
      return AMDGPU::NoRegister;
  }

  if (!skipToken(AsmToken::RBrac,
                 "expected a comma or a closing square bracket")) {
    return AMDGPU::NoRegister;
  }

  if (isRegularReg(RegKind))
    Reg = getRegularReg(RegKind, RegNum, NoSubRegister, RegWidth, ListLoc);

  return Reg;
}
bool AMDGPUAsmParser::ParseAMDGPURegister(RegisterKind &RegKind, unsigned &Reg,
                                          unsigned &RegNum, unsigned &RegWidth,
                                          SmallVectorImpl<AsmToken> &Tokens) {
  auto Loc = getLoc();
  Reg = AMDGPU::NoRegister;

  if (isToken(AsmToken::Identifier)) {
    Reg = ParseSpecialReg(RegKind, RegNum, RegWidth, Tokens);
    if (Reg == AMDGPU::NoRegister)
      Reg = ParseRegularReg(RegKind, RegNum, RegWidth, Tokens);
  } else {
    Reg = ParseRegList(RegKind, RegNum, RegWidth, Tokens);
  }

  const MCRegisterInfo *TRI = getContext().getRegisterInfo();
  if (Reg == AMDGPU::NoRegister) {
    assert(Parser.hasPendingError());
    return false;
  }

  if (!subtargetHasRegister(*TRI, Reg)) {
    if (Reg == AMDGPU::SGPR_NULL) {
      Error(Loc, "'null' operand is not supported on this GPU");
    } else {
      Error(Loc, AMDGPUInstPrinter::getRegisterName(Reg) +
                 " register not available on this GPU");
    }
    return false;
  }

  return true;
}

bool AMDGPUAsmParser::ParseAMDGPURegister(RegisterKind &RegKind, unsigned &Reg,
                                          unsigned &RegNum, unsigned &RegWidth,
                                          bool RestoreOnFailure /*=false*/) {
  Reg = AMDGPU::NoRegister;

  SmallVector<AsmToken, 1> Tokens;
  if (ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth, Tokens))
    return true;

  if (RestoreOnFailure) {
    while (!Tokens.empty()) {
      getLexer().UnLex(Tokens.pop_back_val());
    }
  }
  return false;
}
std::optional<StringRef>
AMDGPUAsmParser::getGprCountSymbolName(RegisterKind RegKind) {
  switch (RegKind) {
  case IS_VGPR:
    return StringRef(".amdgcn.next_free_vgpr");
  case IS_SGPR:
    return StringRef(".amdgcn.next_free_sgpr");
  default:
    return std::nullopt;
  }
}

void AMDGPUAsmParser::initializeGprCountSymbol(RegisterKind RegKind) {
  auto SymbolName = getGprCountSymbolName(RegKind);
  assert(SymbolName && "initializing invalid register kind");
  MCSymbol *Sym = getContext().getOrCreateSymbol(*SymbolName);
  Sym->setVariableValue(MCConstantExpr::create(0, getContext()));
}

bool AMDGPUAsmParser::updateGprCountSymbols(RegisterKind RegKind,
                                            unsigned DwordRegIndex,
                                            unsigned RegWidth) {
  auto SymbolName = getGprCountSymbolName(RegKind);
  if (!SymbolName)
    return true;
  MCSymbol *Sym = getContext().getOrCreateSymbol(*SymbolName);

  int64_t NewMax = DwordRegIndex + divideCeil(RegWidth, 32) - 1;
  int64_t OldCount;

  if (!Sym->isVariable())
    return !Error(getLoc(),
                  ".amdgcn.next_free_{v,s}gpr symbols must be variable");
  if (!Sym->getVariableValue(false)->evaluateAsAbsolute(OldCount))
    return !Error(
        getLoc(),
        ".amdgcn.next_free_{v,s}gpr symbols must be absolute expressions");

  if (OldCount <= NewMax)
    Sym->setVariableValue(MCConstantExpr::create(NewMax + 1, getContext()));

  return true;
}
std::unique_ptr<AMDGPUOperand>
AMDGPUAsmParser::parseRegister(bool RestoreOnFailure) {
  const auto &Tok = getToken();
  SMLoc StartLoc = Tok.getLoc();
  SMLoc EndLoc = Tok.getEndLoc();
  RegisterKind RegKind;
  unsigned Reg, RegNum, RegWidth;

  if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth)) {
    return nullptr;
  }
  if (isHsaAbi(getSTI())) {
    if (!updateGprCountSymbols(RegKind, RegNum, RegWidth))
      return nullptr;
  } else
    KernelScope.usesRegister(RegKind, RegNum, RegWidth);
  return AMDGPUOperand::CreateReg(this, Reg, StartLoc, EndLoc);
}
ParseStatus AMDGPUAsmParser::parseImm(OperandVector &Operands,
                                      bool HasSP3AbsModifier, bool HasLit) {
  if (!HasLit) {
    HasLit = trySkipId("lit");
    if (HasLit) {
      if (!skipToken(AsmToken::LParen, "expected left paren after lit"))
        return ParseStatus::Failure;
      ParseStatus S = parseImm(Operands, HasSP3AbsModifier, true);
      if (S.isSuccess() &&
          !skipToken(AsmToken::RParen, "expected closing parentheses"))
        return ParseStatus::Failure;
      return S;
    }
  }

  const auto& Tok = getToken();
  const auto& NextTok = peekToken();
  bool IsReal = Tok.is(AsmToken::Real);
  SMLoc S = getLoc();
  bool Negate = false;

  if (!IsReal && Tok.is(AsmToken::Minus) && NextTok.is(AsmToken::Real)) {
    lex();
    IsReal = true;
    Negate = true;
  }

  AMDGPUOperand::Modifiers Mods;
  Mods.Lit = HasLit;

  if (IsReal) {
    // Floating-point expressions are not supported; only floating-point
    // literals with an optional sign are allowed.
    StringRef Num = getTokenStr();
    lex();

    APFloat RealVal(APFloat::IEEEdouble());
    auto roundMode = APFloat::rmNearestTiesToEven;
    if (errorToBool(RealVal.convertFromString(Num, roundMode).takeError()))
      return ParseStatus::Failure;
    if (Negate)
      RealVal.changeSign();

    Operands.push_back(
      AMDGPUOperand::CreateImm(this, RealVal.bitcastToAPInt().getZExtValue(), S,
                               AMDGPUOperand::ImmTyNone, true));
    AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
    Op.setModifiers(Mods);

    return ParseStatus::Success;
  }

  int64_t IntVal;
  const MCExpr *Expr;

  if (HasSP3AbsModifier) {
    // Workaround for handling expressions as arguments of the SP3 'abs'
    // modifier, e.g. |1.0|, |-1|, |1+x|. This syntax is not compatible with
    // standard MC expressions (due to the trailing '|').
    SMLoc EndLoc;
    if (getParser().parsePrimaryExpr(Expr, EndLoc, nullptr))
      return ParseStatus::Failure;
  } else {
    if (Parser.parseExpression(Expr))
      return ParseStatus::Failure;
  }

  if (Expr->evaluateAsAbsolute(IntVal)) {
    Operands.push_back(AMDGPUOperand::CreateImm(this, IntVal, S));
    AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
    Op.setModifiers(Mods);
  } else {
    Operands.push_back(AMDGPUOperand::CreateExpr(this, Expr, S));
  }

  return ParseStatus::Success;
}
ParseStatus AMDGPUAsmParser::parseReg(OperandVector &Operands) {
  if (!isRegister())
    return ParseStatus::NoMatch;

  if (auto R = parseRegister()) {
    assert(R->isReg());
    Operands.push_back(std::move(R));
    return ParseStatus::Success;
  }
  return ParseStatus::Failure;
}

ParseStatus AMDGPUAsmParser::parseRegOrImm(OperandVector &Operands,
                                           bool HasSP3AbsMod, bool HasLit) {
  ParseStatus Res = parseReg(Operands);
  if (!Res.isNoMatch())
    return Res;
  if (isModifier())
    return ParseStatus::NoMatch;
  return parseImm(Operands, HasSP3AbsMod, HasLit);
}
bool
AMDGPUAsmParser::isNamedOperandModifier(const AsmToken &Token,
                                        const AsmToken &NextToken) const {
  if (Token.is(AsmToken::Identifier) && NextToken.is(AsmToken::LParen)) {
    StringRef str = Token.getString();
    return str == "abs" || str == "neg" || str == "sext";
  }
  return false;
}

bool
AMDGPUAsmParser::isOpcodeModifierWithVal(const AsmToken &Token,
                                         const AsmToken &NextToken) const {
  return Token.is(AsmToken::Identifier) && NextToken.is(AsmToken::Colon);
}

bool
AMDGPUAsmParser::isOperandModifier(const AsmToken &Token,
                                   const AsmToken &NextToken) const {
  return isNamedOperandModifier(Token, NextToken) || Token.is(AsmToken::Pipe);
}

bool
AMDGPUAsmParser::isRegOrOperandModifier(const AsmToken &Token,
                                        const AsmToken &NextToken) const {
  return isRegister(Token, NextToken) || isOperandModifier(Token, NextToken);
}

bool
AMDGPUAsmParser::isModifier() {
  AsmToken Tok = getToken();
  AsmToken NextToken[2];
  peekTokens(NextToken);

  return isOperandModifier(Tok, NextToken[0]) ||
         (Tok.is(AsmToken::Minus) &&
          isRegOrOperandModifier(NextToken[0], NextToken[1])) ||
         isOpcodeModifierWithVal(Tok, NextToken[0]);
}
bool
AMDGPUAsmParser::parseSP3NegModifier() {
  AsmToken NextToken[2];
  peekTokens(NextToken);

  if (isToken(AsmToken::Minus) &&
      (isRegister(NextToken[0], NextToken[1]) ||
       NextToken[0].is(AsmToken::Pipe) ||
       isId(NextToken[0], "abs"))) {
    lex();
    return true;
  }
  return false;
}

ParseStatus
AMDGPUAsmParser::parseRegOrImmWithFPInputMods(OperandVector &Operands,
                                              bool AllowImm) {
  bool Neg, SP3Neg;
  bool Abs, SP3Abs;
  bool Lit;
  SMLoc Loc;

  // Disable ambiguous constructs like '--1' etc. Should use neg(-1) instead.
  if (isToken(AsmToken::Minus) && peekToken().is(AsmToken::Minus))
    return Error(getLoc(), "invalid syntax, expected 'neg' modifier");

  SP3Neg = parseSP3NegModifier();

  Loc = getLoc();
  Neg = trySkipId("neg");
  if (Neg && SP3Neg)
    return Error(Loc, "expected register or immediate");
  if (Neg && !skipToken(AsmToken::LParen, "expected left paren after neg"))
    return ParseStatus::Failure;

  Abs = trySkipId("abs");
  if (Abs && !skipToken(AsmToken::LParen, "expected left paren after abs"))
    return ParseStatus::Failure;

  Lit = trySkipId("lit");

  Loc = getLoc();
  SP3Abs = trySkipToken(AsmToken::Pipe);
  if (Abs && SP3Abs)
    return Error(Loc, "expected register or immediate");

  ParseStatus Res;
  if (AllowImm) {
    Res = parseRegOrImm(Operands, SP3Abs, Lit);
  } else {
    Res = parseReg(Operands);
  }
  if (!Res.isSuccess())
    return (SP3Neg || Neg || SP3Abs || Abs || Lit) ? ParseStatus::Failure : Res;

  if (Lit && !Operands.back()->isImm())
    Error(Loc, "expected immediate with lit modifier");

  if (SP3Abs && !skipToken(AsmToken::Pipe, "expected vertical bar"))
    return ParseStatus::Failure;
  if (Abs && !skipToken(AsmToken::RParen, "expected closing parentheses"))
    return ParseStatus::Failure;
  if (Neg && !skipToken(AsmToken::RParen, "expected closing parentheses"))
    return ParseStatus::Failure;

  AMDGPUOperand::Modifiers Mods;
  Mods.Abs = Abs || SP3Abs;
  Mods.Neg = Neg || SP3Neg;
  Mods.Lit = Lit;

  if (Mods.hasFPModifiers() || Lit) {
    AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
    if (Op.isExpr())
      return Error(Op.getStartLoc(), "expected an absolute expression");
    Op.setModifiers(Mods);
  }
  return ParseStatus::Success;
}
ParseStatus
AMDGPUAsmParser::parseRegOrImmWithIntInputMods(OperandVector &Operands,
                                               bool AllowImm) {
  bool Sext = trySkipId("sext");
  if (Sext && !skipToken(AsmToken::LParen, "expected left paren after sext"))
    return ParseStatus::Failure;

  ParseStatus Res;
  if (AllowImm) {
    Res = parseRegOrImm(Operands);
  } else {
    Res = parseReg(Operands);
  }
  if (!Res.isSuccess())
    return Sext ? ParseStatus::Failure : Res;

  if (Sext && !skipToken(AsmToken::RParen, "expected closing parentheses"))
    return ParseStatus::Failure;

  AMDGPUOperand::Modifiers Mods;
  Mods.Sext = Sext;

  if (Mods.hasIntModifiers()) {
    AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
    if (Op.isExpr())
      return Error(Op.getStartLoc(), "expected an absolute expression");
    Op.setModifiers(Mods);
  }

  return ParseStatus::Success;
}

ParseStatus AMDGPUAsmParser::parseRegWithFPInputMods(OperandVector &Operands) {
  return parseRegOrImmWithFPInputMods(Operands, false);
}

ParseStatus AMDGPUAsmParser::parseRegWithIntInputMods(OperandVector &Operands) {
  return parseRegOrImmWithIntInputMods(Operands, false);
}
ParseStatus AMDGPUAsmParser::parseVReg32OrOff(OperandVector &Operands) {
  auto Loc = getLoc();
  if (trySkipId("off")) {
    Operands.push_back(AMDGPUOperand::CreateImm(this, 0, Loc,
                                                AMDGPUOperand::ImmTyOff, false));
    return ParseStatus::Success;
  }

  if (!isRegister())
    return ParseStatus::NoMatch;

  std::unique_ptr<AMDGPUOperand> Reg = parseRegister();
  if (Reg) {
    Operands.push_back(std::move(Reg));
    return ParseStatus::Success;
  }

  return ParseStatus::Failure;
}
unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
  uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;

  // Reject matches from encodings other than the one forced by an
  // _e32/_e64/_sdwa/_dpp mnemonic suffix.
  if ((isForcedVOP3() && !(TSFlags & SIInstrFlags::VOP3)) ||
      (isForcedSDWA() && !(TSFlags & SIInstrFlags::SDWA)) ||
      (isForcedDPP() && !(TSFlags & SIInstrFlags::DPP)))
    return Match_InvalidOperand;

  if (Inst.getOpcode() == AMDGPU::V_MAC_F32_sdwa_vi ||
      Inst.getOpcode() == AMDGPU::V_MAC_F16_sdwa_vi) {
    // v_mac_f32/16 allow only dst_sel == DWORD.
    auto OpNum =
        AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::dst_sel);
    const auto &Op = Inst.getOperand(OpNum);
    if (!Op.isImm() || Op.getImm() != AMDGPU::SDWA::SdwaSel::DWORD) {
      return Match_InvalidOperand;
    }
  }

  return Match_Success;
}

static ArrayRef<unsigned> getAllVariants() {
  static const unsigned Variants[] = {
    AMDGPUAsmVariants::DEFAULT, AMDGPUAsmVariants::VOP3,
    AMDGPUAsmVariants::SDWA, AMDGPUAsmVariants::SDWA9,
    AMDGPUAsmVariants::DPP, AMDGPUAsmVariants::VOP3_DPP
  };
  return ArrayRef(Variants);
}

// What asm variants we should check
ArrayRef<unsigned> AMDGPUAsmParser::getMatchedVariants() const {
  if (isForcedDPP() && isForcedVOP3()) {
    static const unsigned Variants[] = {AMDGPUAsmVariants::VOP3_DPP};
    return ArrayRef(Variants);
  }
  if (getForcedEncodingSize() == 32) {
    static const unsigned Variants[] = {AMDGPUAsmVariants::DEFAULT};
    return ArrayRef(Variants);
  }
  if (isForcedVOP3()) {
    static const unsigned Variants[] = {AMDGPUAsmVariants::VOP3};
    return ArrayRef(Variants);
  }
  if (isForcedSDWA()) {
    static const unsigned Variants[] = {AMDGPUAsmVariants::SDWA,
                                        AMDGPUAsmVariants::SDWA9};
    return ArrayRef(Variants);
  }
  if (isForcedDPP()) {
    static const unsigned Variants[] = {AMDGPUAsmVariants::DPP};
    return ArrayRef(Variants);
  }
  return getAllVariants();
}

StringRef AMDGPUAsmParser::getMatchedVariantName() const {
  if (isForcedDPP() && isForcedVOP3())
    return "e64_dpp";
  if (getForcedEncodingSize() == 32)
    return "e32";
  // ... (remaining variant names elided in this excerpt)
  return "";
}
unsigned AMDGPUAsmParser::findImplicitSGPRReadInVOP(const MCInst &Inst) const {
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (MCPhysReg Reg : Desc.implicit_uses()) {
    switch (Reg) {
    case AMDGPU::FLAT_SCR:
    case AMDGPU::VCC:
    case AMDGPU::VCC_LO:
    case AMDGPU::VCC_HI:
    case AMDGPU::M0:
    case AMDGPU::SGPR_NULL:
      return Reg;
    default:
      break;
    }
  }
  return AMDGPU::NoRegister;
}

bool AMDGPUAsmParser::isInlineConstant(const MCInst &Inst,
                                       unsigned OpIdx) const {
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  if (!AMDGPU::isSISrcOperand(Desc, OpIdx))
    return false;

  const MCOperand &MO = Inst.getOperand(OpIdx);
  if (!MO.isImm())
    return false;

  int64_t Val = MO.getImm();
  auto OpSize = AMDGPU::getOperandSize(Desc, OpIdx);

  switch (OpSize) { // expected operand size
  case 8:
    return AMDGPU::isInlinableLiteral64(Val, hasInv2PiInlineImm());
  case 4:
    return AMDGPU::isInlinableLiteral32(Val, hasInv2PiInlineImm());
  // ... (16-bit and packed operand types elided in this excerpt)
  default:
    return false;
  }
}
unsigned AMDGPUAsmParser::getConstantBusLimit(unsigned Opcode) const {
  if (!isGFX10Plus())
    return 1;

  switch (Opcode) {
  // 64-bit shift instructions can use only one scalar value input
  case AMDGPU::V_LSHLREV_B64_e64:
  case AMDGPU::V_LSHLREV_B64_gfx10:
  case AMDGPU::V_LSHLREV_B64_e64_gfx11:
  case AMDGPU::V_LSHLREV_B64_e32_gfx12:
  case AMDGPU::V_LSHLREV_B64_e64_gfx12:
  case AMDGPU::V_LSHRREV_B64_e64:
  case AMDGPU::V_LSHRREV_B64_gfx10:
  case AMDGPU::V_LSHRREV_B64_e64_gfx11:
  case AMDGPU::V_LSHRREV_B64_e64_gfx12:
  case AMDGPU::V_ASHRREV_I64_e64:
  case AMDGPU::V_ASHRREV_I64_gfx10:
  case AMDGPU::V_ASHRREV_I64_e64_gfx11:
  case AMDGPU::V_ASHRREV_I64_e64_gfx12:
  case AMDGPU::V_LSHL_B64_e64:
  case AMDGPU::V_LSHR_B64_e64:
  case AMDGPU::V_ASHR_I64_e64:
    return 1;
  default:
    return 2;
  }
}
static OperandIndices getSrcOperandIndices(unsigned Opcode,
                                           bool AddMandatoryLiterals = false) {
  // (Body elided in this excerpt; when AddMandatoryLiterals is set, the
  //  deferred-literal operand index ImmDeferredIdx is included in the result.)
}

bool AMDGPUAsmParser::usesConstantBus(const MCInst &Inst, unsigned OpIdx) {
  const MCOperand &MO = Inst.getOperand(OpIdx);
  if (MO.isImm())
    return !isInlineConstant(Inst, OpIdx);
  if (MO.isReg()) {
    auto Reg = MO.getReg();
    if (!Reg)
      return false;
    const MCRegisterInfo *TRI = getContext().getRegisterInfo();
    auto PReg = mc2PseudoReg(Reg);
    return isSGPR(PReg, TRI) && PReg != SGPR_NULL;
  }
  return true;
}

// Writelane is special in that it can use SGPR and M0 (which would normally
// count as using the constant bus twice - but in this case it is allowed since
// the lane selector doesn't count as a use of the constant bus). However, it
// is still required to abide by the 1 SGPR rule.
static bool checkWriteLane(const MCInst &Inst) {
  const unsigned Opcode = Inst.getOpcode();
  if (Opcode != V_WRITELANE_B32_gfx6_gfx7 && Opcode != V_WRITELANE_B32_vi)
    return false;
  const MCOperand &LaneSelOp = Inst.getOperand(2);
  if (!LaneSelOp.isReg())
    return false;
  auto LaneSelReg = mc2PseudoReg(LaneSelOp.getReg());
  return LaneSelReg == M0 || LaneSelReg == M0_gfxpre11;
}
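// Illustration (added for this excerpt): with a constant bus limit of 1,
// "v_add_f32 v0, s0, s1" is rejected because s0 and s1 are two distinct
// scalar reads, while "v_add_f32 v0, s0, s0" passes since a repeated SGPR
// is only counted once (see SGPRsUsed below).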
bool AMDGPUAsmParser::validateConstantBusLimitations(
    const MCInst &Inst, const OperandVector &Operands) {
  const unsigned Opcode = Inst.getOpcode();
  // ...
  unsigned LastSGPR = AMDGPU::NoRegister;
  unsigned ConstantBusUseCount = 0;
  unsigned NumLiterals = 0;
  unsigned LiteralSize;

  if (!(Desc.TSFlags & ...))
    return true;

  // ...
  unsigned SGPRUsed = findImplicitSGPRReadInVOP(Inst);
  if (SGPRUsed != AMDGPU::NoRegister) {
    SGPRsUsed.insert(SGPRUsed);
    ++ConstantBusUseCount;
  }

  for (int OpIdx : OpIndices) {
    // ...
    if (usesConstantBus(Inst, OpIdx)) {
      // ...
      if (SGPRsUsed.insert(LastSGPR).second) {
        ++ConstantBusUseCount;
      }
      // ...
      if (NumLiterals == 0) {
        // ...
      } else if (LiteralSize != Size) {
        // ...
      }
    }
  }
  // ...
  ConstantBusUseCount += NumLiterals;

  if (ConstantBusUseCount <= getConstantBusLimit(Opcode))
    return true;
  // ...
  Error(Loc, "invalid operand (violates constant bus restrictions)");
  return false;
}
bool AMDGPUAsmParser::validateVOPDRegBankConstraints(
    const MCInst &Inst, const OperandVector &Operands) {
  // ...
  const unsigned Opcode = Inst.getOpcode();
  // ...
  auto getVRegIdx = [&](unsigned, unsigned OperandIdx) {
    // ...
  };

  bool SkipSrc = Opcode == AMDGPU::V_DUAL_MOV_B32_e32_X_MOV_B32_e32_gfx12;
  // ...
  auto InvalidCompOprIdx =
      InstInfo.getInvalidCompOperandIndex(getVRegIdx, SkipSrc);
  if (!InvalidCompOprIdx)
    return true;

  auto CompOprIdx = *InvalidCompOprIdx;
  auto ParsedIdx =
      std::max(InstInfo[VOPD::X].getIndexInParsedOperands(CompOprIdx),
               InstInfo[VOPD::Y].getIndexInParsedOperands(CompOprIdx));
  // ...
  auto Loc = ((AMDGPUOperand &)*Operands[ParsedIdx]).getStartLoc();
  if (CompOprIdx == VOPD::Component::DST) {
    Error(Loc, "one dst register must be even and the other odd");
  } else {
    auto CompSrcIdx = CompOprIdx - VOPD::Component::DST_NUM;
    Error(Loc, Twine("src") + Twine(CompSrcIdx) +
                   " operands must use different VGPR banks");
  }
  return false;
}
bool AMDGPUAsmParser::validateIntClampSupported(const MCInst &Inst) {
  // ...
}
bool AMDGPUAsmParser::validateMIMGDataSize(const MCInst &Inst,
                                           const SMLoc &IDLoc) {
  // ...
  unsigned TFESize = (TFEIdx != -1 && Inst.getOperand(TFEIdx).getImm()) ? 1 : 0;
  // ...
  bool IsPackedD16 = false;
  // ...
    IsPackedD16 = D16Idx >= 0;
    // ...
      DataSize = (DataSize + 1) / 2;
  // ...
  if ((VDataSize / 4) == DataSize + TFESize)
    return true;

  StringRef Modifiers;
  if (...)
    Modifiers = IsPackedD16 ? "dmask and d16" : "dmask";
  else
    Modifiers = IsPackedD16 ? "dmask, d16 and tfe" : "dmask and tfe";

  Error(IDLoc, Twine("image data size does not match ") + Modifiers);
  return false;
}
bool AMDGPUAsmParser::validateMIMGAddrSize(const MCInst &Inst,
                                           const SMLoc &IDLoc) {
  // ...
      : AMDGPU::OpName::rsrc;
  // ...
  assert(SrsrcIdx > VAddr0Idx);
  // ...
  if (BaseOpcode->BVH) {
    if (IsA16 == BaseOpcode->A16)
      return true;
    Error(IDLoc, "image address size does not match a16");
    return false;
  }
  // ...
  bool IsNSA = SrsrcIdx - VAddr0Idx > 1;
  unsigned ActualAddrSize =
      IsNSA ? SrsrcIdx - VAddr0Idx
            : ...;
  // ...
  unsigned ExpectedAddrSize = ...;

  if (hasPartialNSAEncoding() && ...) {
    int VAddrLastIdx = SrsrcIdx - 1;
    unsigned VAddrLastSize = ...;
    ActualAddrSize = VAddrLastIdx - VAddr0Idx + VAddrLastSize;
  }
  // ...
  if (ExpectedAddrSize > 12)
    ExpectedAddrSize = 16;
  // ...
  if (ActualAddrSize == 8 && (ExpectedAddrSize >= 5 && ExpectedAddrSize <= 7))
    return true;
  // ...
  if (ActualAddrSize == ExpectedAddrSize)
    return true;

  Error(IDLoc, "image address size does not match dim and a16");
  return false;
}
bool AMDGPUAsmParser::validateMIMGAtomicDMask(const MCInst &Inst) {
  // ...
  if (!Desc.mayLoad() || !Desc.mayStore())
    return true; // Not an atomic
  // ...
  return DMask == 0x1 || DMask == 0x3 || DMask == 0xf;
}
bool AMDGPUAsmParser::validateMIMGGatherDMask(const MCInst &Inst) {
  // ...
  return DMask == 0x1 || DMask == 0x2 || DMask == 0x4 || DMask == 0x8;
}
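// gather4 fetches one component from four texels, so exactly one dmask bit
// may be set; image atomics are restricted to the patterns 0x1, 0x3 and 0xf,
// matching the legal data widths.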
bool AMDGPUAsmParser::validateMIMGDim(const MCInst &Inst,
                                      const OperandVector &Operands) {
  // ...
  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
    // ...
  }
  // ...
}
bool AMDGPUAsmParser::validateMIMGMSAA(const MCInst &Inst) {
  // ...
  if (!BaseOpcode->MSAA)
    return true;
  // ...
  return DimInfo->MSAA;
}

// ...
  case AMDGPU::V_MOVRELS_B32_sdwa_gfx10:
  case AMDGPU::V_MOVRELSD_B32_sdwa_gfx10:
  case AMDGPU::V_MOVRELSD_2_B32_sdwa_gfx10:
    // ...
bool AMDGPUAsmParser::validateMovrels(const MCInst &Inst,
                                      const OperandVector &Operands) {
  // ...
  Error(ErrLoc, "source operand must be a VGPR");
  return false;
}

bool AMDGPUAsmParser::validateMAIAccWrite(const MCInst &Inst,
                                          const OperandVector &Operands) {
  // ...
  if (Opc != AMDGPU::V_ACCVGPR_WRITE_B32_vi)
    return true;
  // ...
  Error(...,
        "source operand must be either a VGPR or an inline constant");
  return false;
}
bool AMDGPUAsmParser::validateMAISrc2(const MCInst &Inst,
                                      const OperandVector &Operands) {
  // ...
      !getFeatureBits()[FeatureMFMAInlineLiteralBug])
    return true;
  // ...
  if (Inst.getOperand(Src2Idx).isImm() && isInlineConstant(Inst, Src2Idx)) {
    Error(...,
          "inline constants are not allowed for this operand");
    return false;
  }
  // ...
}
bool AMDGPUAsmParser::validateMFMA(const MCInst &Inst,
                                   const OperandVector &Operands) {
  // ...
  if (Src2Reg == DstReg)
    return true;
  // ...
  if (TRI->getRegClass(Desc.operands()[0].RegClass).getSizeInBits() <= 128)
    return true;

  if (TRI->regsOverlap(Src2Reg, DstReg)) {
    Error(...,
          "source 2 operand must not partially overlap with dst");
    return false;
  }
  // ...
}
bool AMDGPUAsmParser::validateDivScale(const MCInst &Inst) {
  // ...
  case V_DIV_SCALE_F32_gfx6_gfx7:
  case V_DIV_SCALE_F32_vi:
  case V_DIV_SCALE_F32_gfx10:
  case V_DIV_SCALE_F64_gfx6_gfx7:
  case V_DIV_SCALE_F64_vi:
  case V_DIV_SCALE_F64_gfx10:
    // ...

  for (auto Name : {AMDGPU::OpName::src0_modifiers,
                    AMDGPU::OpName::src1_modifiers,
                    AMDGPU::OpName::src2_modifiers}) {
    // ...
  }
  // ...
}
bool AMDGPUAsmParser::validateMIMGD16(const MCInst &Inst) {
  // ...
}
// ...
  case AMDGPU::V_SUBREV_F32_e32:
  case AMDGPU::V_SUBREV_F32_e64:
  case AMDGPU::V_SUBREV_F32_e32_gfx10:
  case AMDGPU::V_SUBREV_F32_e32_gfx6_gfx7:
  case AMDGPU::V_SUBREV_F32_e32_vi:
  case AMDGPU::V_SUBREV_F32_e64_gfx10:
  case AMDGPU::V_SUBREV_F32_e64_gfx6_gfx7:
  case AMDGPU::V_SUBREV_F32_e64_vi:

  case AMDGPU::V_SUBREV_CO_U32_e32:
  case AMDGPU::V_SUBREV_CO_U32_e64:
  case AMDGPU::V_SUBREV_I32_e32_gfx6_gfx7:
  case AMDGPU::V_SUBREV_I32_e64_gfx6_gfx7:

  case AMDGPU::V_SUBBREV_U32_e32:
  case AMDGPU::V_SUBBREV_U32_e64:
  case AMDGPU::V_SUBBREV_U32_e32_gfx6_gfx7:
  case AMDGPU::V_SUBBREV_U32_e32_vi:
  case AMDGPU::V_SUBBREV_U32_e64_gfx6_gfx7:
  case AMDGPU::V_SUBBREV_U32_e64_vi:

  case AMDGPU::V_SUBREV_U32_e32:
  case AMDGPU::V_SUBREV_U32_e64:
  case AMDGPU::V_SUBREV_U32_e32_gfx9:
  case AMDGPU::V_SUBREV_U32_e32_vi:
  case AMDGPU::V_SUBREV_U32_e64_gfx9:
  case AMDGPU::V_SUBREV_U32_e64_vi:

  case AMDGPU::V_SUBREV_F16_e32:
  case AMDGPU::V_SUBREV_F16_e64:
  case AMDGPU::V_SUBREV_F16_e32_gfx10:
  case AMDGPU::V_SUBREV_F16_e32_vi:
  case AMDGPU::V_SUBREV_F16_e64_gfx10:
  case AMDGPU::V_SUBREV_F16_e64_vi:

  case AMDGPU::V_SUBREV_U16_e32:
  case AMDGPU::V_SUBREV_U16_e64:
  case AMDGPU::V_SUBREV_U16_e32_vi:
  case AMDGPU::V_SUBREV_U16_e64_vi:

  case AMDGPU::V_SUBREV_CO_U32_e32_gfx9:
  case AMDGPU::V_SUBREV_CO_U32_e64_gfx10:
  case AMDGPU::V_SUBREV_CO_U32_e64_gfx9:

  case AMDGPU::V_SUBBREV_CO_U32_e32_gfx9:
  case AMDGPU::V_SUBBREV_CO_U32_e64_gfx9:

  case AMDGPU::V_SUBREV_NC_U32_e32_gfx10:
  case AMDGPU::V_SUBREV_NC_U32_e64_gfx10:

  case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx10:
  case AMDGPU::V_SUBREV_CO_CI_U32_e64_gfx10:

  case AMDGPU::V_LSHRREV_B32_e32:
  case AMDGPU::V_LSHRREV_B32_e64:
  case AMDGPU::V_LSHRREV_B32_e32_gfx6_gfx7:
  case AMDGPU::V_LSHRREV_B32_e64_gfx6_gfx7:
  case AMDGPU::V_LSHRREV_B32_e32_vi:
  case AMDGPU::V_LSHRREV_B32_e64_vi:
  case AMDGPU::V_LSHRREV_B32_e32_gfx10:
  case AMDGPU::V_LSHRREV_B32_e64_gfx10:

  case AMDGPU::V_ASHRREV_I32_e32:
  case AMDGPU::V_ASHRREV_I32_e64:
  case AMDGPU::V_ASHRREV_I32_e32_gfx10:
  case AMDGPU::V_ASHRREV_I32_e32_gfx6_gfx7:
  case AMDGPU::V_ASHRREV_I32_e32_vi:
  case AMDGPU::V_ASHRREV_I32_e64_gfx10:
  case AMDGPU::V_ASHRREV_I32_e64_gfx6_gfx7:
  case AMDGPU::V_ASHRREV_I32_e64_vi:

  case AMDGPU::V_LSHLREV_B32_e32:
  case AMDGPU::V_LSHLREV_B32_e64:
  case AMDGPU::V_LSHLREV_B32_e32_gfx10:
  case AMDGPU::V_LSHLREV_B32_e32_gfx6_gfx7:
  case AMDGPU::V_LSHLREV_B32_e32_vi:
  case AMDGPU::V_LSHLREV_B32_e64_gfx10:
  case AMDGPU::V_LSHLREV_B32_e64_gfx6_gfx7:
  case AMDGPU::V_LSHLREV_B32_e64_vi:

  case AMDGPU::V_LSHLREV_B16_e32:
  case AMDGPU::V_LSHLREV_B16_e64:
  case AMDGPU::V_LSHLREV_B16_e32_vi:
  case AMDGPU::V_LSHLREV_B16_e64_vi:
  case AMDGPU::V_LSHLREV_B16_gfx10:

  case AMDGPU::V_LSHRREV_B16_e32:
  case AMDGPU::V_LSHRREV_B16_e64:
  case AMDGPU::V_LSHRREV_B16_e32_vi:
  case AMDGPU::V_LSHRREV_B16_e64_vi:
  case AMDGPU::V_LSHRREV_B16_gfx10:

  case AMDGPU::V_ASHRREV_I16_e32:
  case AMDGPU::V_ASHRREV_I16_e64:
  case AMDGPU::V_ASHRREV_I16_e32_vi:
  case AMDGPU::V_ASHRREV_I16_e64_vi:
  case AMDGPU::V_ASHRREV_I16_gfx10:

  case AMDGPU::V_LSHLREV_B64_e64:
  case AMDGPU::V_LSHLREV_B64_gfx10:
  case AMDGPU::V_LSHLREV_B64_vi:

  case AMDGPU::V_LSHRREV_B64_e64:
  case AMDGPU::V_LSHRREV_B64_gfx10:
  case AMDGPU::V_LSHRREV_B64_vi:

  case AMDGPU::V_ASHRREV_I64_e64:
  case AMDGPU::V_ASHRREV_I64_gfx10:
  case AMDGPU::V_ASHRREV_I64_vi:

  case AMDGPU::V_PK_LSHLREV_B16:
  case AMDGPU::V_PK_LSHLREV_B16_gfx10:
  case AMDGPU::V_PK_LSHLREV_B16_vi:

  case AMDGPU::V_PK_LSHRREV_B16:
  case AMDGPU::V_PK_LSHRREV_B16_gfx10:
  case AMDGPU::V_PK_LSHRREV_B16_vi:
  case AMDGPU::V_PK_ASHRREV_I16:
  case AMDGPU::V_PK_ASHRREV_I16_gfx10:
  case AMDGPU::V_PK_ASHRREV_I16_vi:
    // ...
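// The opcodes above are the "reversed" forms, which swap src0 and src1
// relative to the base operation (e.g. v_subrev_f32 computes src1 - src0).
// validateLdsDirect below rejects lds_direct for these opcodes, since the
// operand swap conflicts with the src0-only lds_direct encoding.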
std::optional<StringRef>
AMDGPUAsmParser::validateLdsDirect(const MCInst &Inst) {
  using namespace SIInstrFlags;
  const unsigned Opcode = Inst.getOpcode();
  // ...
  if ((Desc.TSFlags & Enc) == 0)
    return std::nullopt;

  for (auto SrcName : {OpName::src0, OpName::src1, OpName::src2}) {
    // ...
    if (Src.isReg() && Src.getReg() == LDS_DIRECT) {
      if (...)
        return StringRef("lds_direct is not supported on this GPU");
      if (...)
        return StringRef("lds_direct cannot be used with this instruction");
      if (SrcName != OpName::src0)
        return StringRef("lds_direct may be used as src0 only");
    }
  }

  return std::nullopt;
}
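// lds_direct is a special source encoding that streams data from LDS, at the
// address held in M0, straight into a VALU operand. The encoding exists only
// for src0, which is why any other source position is rejected above.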
// ...
  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
    if (Op.isFlatOffset())
      return Op.getStartLoc();
  }
  // ...
bool AMDGPUAsmParser::validateOffset(const MCInst &Inst,
                                     const OperandVector &Operands) {
  // ...
    return validateFlatOffset(Inst, Operands);
  // ...
    return validateSMEMOffset(Inst, Operands);
  // ...
  const unsigned OffsetSize = 24;
  if (!isIntN(OffsetSize, Op.getImm())) {
    Error(..., Twine("expected a ") + Twine(OffsetSize) +
                   "-bit signed offset");
    return false;
  }
  // ...
  const unsigned OffsetSize = 16;
  if (!isUIntN(OffsetSize, Op.getImm())) {
    Error(..., Twine("expected a ") + Twine(OffsetSize) +
                   "-bit unsigned offset");
    return false;
  }
  // ...
}
bool AMDGPUAsmParser::validateFlatOffset(const MCInst &Inst,
                                         const OperandVector &Operands) {
  // ...
  if (!hasFlatOffsets() && Op.getImm() != 0) {
    Error(...,
          "flat offset modifier is not supported on this GPU");
    return false;
  }
  // ...
  bool AllowNegative = ...;
  if (!isIntN(OffsetSize, Op.getImm()) ||
      (!AllowNegative && Op.getImm() < 0)) {
    Error(..., Twine("expected a ") +
                   (AllowNegative
                        ? Twine(OffsetSize) + "-bit signed offset"
                        : Twine(OffsetSize - 1) + "-bit unsigned offset"));
    return false;
  }
  // ...
}
// ...
  for (unsigned i = 2, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
    if (Op.isSMEMOffset() || Op.isSMEMOffsetMod())
      return Op.getStartLoc();
  }
  // ...
bool AMDGPUAsmParser::validateSMEMOffset(const MCInst &Inst,
                                         const OperandVector &Operands) {
  // ...
        : (isVI() || IsBuffer) ? "expected a 20-bit unsigned offset"
                               : "expected a 21-bit signed offset");
  // ...
}
bool AMDGPUAsmParser::validateSOPLiteral(const MCInst &Inst) const {
  // ...
  const int OpIndices[] = { Src0Idx, Src1Idx };

  unsigned NumExprs = 0;
  unsigned NumLiterals = 0;
  // ...
  for (int OpIdx : OpIndices) {
    if (OpIdx == -1)
      break;
    // ...
    if (MO.isImm() && !isInlineConstant(Inst, OpIdx)) {
      // ...
      if (NumLiterals == 0 || LiteralValue != Value) {
        // ...
      }
    } else if (MO.isExpr()) {
      // ...
    }
  }

  return NumLiterals + NumExprs <= 1;
}
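// A SOP instruction has room for a single 32-bit literal dword: e.g.
// (illustrative) "s_add_u32 s0, 0x12345, 0x12345" is encodable because the
// repeated value shares one literal, while "s_add_u32 s0, 0x12345, 0x54321"
// is not.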
bool AMDGPUAsmParser::validateOpSel(const MCInst &Inst) {
  // ...
  if (OpSelIdx != -1) {
    // ...
  }
  // ...
  if (OpSelHiIdx != -1) {
    // ...
  }
  // ...
}
bool AMDGPUAsmParser::validateNeg(const MCInst &Inst, int OpName) {
  // ...
  int SrcMods[3] = {AMDGPU::OpName::src0_modifiers,
                    AMDGPU::OpName::src1_modifiers,
                    AMDGPU::OpName::src2_modifiers};
  // ...
  for (unsigned i = 0; i < 3; ++i) {
    // ...
  }
  // ...
}
bool AMDGPUAsmParser::validateDPP(const MCInst &Inst,
                                  const OperandVector &Operands) {
  // ...
  if (DppCtrlIdx >= 0) {
    // ...
    Error(S, "DP ALU dpp only supports row_newbcast");
    return false;
  }
  // ...
  bool IsDPP = DppCtrlIdx >= 0 || Dpp8Idx >= 0;
  // ...
  Error(S, "invalid operand for instruction");
  // ...
  Error(S, "src1 immediate operand invalid for instruction");
  // ...
}
bool AMDGPUAsmParser::validateVccOperand(unsigned Reg) const {
  auto FB = getFeatureBits();
  return (FB[AMDGPU::FeatureWavefrontSize64] && Reg == AMDGPU::VCC) ||
         (FB[AMDGPU::FeatureWavefrontSize32] && Reg == AMDGPU::VCC_LO);
}
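// In wave32 mode the execution and carry masks are 32 bits wide, so carry
// operands must name vcc_lo; in wave64 they must name the full vcc pair.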
bool AMDGPUAsmParser::validateVOPLiteral(const MCInst &Inst,
                                         const OperandVector &Operands) {
  // ...
      !HasMandatoryLiteral && !isVOPD(Opcode))
    return true;
  // ...
  unsigned NumExprs = 0;
  unsigned NumLiterals = 0;
  // ...
  for (int OpIdx : OpIndices) {
    // ...
    if (MO.isImm() && !isInlineConstant(Inst, OpIdx)) {
      // ...
      if (!IsValid32Op && !isInt<32>(Value) && !isUInt<32>(Value)) {
        Error(getLitLoc(Operands), "invalid operand for instruction");
        return false;
      }
      // ...
      if (IsFP64 && IsValid32Op)
        // ...
      if (NumLiterals == 0 || LiteralValue != Value) {
        // ...
      }
    } else if (MO.isExpr()) {
      // ...
    }
  }
  NumLiterals += NumExprs;
  // ...
  if (!HasMandatoryLiteral && !getFeatureBits()[FeatureVOP3Literal]) {
    Error(getLitLoc(Operands), "literal operands are not supported");
    return false;
  }
  // ...
  if (NumLiterals > 1) {
    Error(getLitLoc(Operands, true),
          "only one unique literal operand is allowed");
    return false;
  }
  // ...
}
// ...
  unsigned Sub = MRI->getSubReg(Op.getReg(), AMDGPU::sub0);
  auto Reg = Sub ? Sub : Op.getReg();
  // ...
  return AGPR32.contains(Reg) ? 1 : 0;
bool AMDGPUAsmParser::validateAGPRLdSt(const MCInst &Inst) const {
  // ...
      : AMDGPU::OpName::vdata;
  // ...
  if (Data2Areg >= 0 && Data2Areg != DataAreg)
    return false;

  auto FB = getFeatureBits();
  if (FB[AMDGPU::FeatureGFX90AInsts]) {
    if (DataAreg < 0 || DstAreg < 0)
      return true;
    return DstAreg == DataAreg;
  }

  return DstAreg < 1 && DataAreg < 1;
}
bool AMDGPUAsmParser::validateVGPRAlign(const MCInst &Inst) const {
  auto FB = getFeatureBits();
  if (!FB[AMDGPU::FeatureGFX90AInsts])
    return true;
  // ...
  unsigned Sub = MRI->getSubReg(Op.getReg(), AMDGPU::sub0);
  // ...
  if (VGPR32.contains(Sub) && ((Sub - AMDGPU::VGPR0) & 1))
    return false;
  if (AGPR32.contains(Sub) && ((Sub - AMDGPU::AGPR0) & 1))
    return false;
  // ...
}
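// On gfx90a, multi-register VGPR/AGPR operands must start at an even register
// index (64-bit alignment); sub0 of the tuple is checked above, so e.g.
// v[1:2] is rejected while v[2:3] is fine.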
// ...
  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
    if (...)
      return Op.getStartLoc();
  }
  // ...
bool AMDGPUAsmParser::validateBLGP(const MCInst &Inst,
                                   const OperandVector &Operands) {
  // ...
  auto FB = getFeatureBits();
  bool UsesNeg = false;
  if (FB[AMDGPU::FeatureGFX940Insts]) {
    switch (Opc) {
    case AMDGPU::V_MFMA_F64_16X16X4F64_gfx940_acd:
    case AMDGPU::V_MFMA_F64_16X16X4F64_gfx940_vcd:
    case AMDGPU::V_MFMA_F64_4X4X4F64_gfx940_acd:
    case AMDGPU::V_MFMA_F64_4X4X4F64_gfx940_vcd:
      UsesNeg = true;
    }
  }

  if (IsNeg == UsesNeg)
    return true;

  Error(...,
        UsesNeg ? "invalid modifier: blgp is not supported"
                : "invalid modifier: neg is not supported");
  return false;
}
bool AMDGPUAsmParser::validateWaitCnt(const MCInst &Inst,
                                      const OperandVector &Operands) {
  // ...
  if (Opc != AMDGPU::S_WAITCNT_EXPCNT_gfx11 &&
      Opc != AMDGPU::S_WAITCNT_LGKMCNT_gfx11 &&
      Opc != AMDGPU::S_WAITCNT_VMCNT_gfx11 &&
      Opc != AMDGPU::S_WAITCNT_VSCNT_gfx11)
    return true;
  // ...
  if (Reg == AMDGPU::SGPR_NULL)
    return true;
  // ...
  Error(RegLoc, "src0 must be null");
  return false;
}
bool AMDGPUAsmParser::validateDS(const MCInst &Inst,
                                 const OperandVector &Operands) {
  // ...
    return validateGWS(Inst, Operands);
  // ...
    Error(S, "gds modifier is not supported on this GPU");
    return false;
  // ...
}
bool AMDGPUAsmParser::validateGWS(const MCInst &Inst,
                                  const OperandVector &Operands) {
  if (!getFeatureBits()[AMDGPU::FeatureGFX90AInsts])
    return true;
  // ...
  if (Opc != AMDGPU::DS_GWS_INIT_vi && Opc != AMDGPU::DS_GWS_BARRIER_vi &&
      Opc != AMDGPU::DS_GWS_SEMA_BR_vi)
    return true;
  // ...
  auto RegIdx = Reg - (VGPR32.contains(Reg) ? AMDGPU::VGPR0 : AMDGPU::AGPR0);
  if (RegIdx & 1) {
    Error(RegLoc, "vgpr must be even aligned");
    return false;
  }
  // ...
}
bool AMDGPUAsmParser::validateCoherencyBits(const MCInst &Inst,
                                            const OperandVector &Operands,
                                            const SMLoc &IDLoc) {
  // ...
                                           AMDGPU::OpName::cpol);
  // ...
    return validateTHAndScopeBits(Inst, Operands, CPol);
  // ...
    Error(S, "cache policy is not supported for SMRD instructions");
    return false;
  // ...
    Error(IDLoc, "invalid cache policy for SMEM instruction");
    return false;
  // ...
  if (!(TSFlags & AllowSCCModifier)) {
    // ...
    Error(S,
          "scc modifier is not supported for this instruction on this GPU");
    return false;
  }
  // ...
            : "instruction must use glc");
  // ...
        &CStr.data()[CStr.find(isGFX940() ? "sc0" : "glc")]);
  // ...
            : "instruction must not use glc");
  // ...
}
bool AMDGPUAsmParser::validateTHAndScopeBits(const MCInst &Inst,
                                             const OperandVector &Operands,
                                             const unsigned CPol) {
  // ...
  const unsigned Opcode = Inst.getOpcode();
  // ...
    return PrintError("instruction must use th:TH_ATOMIC_RETURN");
  // ...
    return PrintError("invalid th value for SMEM instruction");
  // ...
    return PrintError("scope and th combination is not valid");
  // ...
    return PrintError("invalid th value for atomic instructions");
  } else if (IsStore) {
    // ...
      return PrintError("invalid th value for store instructions");
  } else {
    // ...
      return PrintError("invalid th value for load instructions");
  }
  // ...
}
bool AMDGPUAsmParser::validateTFE(const MCInst &Inst,
                                  const OperandVector &Operands) {
  // ...
  if (Desc.mayStore() && ...) {
    // ...
    Error(Loc, "TFE modifier has no meaning for store instructions");
    return false;
  }
  // ...
}
bool AMDGPUAsmParser::validateInstruction(const MCInst &Inst,
                                          const SMLoc &IDLoc,
                                          const OperandVector &Operands) {
  if (auto ErrMsg = validateLdsDirect(Inst)) {
    Error(..., *ErrMsg);
    return false;
  }
  if (!validateSOPLiteral(Inst)) {
    Error(..., "only one unique literal operand is allowed");
    return false;
  }
  if (!validateVOPLiteral(Inst, Operands)) {
    return false;
  }
  if (!validateConstantBusLimitations(Inst, Operands)) {
    return false;
  }
  if (!validateVOPDRegBankConstraints(Inst, Operands)) {
    return false;
  }
  if (!validateIntClampSupported(Inst)) {
    Error(..., "integer clamping is not supported on this GPU");
    return false;
  }
  if (!validateOpSel(Inst)) {
    Error(..., "invalid op_sel operand");
    return false;
  }
  if (!validateNeg(Inst, AMDGPU::OpName::neg_lo)) {
    Error(..., "invalid neg_lo operand");
    return false;
  }
  if (!validateNeg(Inst, AMDGPU::OpName::neg_hi)) {
    Error(..., "invalid neg_hi operand");
    return false;
  }
  if (!validateDPP(Inst, Operands)) {
    return false;
  }
  if (!validateMIMGD16(Inst)) {
    Error(..., "d16 modifier is not supported on this GPU");
    return false;
  }
  if (!validateMIMGDim(Inst, Operands)) {
    Error(IDLoc, "missing dim operand");
    return false;
  }
  if (!validateMIMGMSAA(Inst)) {
    Error(..., "invalid dim; must be MSAA type");
    return false;
  }
  if (!validateMIMGDataSize(Inst, IDLoc)) {
    return false;
  }
  if (!validateMIMGAddrSize(Inst, IDLoc))
    return false;
  if (!validateMIMGAtomicDMask(Inst)) {
    Error(..., "invalid atomic image dmask");
    return false;
  }
  if (!validateMIMGGatherDMask(Inst)) {
    Error(..., "invalid image_gather dmask: only one bit must be set");
    return false;
  }
  if (!validateMovrels(Inst, Operands)) {
    return false;
  }
  if (!validateOffset(Inst, Operands)) {
    return false;
  }
  if (!validateMAIAccWrite(Inst, Operands)) {
    return false;
  }
  if (!validateMAISrc2(Inst, Operands)) {
    return false;
  }
  if (!validateMFMA(Inst, Operands)) {
    return false;
  }
  if (!validateCoherencyBits(Inst, Operands, IDLoc)) {
    return false;
  }
  if (!validateAGPRLdSt(Inst)) {
    Error(IDLoc,
          getFeatureBits()[AMDGPU::FeatureGFX90AInsts]
              ? "invalid register class: data and dst should be all VGPR or AGPR"
              : "invalid register class: agpr loads and stores not supported on this GPU");
    return false;
  }
  if (!validateVGPRAlign(Inst)) {
    Error(..., "invalid register class: vgpr tuples must be 64 bit aligned");
    return false;
  }
  // ...
  if (!validateBLGP(Inst, Operands)) {
    return false;
  }
  if (!validateDivScale(Inst)) {
    Error(IDLoc, "ABS not allowed in VOP3B instructions");
    return false;
  }
  if (!validateWaitCnt(Inst, Operands)) {
    return false;
  }
  if (!validateTFE(Inst, Operands)) {
    return false;
  }
  // ...
  return true;
}
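// validateInstruction is the single post-match gate: it runs after an
// encoding has been selected and before emission, so the first failing check
// reports its diagnostic and suppresses the instruction.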
// ...
                                unsigned VariantID = 0);
// ...
                                unsigned VariantID);

bool AMDGPUAsmParser::isSupportedMnemo(StringRef Mnemo, ...) {
  // ...
}

bool AMDGPUAsmParser::isSupportedMnemo(StringRef Mnemo, ...) {
  // ...
  for (auto Variant : Variants) {
    // ...