34#define DEBUG_TYPE "spirv-module-analysis"
38 cl::desc(
"Dump MIR with SPIR-V dependencies info"),
43 cl::desc(
"SPIR-V capabilities to avoid if there are "
44 "other options enabling a feature"),
47 "SPIR-V Shader capability")));
61 unsigned DefaultVal = 0) {
63 const auto &
Op = MdNode->getOperand(
OpIndex);
70getSymbolicOperandRequirements(SPIRV::OperandCategory::OperandCategory Category,
76 AvoidCaps.
S.
insert(SPIRV::Capability::Shader);
78 AvoidCaps.
S.
insert(SPIRV::Capability::Kernel);
83 bool MinVerOK = SPIRVVersion.
empty() || SPIRVVersion >= ReqMinVer;
85 ReqMaxVer.
empty() || SPIRVVersion.
empty() || SPIRVVersion <= ReqMaxVer;
88 if (ReqCaps.
empty()) {
89 if (ReqExts.
empty()) {
90 if (MinVerOK && MaxVerOK)
91 return {
true, {}, {}, ReqMinVer, ReqMaxVer};
94 }
else if (MinVerOK && MaxVerOK) {
95 if (ReqCaps.
size() == 1) {
96 auto Cap = ReqCaps[0];
99 SPIRV::OperandCategory::CapabilityOperand, Cap));
100 return {
true, {Cap}, std::move(ReqExts), ReqMinVer, ReqMaxVer};
110 for (
auto Cap : ReqCaps)
113 for (
size_t i = 0, Sz = UseCaps.
size(); i < Sz; ++i) {
114 auto Cap = UseCaps[i];
115 if (i == Sz - 1 || !AvoidCaps.
S.
contains(Cap)) {
117 SPIRV::OperandCategory::CapabilityOperand, Cap));
118 return {
true, {Cap}, std::move(ReqExts), ReqMinVer, ReqMaxVer};
126 if (
llvm::all_of(ReqExts, [&ST](
const SPIRV::Extension::Extension &Ext) {
127 return ST.canUseExtension(Ext);
138void SPIRVModuleAnalysis::setBaseInfo(
const Module &M) {
142 MAI.RegisterAliasTable.clear();
143 MAI.InstrsToDelete.clear();
144 MAI.GlobalObjMap.clear();
145 MAI.GlobalVarList.clear();
146 MAI.ExtInstSetMap.clear();
148 MAI.Reqs.initAvailableCapabilities(*ST);
151 if (
auto MemModel =
M.getNamedMetadata(
"spirv.MemoryModel")) {
152 auto MemMD = MemModel->getOperand(0);
153 MAI.Addr =
static_cast<SPIRV::AddressingModel::AddressingModel
>(
154 getMetadataUInt(MemMD, 0));
156 static_cast<SPIRV::MemoryModel::MemoryModel
>(getMetadataUInt(MemMD, 1));
159 MAI.Mem = ST->isShader() ? SPIRV::MemoryModel::GLSL450
160 : SPIRV::MemoryModel::OpenCL;
161 if (
MAI.Mem == SPIRV::MemoryModel::OpenCL) {
162 unsigned PtrSize = ST->getPointerSize();
163 MAI.Addr = PtrSize == 32 ? SPIRV::AddressingModel::Physical32
164 : PtrSize == 64 ? SPIRV::AddressingModel::Physical64
165 : SPIRV::AddressingModel::Logical;
168 MAI.Addr = SPIRV::AddressingModel::Logical;
173 if (
auto VerNode =
M.getNamedMetadata(
"opencl.ocl.version")) {
174 MAI.SrcLang = SPIRV::SourceLanguage::OpenCL_C;
177 assert(VerNode->getNumOperands() > 0 &&
"Invalid SPIR");
178 auto VersionMD = VerNode->getOperand(0);
179 unsigned MajorNum = getMetadataUInt(VersionMD, 0, 2);
180 unsigned MinorNum = getMetadataUInt(VersionMD, 1);
181 unsigned RevNum = getMetadataUInt(VersionMD, 2);
184 (std::max(1U, MajorNum) * 100 + MinorNum) * 1000 + RevNum;
187 if (
auto *CxxVerNode =
M.getNamedMetadata(
"opencl.cxx.version")) {
188 assert(CxxVerNode->getNumOperands() > 0 &&
"Invalid SPIR");
189 auto *CxxMD = CxxVerNode->getOperand(0);
191 (getMetadataUInt(CxxMD, 0) * 100 + getMetadataUInt(CxxMD, 1)) * 1000 +
192 getMetadataUInt(CxxMD, 2);
193 if ((
MAI.SrcLangVersion == 200000 && CxxVer == 100000) ||
194 (
MAI.SrcLangVersion == 300000 && CxxVer == 202100000)) {
195 MAI.SrcLang = SPIRV::SourceLanguage::CPP_for_OpenCL;
196 MAI.SrcLangVersion = CxxVer;
199 "opencl cxx version is not compatible with opencl c version!");
207 if (!ST->isShader()) {
208 MAI.SrcLang = SPIRV::SourceLanguage::OpenCL_CPP;
209 MAI.SrcLangVersion = 100000;
211 MAI.SrcLang = SPIRV::SourceLanguage::Unknown;
212 MAI.SrcLangVersion = 0;
216 if (
auto ExtNode =
M.getNamedMetadata(
"opencl.used.extensions")) {
217 for (
unsigned I = 0,
E = ExtNode->getNumOperands();
I !=
E; ++
I) {
218 MDNode *MD = ExtNode->getOperand(
I);
228 MAI.Reqs.getAndAddRequirements(SPIRV::OperandCategory::MemoryModelOperand,
230 MAI.Reqs.getAndAddRequirements(SPIRV::OperandCategory::SourceLanguageOperand,
232 MAI.Reqs.getAndAddRequirements(SPIRV::OperandCategory::AddressingModelOperand,
235 if (
MAI.Mem == SPIRV::MemoryModel::VulkanKHR)
236 MAI.Reqs.addExtension(SPIRV::Extension::SPV_KHR_vulkan_memory_model);
238 if (!ST->isShader()) {
240 MAI.ExtInstSetMap[
static_cast<unsigned>(
241 SPIRV::InstructionSet::OpenCL_std)] =
MAI.getNextIDRegister();
252 if (
UseMI.getOpcode() != SPIRV::OpDecorate &&
253 UseMI.getOpcode() != SPIRV::OpMemberDecorate)
256 for (
unsigned I = 0;
I <
UseMI.getNumOperands(); ++
I) {
274 for (
unsigned i = 0; i <
MI.getNumOperands(); ++i) {
283 unsigned Opcode =
MI.getOpcode();
284 if ((Opcode == SPIRV::OpDecorate) && i >= 2) {
285 unsigned DecorationID =
MI.getOperand(1).getImm();
286 if (DecorationID != SPIRV::Decoration::FuncParamAttr &&
287 DecorationID != SPIRV::Decoration::UserSemantic &&
288 DecorationID != SPIRV::Decoration::CacheControlLoadINTEL &&
289 DecorationID != SPIRV::Decoration::CacheControlStoreINTEL)
295 if (!UseDefReg && MO.
isDef()) {
303 dbgs() <<
"Unexpectedly, no global id found for the operand ";
305 dbgs() <<
"\nInstruction: ";
324 appendDecorationsForReg(
MI.getMF()->getRegInfo(), DefReg, Signature);
331 unsigned Opcode =
MI.getOpcode();
333 case SPIRV::OpTypeForwardPointer:
336 case SPIRV::OpVariable:
337 return static_cast<SPIRV::StorageClass::StorageClass
>(
338 MI.getOperand(2).
getImm()) != SPIRV::StorageClass::Function;
339 case SPIRV::OpFunction:
340 case SPIRV::OpFunctionParameter:
343 if (GR->hasConstFunPtr() && Opcode == SPIRV::OpUndef) {
346 if (
UseMI.getOpcode() != SPIRV::OpConstantFunctionPointerINTEL)
352 MAI.setSkipEmission(&
MI);
356 return TII->isTypeDeclInstr(
MI) || TII->isConstantInstr(
MI) ||
357 TII->isInlineAsmDefInstr(
MI);
// Resolves a use of a function pointer to the global register already (or
// about to be) assigned to the function's definition, and aliases the local
// use register to it.
// NOTE(review): this span is a lossy extraction -- several original source
// lines are elided, so the statements below are fragmentary.
363void SPIRVModuleAnalysis::visitFunPtrUse(
365 std::map<const Value *, unsigned> &GlobalToGReg,
const MachineFunction *MF,
// Find the defining operand (in the function's own MachineFunction) that
// corresponds to this function-pointer use.
367 const MachineOperand *OpFunDef =
368 GR->getFunctionDefinitionByUse(&
MI.getOperand(2));
371 const MachineInstr *OpDefMI = OpFunDef->
getParent();
374 const MachineRegisterInfo &FunDefMRI = FunDefMF->
getRegInfo();
// Visit the definition so it receives a global id before we query the alias
// below; the loop walks OpFunction/OpFunctionParameter instructions.
376 visitDecl(FunDefMRI, SignatureToGReg, GlobalToGReg, FunDefMF, *OpDefMI);
378 }
while (OpDefMI && (OpDefMI->
getOpcode() == SPIRV::OpFunction ||
379 OpDefMI->
getOpcode() == SPIRV::OpFunctionParameter));
// Alias the local operand register to the definition's global register.
381 MCRegister GlobalFunDefReg =
382 MAI.getRegisterAlias(FunDefMF, OpFunDef->
getReg());
384 "Function definition must refer to a global register");
385 MAI.setRegisterAlias(MF, OpReg, GlobalFunDefReg);
390void SPIRVModuleAnalysis::visitDecl(
392 std::map<const Value *, unsigned> &GlobalToGReg,
const MachineFunction *MF,
394 unsigned Opcode =
MI.getOpcode();
397 for (
const MachineOperand &MO :
MI.operands()) {
402 if (Opcode == SPIRV::OpConstantFunctionPointerINTEL &&
404 visitFunPtrUse(OpReg, SignatureToGReg, GlobalToGReg, MF,
MI);
408 if (
MAI.hasRegisterAlias(MF, MO.
getReg()))
412 if (isDeclSection(MRI, *OpDefMI))
413 visitDecl(MRI, SignatureToGReg, GlobalToGReg, MF, *OpDefMI);
419 dbgs() <<
"Unexpectedly, no unique definition for the operand ";
421 dbgs() <<
"\nInstruction: ";
426 "No unique definition is found for the virtual register");
430 bool IsFunDef =
false;
431 if (TII->isSpecConstantInstr(
MI)) {
432 GReg =
MAI.getNextIDRegister();
434 }
else if (Opcode == SPIRV::OpFunction ||
435 Opcode == SPIRV::OpFunctionParameter) {
436 GReg = handleFunctionOrParameter(MF,
MI, GlobalToGReg, IsFunDef);
437 }
else if (Opcode == SPIRV::OpTypeStruct ||
438 Opcode == SPIRV::OpConstantComposite) {
439 GReg = handleTypeDeclOrConstant(
MI, SignatureToGReg);
440 const MachineInstr *NextInstr =
MI.getNextNode();
442 ((Opcode == SPIRV::OpTypeStruct &&
443 NextInstr->
getOpcode() == SPIRV::OpTypeStructContinuedINTEL) ||
444 (Opcode == SPIRV::OpConstantComposite &&
446 SPIRV::OpConstantCompositeContinuedINTEL))) {
447 MCRegister Tmp = handleTypeDeclOrConstant(*NextInstr, SignatureToGReg);
449 MAI.setSkipEmission(NextInstr);
452 }
else if (TII->isTypeDeclInstr(
MI) || TII->isConstantInstr(
MI) ||
453 TII->isInlineAsmDefInstr(
MI)) {
454 GReg = handleTypeDeclOrConstant(
MI, SignatureToGReg);
455 }
else if (Opcode == SPIRV::OpVariable) {
456 GReg = handleVariable(MF,
MI, GlobalToGReg);
459 dbgs() <<
"\nInstruction: ";
465 MAI.setRegisterAlias(MF,
MI.getOperand(0).getReg(), GReg);
467 MAI.setSkipEmission(&
MI);
470MCRegister SPIRVModuleAnalysis::handleFunctionOrParameter(
472 std::map<const Value *, unsigned> &GlobalToGReg,
bool &IsFunDef) {
473 const Value *GObj = GR->getGlobalObject(MF,
MI.getOperand(0).getReg());
474 assert(GObj &&
"Unregistered global definition");
478 assert(
F &&
"Expected a reference to a function or an argument");
479 IsFunDef = !
F->isDeclaration();
480 auto [It,
Inserted] = GlobalToGReg.try_emplace(GObj);
483 MCRegister GReg =
MAI.getNextIDRegister();
// Assigns (or reuses) a global register for a type declaration or constant,
// deduplicated by the instruction's computed signature (MISign).
// NOTE(review): lossy extraction -- the signature line, MISign computation
// and return statement are elided here.
491SPIRVModuleAnalysis::handleTypeDeclOrConstant(
const MachineInstr &
MI,
// try_emplace returns the existing mapping when this signature was already
// seen, avoiding a duplicate global id.
494 auto [It,
Inserted] = SignatureToGReg.try_emplace(MISign);
// First occurrence: allocate a fresh module-global id register.
497 MCRegister GReg =
MAI.getNextIDRegister();
// Assigns a global register to an OpVariable, deduplicating by the IR global
// object it originates from, and records the variable for module-level
// emission.
// NOTE(review): lossy extraction -- some original lines are elided.
503MCRegister SPIRVModuleAnalysis::handleVariable(
505 std::map<const Value *, unsigned> &GlobalToGReg) {
// Every global variable is collected so it can be emitted in module scope.
506 MAI.GlobalVarList.push_back(&
MI);
507 const Value *GObj = GR->getGlobalObject(MF,
MI.getOperand(0).getReg());
508 assert(GObj &&
"Unregistered global definition");
// Reuse the register if this global object was already assigned one.
509 auto [It,
Inserted] = GlobalToGReg.try_emplace(GObj);
512 MCRegister GReg =
MAI.getNextIDRegister();
// Presumably GV is the GlobalVariable view of GObj (cast elided above) --
// TODO(review): confirm against the full source.
516 MAI.GlobalObjMap[GV] = GReg;
520void SPIRVModuleAnalysis::collectDeclarations(
const Module &M) {
522 std::map<const Value *, unsigned> GlobalToGReg;
523 for (
const Function &
F : M) {
524 MachineFunction *MF = MMI->getMachineFunction(
F);
527 const MachineRegisterInfo &MRI = MF->
getRegInfo();
528 unsigned PastHeader = 0;
529 for (MachineBasicBlock &
MBB : *MF) {
530 for (MachineInstr &
MI :
MBB) {
531 if (
MI.getNumOperands() == 0)
533 unsigned Opcode =
MI.getOpcode();
534 if (Opcode == SPIRV::OpFunction) {
535 if (PastHeader == 0) {
539 }
else if (Opcode == SPIRV::OpFunctionParameter) {
542 }
else if (PastHeader > 0) {
546 const MachineOperand &DefMO =
MI.getOperand(0);
548 case SPIRV::OpExtension:
549 MAI.Reqs.addExtension(SPIRV::Extension::Extension(DefMO.
getImm()));
550 MAI.setSkipEmission(&
MI);
552 case SPIRV::OpCapability:
553 MAI.Reqs.addCapability(SPIRV::Capability::Capability(DefMO.
getImm()));
554 MAI.setSkipEmission(&
MI);
559 if (DefMO.
isReg() && isDeclSection(MRI,
MI) &&
560 !
MAI.hasRegisterAlias(MF, DefMO.
getReg()))
561 visitDecl(MRI, SignatureToGReg, GlobalToGReg, MF,
MI);
574 if (
MI.getOpcode() == SPIRV::OpDecorate) {
576 auto Dec =
MI.getOperand(1).getImm();
577 if (Dec == SPIRV::Decoration::LinkageAttributes) {
578 auto Lnk =
MI.getOperand(
MI.getNumOperands() - 1).getImm();
579 if (Lnk == SPIRV::LinkageType::Import) {
584 MAI.GlobalObjMap[ImportedFunc] =
585 MAI.getRegisterAlias(
MI.getMF(), Target);
588 }
else if (
MI.getOpcode() == SPIRV::OpFunction) {
591 MCRegister GlobalReg =
MAI.getRegisterAlias(
MI.getMF(),
Reg);
593 MAI.GlobalObjMap[
F] = GlobalReg;
605 auto FoundMI = IS.insert(std::move(MISign));
606 if (!FoundMI.second) {
607 if (
MI.getOpcode() == SPIRV::OpDecorate) {
609 "Decoration instructions must have at least 2 operands");
611 "Only OpDecorate instructions can be duplicates");
616 if (
MI.getOperand(1).getImm() != SPIRV::Decoration::FPFastMathMode)
621 if (instrToSignature(*OrigMI, MAI,
true) == MISign) {
622 assert(OrigMI->getNumOperands() ==
MI.getNumOperands() &&
623 "Original instruction must have the same number of operands");
625 OrigMI->getNumOperands() == 3 &&
626 "FPFastMathMode decoration must have 3 operands for OpDecorate");
627 unsigned OrigFlags = OrigMI->getOperand(2).getImm();
628 unsigned NewFlags =
MI.getOperand(2).getImm();
629 if (OrigFlags == NewFlags)
633 unsigned FinalFlags = OrigFlags | NewFlags;
635 <<
"Warning: Conflicting FPFastMathMode decoration flags "
637 << *OrigMI <<
"Original flags: " << OrigFlags
638 <<
", new flags: " << NewFlags
639 <<
". They will be merged on a best effort basis, but not "
640 "validated. Final flags: "
641 << FinalFlags <<
"\n";
648 assert(
false &&
"No original instruction found for the duplicate "
649 "OpDecorate, but we found one in IS.");
662void SPIRVModuleAnalysis::processOtherInstrs(
const Module &M) {
664 for (
const Function &
F : M) {
665 if (
F.isDeclaration())
667 MachineFunction *MF = MMI->getMachineFunction(
F);
670 for (MachineBasicBlock &
MBB : *MF)
671 for (MachineInstr &
MI :
MBB) {
672 if (
MAI.getSkipEmission(&
MI))
674 const unsigned OpCode =
MI.getOpcode();
675 if (OpCode == SPIRV::OpString) {
677 }
else if (OpCode == SPIRV::OpExtInst &&
MI.getOperand(2).isImm() &&
678 MI.getOperand(2).getImm() ==
679 SPIRV::InstructionSet::
680 NonSemantic_Shader_DebugInfo_100) {
687 MachineOperand Ins =
MI.getOperand(3);
688 namespace NS = SPIRV::NonSemanticExtInst;
689 static constexpr int64_t GlobalNonSemanticDITy[] = {
690 NS::DebugSource, NS::DebugCompilationUnit, NS::DebugInfoNone,
691 NS::DebugTypeBasic, NS::DebugTypePointer};
692 bool IsGlobalDI =
false;
693 for (
unsigned Idx = 0; Idx < std::size(GlobalNonSemanticDITy); ++Idx)
694 IsGlobalDI |= Ins.
getImm() == GlobalNonSemanticDITy[Idx];
697 }
else if (OpCode == SPIRV::OpName || OpCode == SPIRV::OpMemberName) {
699 }
else if (OpCode == SPIRV::OpEntryPoint) {
701 }
else if (TII->isAliasingInstr(
MI)) {
703 }
else if (TII->isDecorationInstr(
MI)) {
705 collectFuncNames(
MI, &
F);
706 }
else if (TII->isConstantInstr(
MI)) {
710 }
else if (OpCode == SPIRV::OpFunction) {
711 collectFuncNames(
MI, &
F);
712 }
else if (OpCode == SPIRV::OpTypeForwardPointer) {
// Walks every machine function in the module and assigns module-unique
// (global) register ids to all operand registers that do not yet have an
// alias; additionally numbers the extended-instruction-set id referenced by
// OpExtInst instructions.
// NOTE(review): lossy extraction -- some loop-body lines are elided.
722void SPIRVModuleAnalysis::numberRegistersGlobally(
const Module &M) {
723 for (
const Function &
F : M) {
// Declarations have no MachineFunction body to number.
724 if (
F.isDeclaration())
726 MachineFunction *MF = MMI->getMachineFunction(
F);
728 for (MachineBasicBlock &
MBB : *MF) {
729 for (MachineInstr &
MI :
MBB) {
730 for (MachineOperand &
Op :
MI.operands()) {
// Skip operands already mapped to a global register.
734 if (
MAI.hasRegisterAlias(MF,
Reg))
736 MCRegister NewReg =
MAI.getNextIDRegister();
737 MAI.setRegisterAlias(MF,
Reg, NewReg);
// Only OpExtInst carries an instruction-set operand that needs its
// own module-global id.
739 if (
MI.getOpcode() != SPIRV::OpExtInst)
741 auto Set =
MI.getOperand(2).getImm();
// try_emplace: number each extended instruction set exactly once.
742 auto [It,
Inserted] =
MAI.ExtInstSetMap.try_emplace(Set);
744 It->second =
MAI.getNextIDRegister();
752 SPIRV::OperandCategory::OperandCategory Category, uint32_t i,
754 addRequirements(getSymbolicOperandRequirements(Category, i, ST, *
this));
// Capability bookkeeping helpers for the requirement handler.
// NOTE(review): lossy extraction -- this span fragments two functions:
// recursiveAddCapabilities(...) and, below it (signature elided), the
// addCapabilities(...) loop over ToAdd.
757void SPIRV::RequirementHandler::recursiveAddCapabilities(
759 for (
const auto &Cap : ToPrune) {
// Capabilities implicitly declared by Cap are added transitively.
763 recursiveAddCapabilities(ImplicitDecls);
768 for (
const auto &Cap : ToAdd) {
// Only capabilities not seen before are processed further.
769 bool IsNewlyInserted = AllCaps.insert(Cap).second;
770 if (!IsNewlyInserted)
// Pull in transitive implicit capabilities, then keep Cap itself in the
// minimal set that is actually emitted.
774 recursiveAddCapabilities(ImplicitDecls);
775 MinimalCaps.push_back(Cap);
780 const SPIRV::Requirements &Req) {
784 if (Req.
Cap.has_value())
785 addCapabilities({Req.
Cap.value()});
787 addExtensions(Req.
Exts);
790 if (!MaxVersion.empty() && Req.
MinVer > MaxVersion) {
792 <<
" and <= " << MaxVersion <<
"\n");
796 if (MinVersion.empty() || Req.
MinVer > MinVersion)
801 if (!MinVersion.empty() && Req.
MaxVer < MinVersion) {
803 <<
" and >= " << MinVersion <<
"\n");
807 if (MaxVersion.empty() || Req.
MaxVer < MaxVersion)
813 const SPIRVSubtarget &ST)
const {
815 bool IsSatisfiable =
true;
816 auto TargetVer =
ST.getSPIRVVersion();
818 if (!MaxVersion.empty() && !TargetVer.empty() && MaxVersion < TargetVer) {
820 dbgs() <<
"Target SPIR-V version too high for required features\n"
821 <<
"Required max version: " << MaxVersion <<
" target version "
822 << TargetVer <<
"\n");
823 IsSatisfiable =
false;
826 if (!MinVersion.empty() && !TargetVer.empty() && MinVersion > TargetVer) {
827 LLVM_DEBUG(
dbgs() <<
"Target SPIR-V version too low for required features\n"
828 <<
"Required min version: " << MinVersion
829 <<
" target version " << TargetVer <<
"\n");
830 IsSatisfiable =
false;
833 if (!MinVersion.empty() && !MaxVersion.empty() && MinVersion > MaxVersion) {
836 <<
"Version is too low for some features and too high for others.\n"
837 <<
"Required SPIR-V min version: " << MinVersion
838 <<
" required SPIR-V max version " << MaxVersion <<
"\n");
839 IsSatisfiable =
false;
842 AvoidCapabilitiesSet AvoidCaps;
844 AvoidCaps.
S.
insert(SPIRV::Capability::Shader);
846 AvoidCaps.
S.
insert(SPIRV::Capability::Kernel);
848 for (
auto Cap : MinimalCaps) {
849 if (AvailableCaps.contains(Cap) && !AvoidCaps.
S.
contains(Cap))
853 OperandCategory::CapabilityOperand, Cap)
855 IsSatisfiable =
false;
858 for (
auto Ext : AllExtensions) {
859 if (
ST.canUseExtension(Ext))
863 OperandCategory::ExtensionOperand, Ext)
865 IsSatisfiable =
false;
874 for (
const auto Cap : ToAdd)
875 if (AvailableCaps.insert(Cap).second)
877 SPIRV::OperandCategory::CapabilityOperand, Cap));
881 const Capability::Capability
ToRemove,
882 const Capability::Capability IfPresent) {
883 if (AllCaps.contains(IfPresent))
891 addAvailableCaps({Capability::Shader, Capability::Linkage, Capability::Int8,
894 if (
ST.isAtLeastSPIRVVer(VersionTuple(1, 3)))
896 Capability::GroupNonUniformVote,
897 Capability::GroupNonUniformArithmetic,
898 Capability::GroupNonUniformBallot,
899 Capability::GroupNonUniformClustered,
900 Capability::GroupNonUniformShuffle,
901 Capability::GroupNonUniformShuffleRelative,
902 Capability::GroupNonUniformQuad});
904 if (
ST.isAtLeastSPIRVVer(VersionTuple(1, 6)))
906 Capability::DotProductInput4x8Bit,
907 Capability::DotProductInput4x8BitPacked,
908 Capability::DemoteToHelperInvocation});
911 for (
auto Extension :
ST.getAllAvailableExtensions()) {
917 if (!
ST.isShader()) {
918 initAvailableCapabilitiesForOpenCL(ST);
923 initAvailableCapabilitiesForVulkan(ST);
// Registers the SPIR-V capabilities available when targeting an OpenCL
// (kernel) environment, gated on subtarget profile, image support, and
// OpenCL / SPIR-V version checks.
// NOTE(review): lossy extraction -- several addAvailableCaps call heads and
// their argument lists are elided between the fragments below.
930void RequirementHandler::initAvailableCapabilitiesForOpenCL(
931 const SPIRVSubtarget &ST) {
// Baseline kernel-side capabilities (call head elided).
934 Capability::Kernel, Capability::Vector16,
935 Capability::Groups, Capability::GenericPointer,
936 Capability::StorageImageWriteWithoutFormat,
937 Capability::StorageImageReadWithoutFormat});
938 if (
ST.hasOpenCLFullProfile())
940 if (
ST.hasOpenCLImageSupport()) {
// Image-related capabilities, only when the subtarget supports images.
942 Capability::Image1D, Capability::SampledBuffer,
943 Capability::ImageBuffer});
944 if (
ST.isAtLeastOpenCLVer(VersionTuple(2, 0)))
947 if (
ST.isAtLeastSPIRVVer(VersionTuple(1, 1)) &&
948 ST.isAtLeastOpenCLVer(VersionTuple(2, 2)))
// SPIR-V 1.4+ float-controls capabilities.
950 if (
ST.isAtLeastSPIRVVer(VersionTuple(1, 4)))
951 addAvailableCaps({Capability::DenormPreserve, Capability::DenormFlushToZero,
952 Capability::SignedZeroInfNanPreserve,
953 Capability::RoundingModeRTE,
954 Capability::RoundingModeRTZ});
961void RequirementHandler::initAvailableCapabilitiesForVulkan(
962 const SPIRVSubtarget &ST) {
968 Capability::GroupNonUniform,
970 Capability::SampledBuffer,
971 Capability::ImageBuffer,
972 Capability::UniformBufferArrayDynamicIndexing,
973 Capability::SampledImageArrayDynamicIndexing,
974 Capability::StorageBufferArrayDynamicIndexing,
975 Capability::StorageImageArrayDynamicIndexing,
976 Capability::DerivativeControl,
978 Capability::ImageQuery,
979 Capability::ImageGatherExtended,
980 Capability::Addresses,
981 Capability::VulkanMemoryModelKHR,
982 Capability::StorageImageExtendedFormats,
983 Capability::StorageImageMultisample,
984 Capability::ImageMSArray});
987 if (
ST.isAtLeastSPIRVVer(VersionTuple(1, 5))) {
989 {Capability::ShaderNonUniformEXT, Capability::RuntimeDescriptorArrayEXT,
990 Capability::InputAttachmentArrayDynamicIndexingEXT,
991 Capability::UniformTexelBufferArrayDynamicIndexingEXT,
992 Capability::StorageTexelBufferArrayDynamicIndexingEXT,
993 Capability::UniformBufferArrayNonUniformIndexingEXT,
994 Capability::SampledImageArrayNonUniformIndexingEXT,
995 Capability::StorageBufferArrayNonUniformIndexingEXT,
996 Capability::StorageImageArrayNonUniformIndexingEXT,
997 Capability::InputAttachmentArrayNonUniformIndexingEXT,
998 Capability::UniformTexelBufferArrayNonUniformIndexingEXT,
999 Capability::StorageTexelBufferArrayNonUniformIndexingEXT});
1003 if (
ST.isAtLeastSPIRVVer(VersionTuple(1, 6)))
1005 Capability::StorageImageReadWithoutFormat});
1013static void addOpDecorateReqs(
const MachineInstr &
MI,
unsigned DecIndex,
1016 int64_t DecOp =
MI.getOperand(DecIndex).getImm();
1017 auto Dec =
static_cast<SPIRV::Decoration::Decoration
>(DecOp);
1019 SPIRV::OperandCategory::DecorationOperand, Dec, ST, Reqs));
1021 if (Dec == SPIRV::Decoration::BuiltIn) {
1022 int64_t BuiltInOp =
MI.getOperand(DecIndex + 1).getImm();
1023 auto BuiltIn =
static_cast<SPIRV::BuiltIn::BuiltIn
>(BuiltInOp);
1025 SPIRV::OperandCategory::BuiltInOperand, BuiltIn, ST, Reqs));
1026 }
else if (Dec == SPIRV::Decoration::LinkageAttributes) {
1027 int64_t LinkageOp =
MI.getOperand(
MI.getNumOperands() - 1).getImm();
1028 SPIRV::LinkageType::LinkageType LnkType =
1029 static_cast<SPIRV::LinkageType::LinkageType
>(LinkageOp);
1030 if (LnkType == SPIRV::LinkageType::LinkOnceODR)
1031 Reqs.
addExtension(SPIRV::Extension::SPV_KHR_linkonce_odr);
1032 else if (LnkType == SPIRV::LinkageType::Weak)
1033 Reqs.
addExtension(SPIRV::Extension::SPV_AMD_weak_linkage);
1034 }
else if (Dec == SPIRV::Decoration::CacheControlLoadINTEL ||
1035 Dec == SPIRV::Decoration::CacheControlStoreINTEL) {
1036 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_cache_controls);
1037 }
else if (Dec == SPIRV::Decoration::HostAccessINTEL) {
1038 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_global_variable_host_access);
1039 }
else if (Dec == SPIRV::Decoration::InitModeINTEL ||
1040 Dec == SPIRV::Decoration::ImplementInRegisterMapINTEL) {
1042 SPIRV::Extension::SPV_INTEL_global_variable_fpga_decorations);
1043 }
else if (Dec == SPIRV::Decoration::NonUniformEXT) {
1045 }
else if (Dec == SPIRV::Decoration::FPMaxErrorDecorationINTEL) {
1047 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_fp_max_error);
1048 }
else if (Dec == SPIRV::Decoration::FPFastMathMode) {
1049 if (
ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2)) {
1051 Reqs.
addExtension(SPIRV::Extension::SPV_KHR_float_controls2);
1060 assert(
MI.getNumOperands() >= 8 &&
"Insufficient operands for OpTypeImage");
1063 int64_t ImgFormatOp =
MI.getOperand(7).getImm();
1064 auto ImgFormat =
static_cast<SPIRV::ImageFormat::ImageFormat
>(ImgFormatOp);
1068 bool IsArrayed =
MI.getOperand(4).getImm() == 1;
1069 bool IsMultisampled =
MI.getOperand(5).getImm() == 1;
1070 bool NoSampler =
MI.getOperand(6).getImm() == 2;
1073 switch (
MI.getOperand(2).getImm()) {
1074 case SPIRV::Dim::DIM_1D:
1076 : SPIRV::Capability::Sampled1D);
1078 case SPIRV::Dim::DIM_2D:
1079 if (IsMultisampled && NoSampler)
1081 if (IsMultisampled && IsArrayed)
1084 case SPIRV::Dim::DIM_3D:
1086 case SPIRV::Dim::DIM_Cube:
1090 : SPIRV::Capability::SampledCubeArray);
1092 case SPIRV::Dim::DIM_Rect:
1094 : SPIRV::Capability::SampledRect);
1096 case SPIRV::Dim::DIM_Buffer:
1098 : SPIRV::Capability::SampledBuffer);
1100 case SPIRV::Dim::DIM_SubpassData:
1112 Reqs.
addExtension(SPIRV::Extension::SPV_EXT_shader_image_int64);
1116 if (!
ST.isShader()) {
1117 if (
MI.getNumOperands() > 8 &&
1118 MI.getOperand(8).getImm() == SPIRV::AccessQualifier::ReadWrite)
1127 TypeDef->
getOpcode() == SPIRV::OpTypeFloat &&
1133#define ATOM_FLT_REQ_EXT_MSG(ExtName) \
1134 "The atomic float instruction requires the following SPIR-V " \
1135 "extension: SPV_EXT_shader_atomic_float" ExtName
// Validates an atomic instruction whose result type is a float vector and
// records the extension/capability it requires
// (SPV_NV_shader_atomic_fp16_vector / AtomicFloat16VectorNV).
// NOTE(review): lossy extraction -- the error-reporting call heads and some
// intermediate declarations (Rank, EltTypeDef) are elided.
1136static void AddAtomicVectorFloatRequirements(
const MachineInstr &
MI,
1140 MI.getMF()->getRegInfo().getVRegDef(
MI.getOperand(1).getReg());
// Only 2- and 4-component vectors are accepted.
1143 if (Rank != 2 && Rank != 4)
1145 "must be a 2-component or 4 component vector");
// Element type must be a 16-bit float scalar...
1150 if (EltTypeDef->
getOpcode() != SPIRV::OpTypeFloat ||
1153 "The element type for the result type of an atomic vector float "
1154 "instruction must be a 16-bit floating-point scalar");
// ...and explicitly not bfloat16.
1156 if (isBFloat16Type(EltTypeDef))
1158 "The element type for the result type of an atomic vector float "
1159 "instruction cannot be a bfloat16 scalar");
1160 if (!
ST.canUseExtension(SPIRV::Extension::SPV_NV_shader_atomic_fp16_vector))
1162 "The atomic float16 vector instruction requires the following SPIR-V "
1163 "extension: SPV_NV_shader_atomic_fp16_vector");
// Record the requirement once all checks pass.
1165 Reqs.
addExtension(SPIRV::Extension::SPV_NV_shader_atomic_fp16_vector);
1166 Reqs.
addCapability(SPIRV::Capability::AtomicFloat16VectorNV);
1173 "Expect register operand in atomic float instruction");
1174 Register TypeReg =
MI.getOperand(1).getReg();
1177 if (TypeDef->
getOpcode() == SPIRV::OpTypeVector)
1178 return AddAtomicVectorFloatRequirements(
MI, Reqs, ST);
1180 if (TypeDef->
getOpcode() != SPIRV::OpTypeFloat)
1182 "floating-point type scalar");
1185 unsigned Op =
MI.getOpcode();
1186 if (
Op == SPIRV::OpAtomicFAddEXT) {
1187 if (!
ST.canUseExtension(SPIRV::Extension::SPV_EXT_shader_atomic_float_add))
1189 Reqs.
addExtension(SPIRV::Extension::SPV_EXT_shader_atomic_float_add);
1192 if (isBFloat16Type(TypeDef)) {
1193 if (!
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_16bit_atomics))
1195 "The atomic bfloat16 instruction requires the following SPIR-V "
1196 "extension: SPV_INTEL_16bit_atomics",
1198 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_16bit_atomics);
1199 Reqs.
addCapability(SPIRV::Capability::AtomicBFloat16AddINTEL);
1201 if (!
ST.canUseExtension(
1202 SPIRV::Extension::SPV_EXT_shader_atomic_float16_add))
1204 Reqs.
addExtension(SPIRV::Extension::SPV_EXT_shader_atomic_float16_add);
1216 "Unexpected floating-point type width in atomic float instruction");
1219 if (!
ST.canUseExtension(
1220 SPIRV::Extension::SPV_EXT_shader_atomic_float_min_max))
1222 Reqs.
addExtension(SPIRV::Extension::SPV_EXT_shader_atomic_float_min_max);
1225 if (isBFloat16Type(TypeDef)) {
1226 if (!
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_16bit_atomics))
1228 "The atomic bfloat16 instruction requires the following SPIR-V "
1229 "extension: SPV_INTEL_16bit_atomics",
1231 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_16bit_atomics);
1232 Reqs.
addCapability(SPIRV::Capability::AtomicBFloat16MinMaxINTEL);
1234 Reqs.
addCapability(SPIRV::Capability::AtomicFloat16MinMaxEXT);
1238 Reqs.
addCapability(SPIRV::Capability::AtomicFloat32MinMaxEXT);
1241 Reqs.
addCapability(SPIRV::Capability::AtomicFloat64MinMaxEXT);
1245 "Unexpected floating-point type width in atomic float instruction");
// Predicates classifying an OpTypeImage by its Dim and Sampled operands.
// NOTE(review): lossy extraction -- the five signatures and the Dim/Sampled
// extraction lines are elided; names below are inferred from the call sites
// later in this file (addOpAccessChainReqs) -- TODO confirm.
// Presumably isUniformTexelBuffer: Buffer dim, Sampled == 1.
1251 if (ImageInst->
getOpcode() != SPIRV::OpTypeImage)
1255 return Dim == SPIRV::Dim::DIM_Buffer && Sampled == 1;
// Presumably isStorageTexelBuffer: Buffer dim, Sampled == 2.
1259 if (ImageInst->
getOpcode() != SPIRV::OpTypeImage)
1263 return Dim == SPIRV::Dim::DIM_Buffer && Sampled == 2;
// Presumably isSampledImage: non-Buffer dim, Sampled == 1.
1267 if (ImageInst->
getOpcode() != SPIRV::OpTypeImage)
1271 return Dim != SPIRV::Dim::DIM_Buffer && Sampled == 1;
// Presumably isInputAttachment: SubpassData dim, Sampled == 2.
1275 if (ImageInst->
getOpcode() != SPIRV::OpTypeImage)
1279 return Dim == SPIRV::Dim::DIM_SubpassData && Sampled == 2;
// Presumably isStorageImage: non-Buffer dim, Sampled == 2.
1283 if (ImageInst->
getOpcode() != SPIRV::OpTypeImage)
1287 return Dim != SPIRV::Dim::DIM_Buffer && Sampled == 2;
// A combined image-sampler is an OpTypeSampledImage wrapping a sampled image.
1290bool isCombinedImageSampler(
MachineInstr *SampledImageInst) {
1291 if (SampledImageInst->
getOpcode() != SPIRV::OpTypeSampledImage)
1297 return isSampledImage(ImageInst);
1302 if (
MI.getOpcode() != SPIRV::OpDecorate)
1306 if (Dec == SPIRV::Decoration::NonUniformEXT)
1324 if (
StorageClass != SPIRV::StorageClass::StorageClass::UniformConstant &&
1325 StorageClass != SPIRV::StorageClass::StorageClass::Uniform &&
1326 StorageClass != SPIRV::StorageClass::StorageClass::StorageBuffer) {
1331 hasNonUniformDecoration(
Instr.getOperand(0).getReg(), MRI);
1333 auto FirstIndexReg =
Instr.getOperand(3).getReg();
1334 bool FirstIndexIsConstant =
1337 if (
StorageClass == SPIRV::StorageClass::StorageClass::StorageBuffer) {
1340 SPIRV::Capability::StorageBufferArrayNonUniformIndexingEXT);
1341 else if (!FirstIndexIsConstant)
1343 SPIRV::Capability::StorageBufferArrayDynamicIndexing);
1349 if (PointeeType->
getOpcode() != SPIRV::OpTypeImage &&
1350 PointeeType->
getOpcode() != SPIRV::OpTypeSampledImage &&
1351 PointeeType->
getOpcode() != SPIRV::OpTypeSampler) {
1355 if (isUniformTexelBuffer(PointeeType)) {
1358 SPIRV::Capability::UniformTexelBufferArrayNonUniformIndexingEXT);
1359 else if (!FirstIndexIsConstant)
1361 SPIRV::Capability::UniformTexelBufferArrayDynamicIndexingEXT);
1362 }
else if (isInputAttachment(PointeeType)) {
1365 SPIRV::Capability::InputAttachmentArrayNonUniformIndexingEXT);
1366 else if (!FirstIndexIsConstant)
1368 SPIRV::Capability::InputAttachmentArrayDynamicIndexingEXT);
1369 }
else if (isStorageTexelBuffer(PointeeType)) {
1372 SPIRV::Capability::StorageTexelBufferArrayNonUniformIndexingEXT);
1373 else if (!FirstIndexIsConstant)
1375 SPIRV::Capability::StorageTexelBufferArrayDynamicIndexingEXT);
1376 }
else if (isSampledImage(PointeeType) ||
1377 isCombinedImageSampler(PointeeType) ||
1378 PointeeType->
getOpcode() == SPIRV::OpTypeSampler) {
1381 SPIRV::Capability::SampledImageArrayNonUniformIndexingEXT);
1382 else if (!FirstIndexIsConstant)
1384 SPIRV::Capability::SampledImageArrayDynamicIndexing);
1385 }
else if (isStorageImage(PointeeType)) {
1388 SPIRV::Capability::StorageImageArrayNonUniformIndexingEXT);
1389 else if (!FirstIndexIsConstant)
1391 SPIRV::Capability::StorageImageArrayDynamicIndexing);
1395static bool isImageTypeWithUnknownFormat(
SPIRVTypeInst TypeInst) {
1396 if (TypeInst->
getOpcode() != SPIRV::OpTypeImage)
1405 if (
ST.canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product))
1406 Reqs.
addExtension(SPIRV::Extension::SPV_KHR_integer_dot_product);
1410 assert(
MI.getOperand(2).isReg() &&
"Unexpected operand in dot");
1414 assert(
Input->getOperand(1).isReg() &&
"Unexpected operand in dot input");
1418 if (TypeDef->
getOpcode() == SPIRV::OpTypeInt) {
1420 Reqs.
addCapability(SPIRV::Capability::DotProductInput4x8BitPacked);
1421 }
else if (TypeDef->
getOpcode() == SPIRV::OpTypeVector) {
1427 "Dot operand of 8-bit integer type requires 4 components");
1428 Reqs.
addCapability(SPIRV::Capability::DotProductInput4x8Bit);
1443 unsigned AddrSpace = ASOp.
getImm();
1444 if (AddrSpace != SPIRV::StorageClass::UniformConstant) {
1445 if (!
ST.canUseExtension(
1447 SPV_EXT_relaxed_printf_string_address_space)) {
1449 "required because printf uses a format string not "
1450 "in constant address space.",
1454 SPIRV::Extension::SPV_EXT_relaxed_printf_string_address_space);
1463 if (
MI.getNumOperands() <=
OpIdx)
1467 if (Mask & (1U <<
I))
1476 unsigned Op =
MI.getOpcode();
1478 case SPIRV::OpMemoryModel: {
1479 int64_t Addr =
MI.getOperand(0).getImm();
1482 int64_t Mem =
MI.getOperand(1).getImm();
1487 case SPIRV::OpEntryPoint: {
1488 int64_t
Exe =
MI.getOperand(0).getImm();
1493 case SPIRV::OpExecutionMode:
1494 case SPIRV::OpExecutionModeId: {
1495 int64_t
Exe =
MI.getOperand(1).getImm();
1500 case SPIRV::OpTypeMatrix:
1503 case SPIRV::OpTypeInt: {
1504 unsigned BitWidth =
MI.getOperand(1).getImm();
1512 ST.canUseExtension(SPIRV::Extension::SPV_INTEL_int4)) {
1516 if (!
ST.canUseExtension(
1517 SPIRV::Extension::SPV_ALTERA_arbitrary_precision_integers))
1519 "OpTypeInt type with a width other than 8, 16, 32 or 64 bits "
1520 "requires the following SPIR-V extension: "
1521 "SPV_ALTERA_arbitrary_precision_integers");
1523 SPIRV::Extension::SPV_ALTERA_arbitrary_precision_integers);
1524 Reqs.
addCapability(SPIRV::Capability::ArbitraryPrecisionIntegersALTERA);
1528 case SPIRV::OpDot: {
1531 if (isBFloat16Type(TypeDef))
1532 Reqs.
addCapability(SPIRV::Capability::BFloat16DotProductKHR);
1535 case SPIRV::OpTypeFloat: {
1536 unsigned BitWidth =
MI.getOperand(1).getImm();
1540 if (isBFloat16Type(&
MI)) {
1541 if (!
ST.canUseExtension(SPIRV::Extension::SPV_KHR_bfloat16))
1543 "following SPIR-V extension: SPV_KHR_bfloat16",
1553 case SPIRV::OpTypeVector: {
1554 unsigned NumComponents =
MI.getOperand(2).getImm();
1555 if (NumComponents == 8 || NumComponents == 16)
1561 if (ElemTypeDef->
getOpcode() == SPIRV::OpTypePointer &&
1562 ST.canUseExtension(SPIRV::Extension::SPV_INTEL_masked_gather_scatter)) {
1563 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_masked_gather_scatter);
1564 Reqs.
addCapability(SPIRV::Capability::MaskedGatherScatterINTEL);
1568 case SPIRV::OpTypePointer: {
1569 auto SC =
MI.getOperand(1).getImm();
1580 (TypeDef->
getOpcode() == SPIRV::OpTypeFloat) &&
1585 case SPIRV::OpExtInst: {
1586 if (
MI.getOperand(2).getImm() ==
1587 static_cast<int64_t
>(
1588 SPIRV::InstructionSet::NonSemantic_Shader_DebugInfo_100)) {
1589 Reqs.
addExtension(SPIRV::Extension::SPV_KHR_non_semantic_info);
1592 if (
MI.getOperand(3).getImm() ==
1593 static_cast<int64_t
>(SPIRV::OpenCLExtInst::printf)) {
1594 addPrintfRequirements(
MI, Reqs, ST);
1601 case SPIRV::OpAliasDomainDeclINTEL:
1602 case SPIRV::OpAliasScopeDeclINTEL:
1603 case SPIRV::OpAliasScopeListDeclINTEL: {
1604 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_memory_access_aliasing);
1605 Reqs.
addCapability(SPIRV::Capability::MemoryAccessAliasingINTEL);
1608 case SPIRV::OpBitReverse:
1609 case SPIRV::OpBitFieldInsert:
1610 case SPIRV::OpBitFieldSExtract:
1611 case SPIRV::OpBitFieldUExtract:
1612 if (!
ST.canUseExtension(SPIRV::Extension::SPV_KHR_bit_instructions)) {
1616 Reqs.
addExtension(SPIRV::Extension::SPV_KHR_bit_instructions);
1619 case SPIRV::OpTypeRuntimeArray:
1622 case SPIRV::OpTypeOpaque:
1623 case SPIRV::OpTypeEvent:
1626 case SPIRV::OpTypePipe:
1627 case SPIRV::OpTypeReserveId:
1630 case SPIRV::OpTypeDeviceEvent:
1631 case SPIRV::OpTypeQueue:
1632 case SPIRV::OpBuildNDRange:
1635 case SPIRV::OpDecorate:
1636 case SPIRV::OpDecorateId:
1637 case SPIRV::OpDecorateString:
1638 addOpDecorateReqs(
MI, 1, Reqs, ST);
1640 case SPIRV::OpMemberDecorate:
1641 case SPIRV::OpMemberDecorateString:
1642 addOpDecorateReqs(
MI, 2, Reqs, ST);
1644 case SPIRV::OpInBoundsPtrAccessChain:
1647 case SPIRV::OpConstantSampler:
1650 case SPIRV::OpInBoundsAccessChain:
1651 case SPIRV::OpAccessChain:
1652 addOpAccessChainReqs(
MI, Reqs, ST);
1654 case SPIRV::OpTypeImage:
1655 addOpTypeImageReqs(
MI, Reqs, ST);
1657 case SPIRV::OpTypeSampler:
1658 if (!
ST.isShader()) {
1662 case SPIRV::OpTypeForwardPointer:
1666 case SPIRV::OpAtomicFlagTestAndSet:
1667 case SPIRV::OpAtomicLoad:
1668 case SPIRV::OpAtomicStore:
1669 case SPIRV::OpAtomicExchange:
1670 case SPIRV::OpAtomicCompareExchange:
1671 case SPIRV::OpAtomicIIncrement:
1672 case SPIRV::OpAtomicIDecrement:
1673 case SPIRV::OpAtomicIAdd:
1674 case SPIRV::OpAtomicISub:
1675 case SPIRV::OpAtomicUMin:
1676 case SPIRV::OpAtomicUMax:
1677 case SPIRV::OpAtomicSMin:
1678 case SPIRV::OpAtomicSMax:
1679 case SPIRV::OpAtomicAnd:
1680 case SPIRV::OpAtomicOr:
1681 case SPIRV::OpAtomicXor: {
1684 if (
Op == SPIRV::OpAtomicStore) {
1687 assert(InstrPtr &&
"Unexpected type instruction for OpAtomicStore");
1693 if (TypeDef->
getOpcode() == SPIRV::OpTypeInt) {
1698 if (!
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_16bit_atomics))
1700 "16-bit integer atomic operations require the following SPIR-V "
1701 "extension: SPV_INTEL_16bit_atomics",
1703 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_16bit_atomics);
1705 case SPIRV::OpAtomicLoad:
1706 case SPIRV::OpAtomicStore:
1707 case SPIRV::OpAtomicExchange:
1708 case SPIRV::OpAtomicCompareExchange:
1709 case SPIRV::OpAtomicCompareExchangeWeak:
1711 SPIRV::Capability::AtomicInt16CompareExchangeINTEL);
1718 }
else if (isBFloat16Type(TypeDef)) {
1719 if (
is_contained({SPIRV::OpAtomicLoad, SPIRV::OpAtomicStore,
1720 SPIRV::OpAtomicExchange},
1722 if (!
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_16bit_atomics))
1724 "The atomic bfloat16 instruction requires the following SPIR-V "
1725 "extension: SPV_INTEL_16bit_atomics",
1727 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_16bit_atomics);
1728 Reqs.
addCapability(SPIRV::Capability::AtomicBFloat16LoadStoreINTEL);
1733 case SPIRV::OpGroupNonUniformIAdd:
1734 case SPIRV::OpGroupNonUniformFAdd:
1735 case SPIRV::OpGroupNonUniformIMul:
1736 case SPIRV::OpGroupNonUniformFMul:
1737 case SPIRV::OpGroupNonUniformSMin:
1738 case SPIRV::OpGroupNonUniformUMin:
1739 case SPIRV::OpGroupNonUniformFMin:
1740 case SPIRV::OpGroupNonUniformSMax:
1741 case SPIRV::OpGroupNonUniformUMax:
1742 case SPIRV::OpGroupNonUniformFMax:
1743 case SPIRV::OpGroupNonUniformBitwiseAnd:
1744 case SPIRV::OpGroupNonUniformBitwiseOr:
1745 case SPIRV::OpGroupNonUniformBitwiseXor:
1746 case SPIRV::OpGroupNonUniformLogicalAnd:
1747 case SPIRV::OpGroupNonUniformLogicalOr:
1748 case SPIRV::OpGroupNonUniformLogicalXor: {
1750 int64_t GroupOp =
MI.getOperand(3).getImm();
1752 case SPIRV::GroupOperation::Reduce:
1753 case SPIRV::GroupOperation::InclusiveScan:
1754 case SPIRV::GroupOperation::ExclusiveScan:
1755 Reqs.
addCapability(SPIRV::Capability::GroupNonUniformArithmetic);
1757 case SPIRV::GroupOperation::ClusteredReduce:
1758 Reqs.
addCapability(SPIRV::Capability::GroupNonUniformClustered);
1760 case SPIRV::GroupOperation::PartitionedReduceNV:
1761 case SPIRV::GroupOperation::PartitionedInclusiveScanNV:
1762 case SPIRV::GroupOperation::PartitionedExclusiveScanNV:
1763 Reqs.
addCapability(SPIRV::Capability::GroupNonUniformPartitionedNV);
1768 case SPIRV::OpGroupNonUniformQuadSwap:
1771 case SPIRV::OpImageQueryLod:
1774 case SPIRV::OpImageQuerySize:
1775 case SPIRV::OpImageQuerySizeLod:
1776 case SPIRV::OpImageQueryLevels:
1777 case SPIRV::OpImageQuerySamples:
1781 case SPIRV::OpImageQueryFormat: {
1782 Register ResultReg =
MI.getOperand(0).getReg();
1784 static const unsigned CompareOps[] = {
1785 SPIRV::OpIEqual, SPIRV::OpINotEqual,
1786 SPIRV::OpUGreaterThan, SPIRV::OpUGreaterThanEqual,
1787 SPIRV::OpULessThan, SPIRV::OpULessThanEqual,
1788 SPIRV::OpSGreaterThan, SPIRV::OpSGreaterThanEqual,
1789 SPIRV::OpSLessThan, SPIRV::OpSLessThanEqual};
1791 auto CheckAndAddExtension = [&](int64_t ImmVal) {
1792 if (ImmVal == 4323 || ImmVal == 4324) {
1793 if (
ST.canUseExtension(SPIRV::Extension::SPV_EXT_image_raw10_raw12))
1794 Reqs.
addExtension(SPIRV::Extension::SPV_EXT_image_raw10_raw12);
1797 "SPV_EXT_image_raw10_raw12 extension");
1802 unsigned Opc = UseInst.getOpcode();
1804 if (
Opc == SPIRV::OpSwitch) {
1807 CheckAndAddExtension(
Op.getImm());
1809 for (
unsigned i = 1; i < UseInst.getNumOperands(); ++i) {
1812 if (ConstInst && ConstInst->
getOpcode() == SPIRV::OpConstantI) {
1815 CheckAndAddExtension(ImmVal);
1823 case SPIRV::OpGroupNonUniformShuffle:
1824 case SPIRV::OpGroupNonUniformShuffleXor:
1825 Reqs.
addCapability(SPIRV::Capability::GroupNonUniformShuffle);
1827 case SPIRV::OpGroupNonUniformShuffleUp:
1828 case SPIRV::OpGroupNonUniformShuffleDown:
1829 Reqs.
addCapability(SPIRV::Capability::GroupNonUniformShuffleRelative);
1831 case SPIRV::OpGroupAll:
1832 case SPIRV::OpGroupAny:
1833 case SPIRV::OpGroupBroadcast:
1834 case SPIRV::OpGroupIAdd:
1835 case SPIRV::OpGroupFAdd:
1836 case SPIRV::OpGroupFMin:
1837 case SPIRV::OpGroupUMin:
1838 case SPIRV::OpGroupSMin:
1839 case SPIRV::OpGroupFMax:
1840 case SPIRV::OpGroupUMax:
1841 case SPIRV::OpGroupSMax:
1844 case SPIRV::OpGroupNonUniformElect:
1847 case SPIRV::OpGroupNonUniformAll:
1848 case SPIRV::OpGroupNonUniformAny:
1849 case SPIRV::OpGroupNonUniformAllEqual:
1852 case SPIRV::OpGroupNonUniformBroadcast:
1853 case SPIRV::OpGroupNonUniformBroadcastFirst:
1854 case SPIRV::OpGroupNonUniformBallot:
1855 case SPIRV::OpGroupNonUniformInverseBallot:
1856 case SPIRV::OpGroupNonUniformBallotBitExtract:
1857 case SPIRV::OpGroupNonUniformBallotBitCount:
1858 case SPIRV::OpGroupNonUniformBallotFindLSB:
1859 case SPIRV::OpGroupNonUniformBallotFindMSB:
1860 Reqs.
addCapability(SPIRV::Capability::GroupNonUniformBallot);
1862 case SPIRV::OpSubgroupShuffleINTEL:
1863 case SPIRV::OpSubgroupShuffleDownINTEL:
1864 case SPIRV::OpSubgroupShuffleUpINTEL:
1865 case SPIRV::OpSubgroupShuffleXorINTEL:
1866 if (
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_subgroups)) {
1867 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_subgroups);
1868 Reqs.
addCapability(SPIRV::Capability::SubgroupShuffleINTEL);
1871 case SPIRV::OpSubgroupBlockReadINTEL:
1872 case SPIRV::OpSubgroupBlockWriteINTEL:
1873 if (
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_subgroups)) {
1874 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_subgroups);
1875 Reqs.
addCapability(SPIRV::Capability::SubgroupBufferBlockIOINTEL);
1878 case SPIRV::OpSubgroupImageBlockReadINTEL:
1879 case SPIRV::OpSubgroupImageBlockWriteINTEL:
1880 if (
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_subgroups)) {
1881 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_subgroups);
1882 Reqs.
addCapability(SPIRV::Capability::SubgroupImageBlockIOINTEL);
1885 case SPIRV::OpSubgroupImageMediaBlockReadINTEL:
1886 case SPIRV::OpSubgroupImageMediaBlockWriteINTEL:
1887 if (
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_media_block_io)) {
1888 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_media_block_io);
1889 Reqs.
addCapability(SPIRV::Capability::SubgroupImageMediaBlockIOINTEL);
1892 case SPIRV::OpAssumeTrueKHR:
1893 case SPIRV::OpExpectKHR:
1894 if (
ST.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume)) {
1895 Reqs.
addExtension(SPIRV::Extension::SPV_KHR_expect_assume);
1899 case SPIRV::OpFmaKHR:
1900 if (
ST.canUseExtension(SPIRV::Extension::SPV_KHR_fma)) {
1905 case SPIRV::OpPtrCastToCrossWorkgroupINTEL:
1906 case SPIRV::OpCrossWorkgroupCastToPtrINTEL:
1907 if (
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_usm_storage_classes)) {
1908 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_usm_storage_classes);
1909 Reqs.
addCapability(SPIRV::Capability::USMStorageClassesINTEL);
1912 case SPIRV::OpConstantFunctionPointerINTEL:
1913 if (
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers)) {
1914 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_function_pointers);
1915 Reqs.
addCapability(SPIRV::Capability::FunctionPointersINTEL);
1918 case SPIRV::OpGroupNonUniformRotateKHR:
1919 if (!
ST.canUseExtension(SPIRV::Extension::SPV_KHR_subgroup_rotate))
1921 "following SPIR-V extension: SPV_KHR_subgroup_rotate",
1923 Reqs.
addExtension(SPIRV::Extension::SPV_KHR_subgroup_rotate);
1924 Reqs.
addCapability(SPIRV::Capability::GroupNonUniformRotateKHR);
1927 case SPIRV::OpFixedCosALTERA:
1928 case SPIRV::OpFixedSinALTERA:
1929 case SPIRV::OpFixedCosPiALTERA:
1930 case SPIRV::OpFixedSinPiALTERA:
1931 case SPIRV::OpFixedExpALTERA:
1932 case SPIRV::OpFixedLogALTERA:
1933 case SPIRV::OpFixedRecipALTERA:
1934 case SPIRV::OpFixedSqrtALTERA:
1935 case SPIRV::OpFixedSinCosALTERA:
1936 case SPIRV::OpFixedSinCosPiALTERA:
1937 case SPIRV::OpFixedRsqrtALTERA:
1938 if (!
ST.canUseExtension(
1939 SPIRV::Extension::SPV_ALTERA_arbitrary_precision_fixed_point))
1941 "following SPIR-V extension: "
1942 "SPV_ALTERA_arbitrary_precision_fixed_point",
1945 SPIRV::Extension::SPV_ALTERA_arbitrary_precision_fixed_point);
1946 Reqs.
addCapability(SPIRV::Capability::ArbitraryPrecisionFixedPointALTERA);
1948 case SPIRV::OpGroupIMulKHR:
1949 case SPIRV::OpGroupFMulKHR:
1950 case SPIRV::OpGroupBitwiseAndKHR:
1951 case SPIRV::OpGroupBitwiseOrKHR:
1952 case SPIRV::OpGroupBitwiseXorKHR:
1953 case SPIRV::OpGroupLogicalAndKHR:
1954 case SPIRV::OpGroupLogicalOrKHR:
1955 case SPIRV::OpGroupLogicalXorKHR:
1956 if (
ST.canUseExtension(
1957 SPIRV::Extension::SPV_KHR_uniform_group_instructions)) {
1958 Reqs.
addExtension(SPIRV::Extension::SPV_KHR_uniform_group_instructions);
1959 Reqs.
addCapability(SPIRV::Capability::GroupUniformArithmeticKHR);
1962 case SPIRV::OpReadClockKHR:
1963 if (!
ST.canUseExtension(SPIRV::Extension::SPV_KHR_shader_clock))
1965 "following SPIR-V extension: SPV_KHR_shader_clock",
1967 Reqs.
addExtension(SPIRV::Extension::SPV_KHR_shader_clock);
1970 case SPIRV::OpAbortKHR:
1971 if (!
ST.canUseExtension(SPIRV::Extension::SPV_KHR_abort))
1973 "following SPIR-V extension: SPV_KHR_abort",
1978 case SPIRV::OpFunctionPointerCallINTEL:
1979 if (
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers)) {
1980 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_function_pointers);
1981 Reqs.
addCapability(SPIRV::Capability::FunctionPointersINTEL);
1984 case SPIRV::OpAtomicFAddEXT:
1985 case SPIRV::OpAtomicFMinEXT:
1986 case SPIRV::OpAtomicFMaxEXT:
1987 AddAtomicFloatRequirements(
MI, Reqs, ST);
1989 case SPIRV::OpConvertBF16ToFINTEL:
1990 case SPIRV::OpConvertFToBF16INTEL:
1991 if (
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_bfloat16_conversion)) {
1992 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_bfloat16_conversion);
1993 Reqs.
addCapability(SPIRV::Capability::BFloat16ConversionINTEL);
1996 case SPIRV::OpRoundFToTF32INTEL:
1997 if (
ST.canUseExtension(
1998 SPIRV::Extension::SPV_INTEL_tensor_float32_conversion)) {
1999 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_tensor_float32_conversion);
2000 Reqs.
addCapability(SPIRV::Capability::TensorFloat32RoundingINTEL);
2003 case SPIRV::OpVariableLengthArrayINTEL:
2004 case SPIRV::OpSaveMemoryINTEL:
2005 case SPIRV::OpRestoreMemoryINTEL:
2006 if (
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array)) {
2007 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_variable_length_array);
2008 Reqs.
addCapability(SPIRV::Capability::VariableLengthArrayINTEL);
2011 case SPIRV::OpAsmTargetINTEL:
2012 case SPIRV::OpAsmINTEL:
2013 case SPIRV::OpAsmCallINTEL:
2014 if (
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_inline_assembly)) {
2015 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_inline_assembly);
2019 case SPIRV::OpTypeCooperativeMatrixKHR: {
2020 if (!
ST.canUseExtension(SPIRV::Extension::SPV_KHR_cooperative_matrix))
2022 "OpTypeCooperativeMatrixKHR type requires the "
2023 "following SPIR-V extension: SPV_KHR_cooperative_matrix",
2025 Reqs.
addExtension(SPIRV::Extension::SPV_KHR_cooperative_matrix);
2026 Reqs.
addCapability(SPIRV::Capability::CooperativeMatrixKHR);
2029 if (isBFloat16Type(TypeDef))
2030 Reqs.
addCapability(SPIRV::Capability::BFloat16CooperativeMatrixKHR);
2033 case SPIRV::OpArithmeticFenceEXT:
2034 if (!
ST.canUseExtension(SPIRV::Extension::SPV_EXT_arithmetic_fence))
2036 "following SPIR-V extension: SPV_EXT_arithmetic_fence",
2038 Reqs.
addExtension(SPIRV::Extension::SPV_EXT_arithmetic_fence);
2041 case SPIRV::OpControlBarrierArriveINTEL:
2042 case SPIRV::OpControlBarrierWaitINTEL:
2043 if (
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_split_barrier)) {
2044 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_split_barrier);
2048 case SPIRV::OpCooperativeMatrixMulAddKHR: {
2049 if (!
ST.canUseExtension(SPIRV::Extension::SPV_KHR_cooperative_matrix))
2051 "following SPIR-V extension: "
2052 "SPV_KHR_cooperative_matrix",
2054 Reqs.
addExtension(SPIRV::Extension::SPV_KHR_cooperative_matrix);
2055 Reqs.
addCapability(SPIRV::Capability::CooperativeMatrixKHR);
2056 constexpr unsigned MulAddMaxSize = 6;
2057 if (
MI.getNumOperands() != MulAddMaxSize)
2059 const int64_t CoopOperands =
MI.getOperand(MulAddMaxSize - 1).getImm();
2061 SPIRV::CooperativeMatrixOperands::MatrixAAndBTF32ComponentsINTEL) {
2062 if (!
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_joint_matrix))
2064 "require the following SPIR-V extension: "
2065 "SPV_INTEL_joint_matrix",
2067 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_joint_matrix);
2069 SPIRV::Capability::CooperativeMatrixTF32ComponentTypeINTEL);
2072 MatrixAAndBBFloat16ComponentsINTEL ||
2074 SPIRV::CooperativeMatrixOperands::MatrixCBFloat16ComponentsINTEL ||
2076 MatrixResultBFloat16ComponentsINTEL) {
2077 if (!
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_joint_matrix))
2079 "require the following SPIR-V extension: "
2080 "SPV_INTEL_joint_matrix",
2082 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_joint_matrix);
2084 SPIRV::Capability::CooperativeMatrixBFloat16ComponentTypeINTEL);
2088 case SPIRV::OpCooperativeMatrixLoadKHR:
2089 case SPIRV::OpCooperativeMatrixStoreKHR:
2090 case SPIRV::OpCooperativeMatrixLoadCheckedINTEL:
2091 case SPIRV::OpCooperativeMatrixStoreCheckedINTEL:
2092 case SPIRV::OpCooperativeMatrixPrefetchINTEL: {
2093 if (!
ST.canUseExtension(SPIRV::Extension::SPV_KHR_cooperative_matrix))
2095 "following SPIR-V extension: "
2096 "SPV_KHR_cooperative_matrix",
2098 Reqs.
addExtension(SPIRV::Extension::SPV_KHR_cooperative_matrix);
2099 Reqs.
addCapability(SPIRV::Capability::CooperativeMatrixKHR);
2103 std::unordered_map<unsigned, unsigned> LayoutToInstMap = {
2104 {SPIRV::OpCooperativeMatrixLoadKHR, 3},
2105 {SPIRV::OpCooperativeMatrixStoreKHR, 2},
2106 {SPIRV::OpCooperativeMatrixLoadCheckedINTEL, 5},
2107 {SPIRV::OpCooperativeMatrixStoreCheckedINTEL, 4},
2108 {SPIRV::OpCooperativeMatrixPrefetchINTEL, 4}};
2110 const unsigned LayoutNum = LayoutToInstMap[
Op];
2111 Register RegLayout =
MI.getOperand(LayoutNum).getReg();
2114 if (MILayout->
getOpcode() == SPIRV::OpConstantI) {
2117 static_cast<unsigned>(SPIRV::CooperativeMatrixLayout::PackedINTEL)) {
2118 if (!
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_joint_matrix))
2120 "extension: SPV_INTEL_joint_matrix",
2122 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_joint_matrix);
2123 Reqs.
addCapability(SPIRV::Capability::PackedCooperativeMatrixINTEL);
2128 if (
Op == SPIRV::OpCooperativeMatrixLoadKHR ||
2129 Op == SPIRV::OpCooperativeMatrixStoreKHR)
2132 std::string InstName;
2134 case SPIRV::OpCooperativeMatrixPrefetchINTEL:
2135 InstName =
"OpCooperativeMatrixPrefetchINTEL";
2137 case SPIRV::OpCooperativeMatrixLoadCheckedINTEL:
2138 InstName =
"OpCooperativeMatrixLoadCheckedINTEL";
2140 case SPIRV::OpCooperativeMatrixStoreCheckedINTEL:
2141 InstName =
"OpCooperativeMatrixStoreCheckedINTEL";
2145 if (!
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_joint_matrix)) {
2146 const std::string ErrorMsg =
2147 InstName +
" instruction requires the "
2148 "following SPIR-V extension: SPV_INTEL_joint_matrix";
2151 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_joint_matrix);
2152 if (
Op == SPIRV::OpCooperativeMatrixPrefetchINTEL) {
2153 Reqs.
addCapability(SPIRV::Capability::CooperativeMatrixPrefetchINTEL);
2157 SPIRV::Capability::CooperativeMatrixCheckedInstructionsINTEL);
2160 case SPIRV::OpCooperativeMatrixConstructCheckedINTEL:
2161 if (!
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_joint_matrix))
2163 "instructions require the following SPIR-V extension: "
2164 "SPV_INTEL_joint_matrix",
2166 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_joint_matrix);
2168 SPIRV::Capability::CooperativeMatrixCheckedInstructionsINTEL);
2170 case SPIRV::OpReadPipeBlockingALTERA:
2171 case SPIRV::OpWritePipeBlockingALTERA:
2172 if (
ST.canUseExtension(SPIRV::Extension::SPV_ALTERA_blocking_pipes)) {
2173 Reqs.
addExtension(SPIRV::Extension::SPV_ALTERA_blocking_pipes);
2177 case SPIRV::OpCooperativeMatrixGetElementCoordINTEL:
2178 if (!
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_joint_matrix))
2180 "following SPIR-V extension: SPV_INTEL_joint_matrix",
2182 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_joint_matrix);
2184 SPIRV::Capability::CooperativeMatrixInvocationInstructionsINTEL);
2186 case SPIRV::OpConvertHandleToImageINTEL:
2187 case SPIRV::OpConvertHandleToSamplerINTEL:
2188 case SPIRV::OpConvertHandleToSampledImageINTEL: {
2189 if (!
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_bindless_images))
2191 "instructions require the following SPIR-V extension: "
2192 "SPV_INTEL_bindless_images",
2195 SPIRV::AddressingModel::AddressingModel AddrModel = MAI.
Addr;
2197 if (
Op == SPIRV::OpConvertHandleToImageINTEL &&
2198 TyDef->
getOpcode() != SPIRV::OpTypeImage) {
2200 "OpConvertHandleToImageINTEL",
2202 }
else if (
Op == SPIRV::OpConvertHandleToSamplerINTEL &&
2203 TyDef->
getOpcode() != SPIRV::OpTypeSampler) {
2205 "OpConvertHandleToSamplerINTEL",
2207 }
else if (
Op == SPIRV::OpConvertHandleToSampledImageINTEL &&
2208 TyDef->
getOpcode() != SPIRV::OpTypeSampledImage) {
2210 "OpConvertHandleToSampledImageINTEL",
2215 if (!(Bitwidth == 32 && AddrModel == SPIRV::AddressingModel::Physical32) &&
2216 !(Bitwidth == 64 && AddrModel == SPIRV::AddressingModel::Physical64)) {
2218 "Parameter value must be a 32-bit scalar in case of "
2219 "Physical32 addressing model or a 64-bit scalar in case of "
2220 "Physical64 addressing model",
2223 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_bindless_images);
2227 case SPIRV::OpSubgroup2DBlockLoadINTEL:
2228 case SPIRV::OpSubgroup2DBlockLoadTransposeINTEL:
2229 case SPIRV::OpSubgroup2DBlockLoadTransformINTEL:
2230 case SPIRV::OpSubgroup2DBlockPrefetchINTEL:
2231 case SPIRV::OpSubgroup2DBlockStoreINTEL: {
2232 if (!
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_2d_block_io))
2234 "Prefetch/Store]INTEL instructions require the "
2235 "following SPIR-V extension: SPV_INTEL_2d_block_io",
2237 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_2d_block_io);
2238 Reqs.
addCapability(SPIRV::Capability::Subgroup2DBlockIOINTEL);
2240 if (
Op == SPIRV::OpSubgroup2DBlockLoadTransposeINTEL) {
2241 Reqs.
addCapability(SPIRV::Capability::Subgroup2DBlockTransposeINTEL);
2244 if (
Op == SPIRV::OpSubgroup2DBlockLoadTransformINTEL) {
2245 Reqs.
addCapability(SPIRV::Capability::Subgroup2DBlockTransformINTEL);
2250 case SPIRV::OpKill: {
2253 case SPIRV::OpDemoteToHelperInvocation:
2254 Reqs.
addCapability(SPIRV::Capability::DemoteToHelperInvocation);
2256 if (
ST.canUseExtension(
2257 SPIRV::Extension::SPV_EXT_demote_to_helper_invocation)) {
2260 SPIRV::Extension::SPV_EXT_demote_to_helper_invocation);
2265 case SPIRV::OpSUDot:
2266 case SPIRV::OpSDotAccSat:
2267 case SPIRV::OpUDotAccSat:
2268 case SPIRV::OpSUDotAccSat:
2269 AddDotProductRequirements(
MI, Reqs, ST);
2271 case SPIRV::OpImageSampleImplicitLod:
2273 addImageOperandReqs(
MI, Reqs, ST, 4);
2275 case SPIRV::OpImageSampleExplicitLod:
2276 addImageOperandReqs(
MI, Reqs, ST, 4);
2278 case SPIRV::OpImageSampleDrefImplicitLod:
2280 addImageOperandReqs(
MI, Reqs, ST, 5);
2282 case SPIRV::OpImageSampleDrefExplicitLod:
2284 addImageOperandReqs(
MI, Reqs, ST, 5);
2286 case SPIRV::OpImageFetch:
2288 addImageOperandReqs(
MI, Reqs, ST, 4);
2290 case SPIRV::OpImageDrefGather:
2291 case SPIRV::OpImageGather:
2293 addImageOperandReqs(
MI, Reqs, ST, 5);
2295 case SPIRV::OpImageRead: {
2296 Register ImageReg =
MI.getOperand(2).getReg();
2305 if (isImageTypeWithUnknownFormat(TypeDef) &&
ST.isShader())
2306 Reqs.
addCapability(SPIRV::Capability::StorageImageReadWithoutFormat);
2309 case SPIRV::OpImageWrite: {
2310 Register ImageReg =
MI.getOperand(0).getReg();
2319 if (isImageTypeWithUnknownFormat(TypeDef) &&
ST.isShader())
2320 Reqs.
addCapability(SPIRV::Capability::StorageImageWriteWithoutFormat);
2323 case SPIRV::OpTypeStructContinuedINTEL:
2324 case SPIRV::OpConstantCompositeContinuedINTEL:
2325 case SPIRV::OpSpecConstantCompositeContinuedINTEL:
2326 case SPIRV::OpCompositeConstructContinuedINTEL: {
2327 if (!
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_long_composites))
2329 "Continued instructions require the "
2330 "following SPIR-V extension: SPV_INTEL_long_composites",
2332 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_long_composites);
2336 case SPIRV::OpArbitraryFloatEQALTERA:
2337 case SPIRV::OpArbitraryFloatGEALTERA:
2338 case SPIRV::OpArbitraryFloatGTALTERA:
2339 case SPIRV::OpArbitraryFloatLEALTERA:
2340 case SPIRV::OpArbitraryFloatLTALTERA:
2341 case SPIRV::OpArbitraryFloatCbrtALTERA:
2342 case SPIRV::OpArbitraryFloatCosALTERA:
2343 case SPIRV::OpArbitraryFloatCosPiALTERA:
2344 case SPIRV::OpArbitraryFloatExp10ALTERA:
2345 case SPIRV::OpArbitraryFloatExp2ALTERA:
2346 case SPIRV::OpArbitraryFloatExpALTERA:
2347 case SPIRV::OpArbitraryFloatExpm1ALTERA:
2348 case SPIRV::OpArbitraryFloatHypotALTERA:
2349 case SPIRV::OpArbitraryFloatLog10ALTERA:
2350 case SPIRV::OpArbitraryFloatLog1pALTERA:
2351 case SPIRV::OpArbitraryFloatLog2ALTERA:
2352 case SPIRV::OpArbitraryFloatLogALTERA:
2353 case SPIRV::OpArbitraryFloatRecipALTERA:
2354 case SPIRV::OpArbitraryFloatSinCosALTERA:
2355 case SPIRV::OpArbitraryFloatSinCosPiALTERA:
2356 case SPIRV::OpArbitraryFloatSinALTERA:
2357 case SPIRV::OpArbitraryFloatSinPiALTERA:
2358 case SPIRV::OpArbitraryFloatSqrtALTERA:
2359 case SPIRV::OpArbitraryFloatACosALTERA:
2360 case SPIRV::OpArbitraryFloatACosPiALTERA:
2361 case SPIRV::OpArbitraryFloatAddALTERA:
2362 case SPIRV::OpArbitraryFloatASinALTERA:
2363 case SPIRV::OpArbitraryFloatASinPiALTERA:
2364 case SPIRV::OpArbitraryFloatATan2ALTERA:
2365 case SPIRV::OpArbitraryFloatATanALTERA:
2366 case SPIRV::OpArbitraryFloatATanPiALTERA:
2367 case SPIRV::OpArbitraryFloatCastFromIntALTERA:
2368 case SPIRV::OpArbitraryFloatCastALTERA:
2369 case SPIRV::OpArbitraryFloatCastToIntALTERA:
2370 case SPIRV::OpArbitraryFloatDivALTERA:
2371 case SPIRV::OpArbitraryFloatMulALTERA:
2372 case SPIRV::OpArbitraryFloatPowALTERA:
2373 case SPIRV::OpArbitraryFloatPowNALTERA:
2374 case SPIRV::OpArbitraryFloatPowRALTERA:
2375 case SPIRV::OpArbitraryFloatRSqrtALTERA:
2376 case SPIRV::OpArbitraryFloatSubALTERA: {
2377 if (!
ST.canUseExtension(
2378 SPIRV::Extension::SPV_ALTERA_arbitrary_precision_floating_point))
2380 "Floating point instructions can't be translated correctly without "
2381 "enabled SPV_ALTERA_arbitrary_precision_floating_point extension!",
2384 SPIRV::Extension::SPV_ALTERA_arbitrary_precision_floating_point);
2386 SPIRV::Capability::ArbitraryPrecisionFloatingPointALTERA);
2389 case SPIRV::OpSubgroupMatrixMultiplyAccumulateINTEL: {
2390 if (!
ST.canUseExtension(
2391 SPIRV::Extension::SPV_INTEL_subgroup_matrix_multiply_accumulate))
2393 "OpSubgroupMatrixMultiplyAccumulateINTEL instruction requires the "
2395 "extension: SPV_INTEL_subgroup_matrix_multiply_accumulate",
2398 SPIRV::Extension::SPV_INTEL_subgroup_matrix_multiply_accumulate);
2400 SPIRV::Capability::SubgroupMatrixMultiplyAccumulateINTEL);
2403 case SPIRV::OpBitwiseFunctionINTEL: {
2404 if (!
ST.canUseExtension(
2405 SPIRV::Extension::SPV_INTEL_ternary_bitwise_function))
2407 "OpBitwiseFunctionINTEL instruction requires the following SPIR-V "
2408 "extension: SPV_INTEL_ternary_bitwise_function",
2410 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_ternary_bitwise_function);
2411 Reqs.
addCapability(SPIRV::Capability::TernaryBitwiseFunctionINTEL);
2414 case SPIRV::OpCopyMemorySized: {
2419 case SPIRV::OpPredicatedLoadINTEL:
2420 case SPIRV::OpPredicatedStoreINTEL: {
2421 if (!
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_predicated_io))
2423 "OpPredicated[Load/Store]INTEL instructions require "
2424 "the following SPIR-V extension: SPV_INTEL_predicated_io",
2426 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_predicated_io);
2430 case SPIRV::OpFAddS:
2431 case SPIRV::OpFSubS:
2432 case SPIRV::OpFMulS:
2433 case SPIRV::OpFDivS:
2434 case SPIRV::OpFRemS:
2436 case SPIRV::OpFNegate:
2437 case SPIRV::OpFAddV:
2438 case SPIRV::OpFSubV:
2439 case SPIRV::OpFMulV:
2440 case SPIRV::OpFDivV:
2441 case SPIRV::OpFRemV:
2442 case SPIRV::OpFNegateV: {
2445 if (TypeDef->
getOpcode() == SPIRV::OpTypeVector)
2447 if (isBFloat16Type(TypeDef)) {
2448 if (!
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_bfloat16_arithmetic))
2450 "Arithmetic instructions with bfloat16 arguments require the "
2451 "following SPIR-V extension: SPV_INTEL_bfloat16_arithmetic",
2453 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_bfloat16_arithmetic);
2454 Reqs.
addCapability(SPIRV::Capability::BFloat16ArithmeticINTEL);
2458 case SPIRV::OpOrdered:
2459 case SPIRV::OpUnordered:
2460 case SPIRV::OpFOrdEqual:
2461 case SPIRV::OpFOrdNotEqual:
2462 case SPIRV::OpFOrdLessThan:
2463 case SPIRV::OpFOrdLessThanEqual:
2464 case SPIRV::OpFOrdGreaterThan:
2465 case SPIRV::OpFOrdGreaterThanEqual:
2466 case SPIRV::OpFUnordEqual:
2467 case SPIRV::OpFUnordNotEqual:
2468 case SPIRV::OpFUnordLessThan:
2469 case SPIRV::OpFUnordLessThanEqual:
2470 case SPIRV::OpFUnordGreaterThan:
2471 case SPIRV::OpFUnordGreaterThanEqual: {
2475 if (TypeDef->
getOpcode() == SPIRV::OpTypeVector)
2477 if (isBFloat16Type(TypeDef)) {
2478 if (!
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_bfloat16_arithmetic))
2480 "Relational instructions with bfloat16 arguments require the "
2481 "following SPIR-V extension: SPV_INTEL_bfloat16_arithmetic",
2483 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_bfloat16_arithmetic);
2484 Reqs.
addCapability(SPIRV::Capability::BFloat16ArithmeticINTEL);
2488 case SPIRV::OpDPdxCoarse:
2489 case SPIRV::OpDPdyCoarse:
2490 case SPIRV::OpDPdxFine:
2491 case SPIRV::OpDPdyFine: {
2495 case SPIRV::OpLoopControlINTEL: {
2496 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_unstructured_loop_controls);
2497 Reqs.
addCapability(SPIRV::Capability::UnstructuredLoopControlsINTEL);
2509 SPIRV::Capability::Shader);
2521 addInstrRequirements(
MI, MAI, ST);
2524 auto Node = M.getNamedMetadata(
"spirv.ExecutionMode");
2526 bool RequireFloatControls =
false, RequireIntelFloatControls2 =
false,
2527 RequireKHRFloatControls2 =
false,
2529 bool HasIntelFloatControls2 =
2530 ST.canUseExtension(SPIRV::Extension::SPV_INTEL_float_controls2);
2531 bool HasKHRFloatControls2 =
2532 ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2);
2533 for (
unsigned i = 0; i <
Node->getNumOperands(); i++) {
2539 auto EM =
Const->getZExtValue();
2543 case SPIRV::ExecutionMode::DenormPreserve:
2544 case SPIRV::ExecutionMode::DenormFlushToZero:
2545 case SPIRV::ExecutionMode::RoundingModeRTE:
2546 case SPIRV::ExecutionMode::RoundingModeRTZ:
2547 RequireFloatControls = VerLower14;
2549 SPIRV::OperandCategory::ExecutionModeOperand, EM, ST);
2551 case SPIRV::ExecutionMode::RoundingModeRTPINTEL:
2552 case SPIRV::ExecutionMode::RoundingModeRTNINTEL:
2553 case SPIRV::ExecutionMode::FloatingPointModeALTINTEL:
2554 case SPIRV::ExecutionMode::FloatingPointModeIEEEINTEL:
2555 if (HasIntelFloatControls2) {
2556 RequireIntelFloatControls2 =
true;
2558 SPIRV::OperandCategory::ExecutionModeOperand, EM, ST);
2561 case SPIRV::ExecutionMode::FPFastMathDefault: {
2562 if (HasKHRFloatControls2) {
2563 RequireKHRFloatControls2 =
true;
2565 SPIRV::OperandCategory::ExecutionModeOperand, EM, ST);
2569 case SPIRV::ExecutionMode::ContractionOff:
2570 case SPIRV::ExecutionMode::SignedZeroInfNanPreserve:
2571 if (HasKHRFloatControls2) {
2572 RequireKHRFloatControls2 =
true;
2574 SPIRV::OperandCategory::ExecutionModeOperand,
2575 SPIRV::ExecutionMode::FPFastMathDefault, ST);
2578 SPIRV::OperandCategory::ExecutionModeOperand, EM, ST);
2583 SPIRV::OperandCategory::ExecutionModeOperand, EM, ST);
2588 if (RequireFloatControls &&
2589 ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls))
2591 if (RequireIntelFloatControls2)
2593 if (RequireKHRFloatControls2)
2597 if (
F.isDeclaration())
2599 if (
F.getMetadata(
"reqd_work_group_size"))
2601 SPIRV::OperandCategory::ExecutionModeOperand,
2602 SPIRV::ExecutionMode::LocalSize, ST);
2603 if (
F.getFnAttribute(
"hlsl.numthreads").isValid()) {
2605 SPIRV::OperandCategory::ExecutionModeOperand,
2606 SPIRV::ExecutionMode::LocalSize, ST);
2608 if (
F.getFnAttribute(
"enable-maximal-reconvergence").getValueAsBool()) {
2611 if (
F.getMetadata(
"work_group_size_hint"))
2613 SPIRV::OperandCategory::ExecutionModeOperand,
2614 SPIRV::ExecutionMode::LocalSizeHint, ST);
2615 if (
F.getMetadata(
"intel_reqd_sub_group_size"))
2617 SPIRV::OperandCategory::ExecutionModeOperand,
2618 SPIRV::ExecutionMode::SubgroupSize, ST);
2619 if (
F.getMetadata(
"max_work_group_size"))
2621 SPIRV::OperandCategory::ExecutionModeOperand,
2622 SPIRV::ExecutionMode::MaxWorkgroupSizeINTEL, ST);
2623 if (
F.getMetadata(
"vec_type_hint"))
2625 SPIRV::OperandCategory::ExecutionModeOperand,
2626 SPIRV::ExecutionMode::VecTypeHint, ST);
2628 if (
F.hasOptNone()) {
2629 if (
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_optnone)) {
2632 }
else if (
ST.canUseExtension(SPIRV::Extension::SPV_EXT_optnone)) {
2642 unsigned Flags = SPIRV::FPFastMathMode::None;
2643 bool CanUseKHRFloatControls2 =
2644 ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2);
2646 Flags |= SPIRV::FPFastMathMode::NotNaN;
2648 Flags |= SPIRV::FPFastMathMode::NotInf;
2650 Flags |= SPIRV::FPFastMathMode::NSZ;
2652 Flags |= SPIRV::FPFastMathMode::AllowRecip;
2654 Flags |= SPIRV::FPFastMathMode::AllowContract;
2656 if (CanUseKHRFloatControls2)
2664 Flags |= SPIRV::FPFastMathMode::NotNaN | SPIRV::FPFastMathMode::NotInf |
2665 SPIRV::FPFastMathMode::NSZ | SPIRV::FPFastMathMode::AllowRecip |
2666 SPIRV::FPFastMathMode::AllowTransform |
2667 SPIRV::FPFastMathMode::AllowReassoc |
2668 SPIRV::FPFastMathMode::AllowContract;
2670 Flags |= SPIRV::FPFastMathMode::Fast;
2673 if (CanUseKHRFloatControls2) {
2675 assert(!(Flags & SPIRV::FPFastMathMode::Fast) &&
2676 "SPIRV::FPFastMathMode::Fast is deprecated and should not be used "
2681 assert((!(Flags & SPIRV::FPFastMathMode::AllowTransform) ||
2682 ((Flags & SPIRV::FPFastMathMode::AllowReassoc &&
2683 Flags & SPIRV::FPFastMathMode::AllowContract))) &&
2684 "SPIRV::FPFastMathMode::AllowTransform requires AllowReassoc and "
2685 "AllowContract flags to be enabled as well.");
2696 return ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2);
2699static void handleMIFlagDecoration(
2704 getSymbolicOperandRequirements(SPIRV::OperandCategory::DecorationOperand,
2705 SPIRV::Decoration::NoSignedWrap, ST, Reqs)
2708 SPIRV::Decoration::NoSignedWrap, {});
2711 getSymbolicOperandRequirements(SPIRV::OperandCategory::DecorationOperand,
2712 SPIRV::Decoration::NoUnsignedWrap, ST,
2716 SPIRV::Decoration::NoUnsignedWrap, {});
2721 TII.canUseFastMathFlags(
2722 I,
ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2)) ||
2723 (
ST.isKernel() &&
I.getOpcode() == SPIRV::OpExtInst);
2727 unsigned FMFlags = getFastMathFlags(
I, ST);
2728 if (FMFlags == SPIRV::FPFastMathMode::None) {
2731 if (FPFastMathDefaultInfoVec.
empty())
2747 assert(
I.getNumOperands() >= 3 &&
"Expected at least 3 operands");
2748 Register ResReg =
I.getOpcode() == SPIRV::OpExtInst
2749 ?
I.getOperand(1).getReg()
2750 :
I.getOperand(2).getReg();
2758 if (Ty == Elem.Ty) {
2759 FMFlags = Elem.FastMathFlags;
2760 Emit = Elem.ContractionOff || Elem.SignedZeroInfNanPreserve ||
2761 Elem.FPFastMathDefault;
2766 if (FMFlags == SPIRV::FPFastMathMode::None && !Emit)
2769 if (isFastMathModeAvailable(ST)) {
2770 Register DstReg =
I.getOperand(0).getReg();
2786 for (
auto &
MBB : *MF)
2787 for (
auto &
MI :
MBB)
2788 handleMIFlagDecoration(
MI, ST,
TII, MAI.
Reqs, GR,
2805 for (
auto &
MBB : *MF) {
2806 if (!
MBB.hasName() ||
MBB.empty())
2825 for (
auto &
MBB : *MF) {
2827 MI.setDesc(
TII.get(SPIRV::OpPhi));
2830 MI.insert(
MI.operands_begin() + 1,
2831 {MachineOperand::CreateReg(ResTypeReg, false)});
2850 SPIRV::FPFastMathMode::None);
2852 SPIRV::FPFastMathMode::None);
2854 SPIRV::FPFastMathMode::None);
2861 size_t BitWidth = Ty->getScalarSizeInBits();
2865 assert(Index >= 0 && Index < 3 &&
2866 "Expected FPFastMathDefaultInfo for half, float, or double");
2867 assert(FPFastMathDefaultInfoVec.
size() == 3 &&
2868 "Expected FPFastMathDefaultInfoVec to have exactly 3 elements");
2869 return FPFastMathDefaultInfoVec[Index];
2872static void collectFPFastMathDefaults(
const Module &M,
2875 if (!
ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2))
2884 auto Node = M.getNamedMetadata(
"spirv.ExecutionMode");
2888 for (
unsigned i = 0; i <
Node->getNumOperands(); i++) {
2897 if (EM == SPIRV::ExecutionMode::FPFastMathDefault) {
2899 "Expected 4 operands for FPFastMathDefault");
2910 Info.FastMathFlags = Flags;
2911 Info.FPFastMathDefault =
true;
2912 }
else if (EM == SPIRV::ExecutionMode::ContractionOff) {
2914 "Expected no operands for ContractionOff");
2921 Info.ContractionOff =
true;
2923 }
else if (EM == SPIRV::ExecutionMode::SignedZeroInfNanPreserve) {
2925 "Expected 1 operand for SignedZeroInfNanPreserve");
2926 unsigned TargetWidth =
2935 assert(Index >= 0 && Index < 3 &&
2936 "Expected FPFastMathDefaultInfo for half, float, or double");
2937 assert(FPFastMathDefaultInfoVec.
size() == 3 &&
2938 "Expected FPFastMathDefaultInfoVec to have exactly 3 elements");
2939 FPFastMathDefaultInfoVec[Index].SignedZeroInfNanPreserve =
true;
2950 SPIRVTargetMachine &TM =
2954 TII = ST->getInstrInfo();
2960 patchPhis(M, GR, *TII, MMI);
2962 addMBBNames(M, *TII, MMI, *ST,
MAI);
2963 collectFPFastMathDefaults(M,
MAI, *ST);
2964 addDecorations(M, *TII, MMI, *ST,
MAI, GR);
2966 collectReqs(M,
MAI, MMI, *ST);
2970 collectReqs(M,
MAI, MMI, *ST);
2971 collectDeclarations(M);
2974 numberRegistersGlobally(M);
2977 processOtherInstrs(M);
2981 MAI.Reqs.addCapability(SPIRV::Capability::Linkage);
2984 GR->setBound(
MAI.MaxID);
MachineInstrBuilder & UseMI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
ReachingDefInfo InstSet & ToRemove
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
static Register UseReg(const MachineOperand &MO)
const HexagonInstrInfo * TII
Promote Memory to Register
MachineInstr unsigned OpIdx
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
static SPIRV::FPFastMathDefaultInfoVector & getOrCreateFPFastMathDefaultInfoVec(const Module &M, DenseMap< Function *, SPIRV::FPFastMathDefaultInfoVector > &FPFastMathDefaultInfoMap, Function *F)
static SPIRV::FPFastMathDefaultInfo & getFPFastMathDefaultInfo(SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec, const Type *Ty)
#define ATOM_FLT_REQ_EXT_MSG(ExtName)
static cl::opt< bool > SPVDumpDeps("spv-dump-deps", cl::desc("Dump MIR with SPIR-V dependencies info"), cl::Optional, cl::init(false))
static cl::list< SPIRV::Capability::Capability > AvoidCapabilities("avoid-spirv-capabilities", cl::desc("SPIR-V capabilities to avoid if there are " "other options enabling a feature"), cl::Hidden, cl::values(clEnumValN(SPIRV::Capability::Shader, "Shader", "SPIR-V Shader capability")))
#define SPIRV_BACKEND_SERVICE_FUN_NAME
Target-Independent Code Generator Pass Configuration Options pass.
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
bool isValid() const
Return true if the attribute is any kind of attribute.
This is the shared class of boolean and integer constants.
This is an important base class in LLVM.
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
Wrapper class representing physical registers. Should be passed by value.
constexpr bool isValid() const
const MDOperand & getOperand(unsigned I) const
unsigned getNumOperands() const
Return number of MDNode operands.
Tracking metadata reference owned by Metadata.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
const MachineFunctionProperties & getProperties() const
Get the function properties.
Register getReg(unsigned Idx) const
Get the register for the operand index.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
const MachineBasicBlock * getParent() const
unsigned getNumOperands() const
Retuns the total number of operands.
LLVM_ABI const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
const MachineOperand & getOperand(unsigned i) const
This class contains meta information specific to a module.
LLVM_ABI MachineFunction * getMachineFunction(const Function &F) const
Returns the MachineFunction associated to IR function F if there is one, otherwise nullptr.
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
LLVM_ABI void print(raw_ostream &os, const TargetRegisterInfo *TRI=nullptr) const
Print the MachineOperand to os.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
static MachineOperand CreateImm(int64_t Val)
MachineOperandType getType() const
getType - Returns the MachineOperandType for this operand.
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
LLVM_ABI MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
LLVM_ABI void setRegClass(Register Reg, const TargetRegisterClass *RC)
setRegClass - Set the register class of the specified virtual register.
LLVM_ABI Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
iterator_range< reg_instr_iterator > reg_instructions(Register Reg) const
iterator_range< use_instr_iterator > use_instructions(Register Reg) const
LLVM_ABI MachineInstr * getUniqueVRegDef(Register Reg) const
getUniqueVRegDef - Return the unique machine instr that defines the specified virtual register or nul...
A Module instance is used to store all the information related to an LLVM module.
virtual void print(raw_ostream &OS, const Module *M) const
print - Print out the internal state of the pass.
AnalysisType & getAnalysis() const
getAnalysis<AnalysisType>() - This function is used by subclasses to get to the analysis information ...
Wrapper class representing virtual and physical registers.
constexpr bool isValid() const
unsigned getScalarOrVectorBitWidth(SPIRVTypeInst Type) const
const Type * getTypeForSPIRVType(SPIRVTypeInst Ty) const
Register getSPIRVTypeID(SPIRVTypeInst SpirvType) const
SPIRVTypeInst getSPIRVTypeForVReg(Register VReg, const MachineFunction *MF=nullptr) const
bool isConstantInstr(const MachineInstr &MI) const
const SPIRVInstrInfo * getInstrInfo() const override
SPIRVGlobalRegistry * getSPIRVGlobalRegistry() const
const SPIRVSubtarget * getSubtargetImpl() const
bool isTypeIntN(unsigned N=0) const
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
bool contains(const T &V) const
Check if the SmallSet contains the given element.
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
iterator insert(iterator I, T &&Elt)
void push_back(const T &Elt)
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
static LLVM_ABI Type * getDoubleTy(LLVMContext &C)
static LLVM_ABI Type * getFloatTy(LLVMContext &C)
static LLVM_ABI Type * getHalfTy(LLVMContext &C)
Represents a version number in the form major[.minor[.subminor[.build]]].
bool empty() const
Determine whether this version information is empty (e.g., all version components are zero).
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ C
The default llvm calling convention, compatible with C.
SmallVector< const MachineInstr * > InstrList
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
NodeAddr< InstrNode * > Instr
This is an optimization pass for GlobalISel generic memory operations.
void buildOpName(Register Target, const StringRef &Name, MachineIRBuilder &MIRBuilder)
FunctionAddr VTableAddr Value
std::string getStringImm(const MachineInstr &MI, unsigned StartIndex)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
hash_code hash_value(const FixedPointSemantics &Val)
ExtensionList getSymbolicOperandExtensions(SPIRV::OperandCategory::OperandCategory Category, uint32_t Value)
CapabilityList getSymbolicOperandCapabilities(SPIRV::OperandCategory::OperandCategory Category, uint32_t Value)
SmallVector< SPIRV::Extension::Extension, 8 > ExtensionList
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
SmallVector< size_t > InstrSignature
VersionTuple getSymbolicOperandMaxVersion(SPIRV::OperandCategory::OperandCategory Category, uint32_t Value)
void buildOpDecorate(Register Reg, MachineIRBuilder &MIRBuilder, SPIRV::Decoration::Decoration Dec, const std::vector< uint32_t > &DecArgs, StringRef StrImm)
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
CapabilityList getCapabilitiesEnabledByExtension(SPIRV::Extension::Extension Extension)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
std::string getSymbolicOperandMnemonic(SPIRV::OperandCategory::OperandCategory Category, int32_t Value)
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
DWARFExpression::Operation Op
VersionTuple getSymbolicOperandMinVersion(SPIRV::OperandCategory::OperandCategory Category, uint32_t Value)
constexpr unsigned BitWidth
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
SmallVector< SPIRV::Capability::Capability, 8 > CapabilityList
std::set< InstrSignature > InstrTraces
hash_code hash_combine(const Ts &...args)
Combine values into a single hash_code.
std::map< SmallVector< size_t >, unsigned > InstrGRegsMap
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
SmallSet< SPIRV::Capability::Capability, 4 > S
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
SPIRV::ModuleAnalysisInfo MAI
bool runOnModule(Module &M) override
runOnModule - Virtual method overriden by subclasses to process the module being operated on.
static size_t computeFPFastMathDefaultInfoVecIndex(size_t BitWidth)
void setSkipEmission(const MachineInstr *MI)
MCRegister getRegisterAlias(const MachineFunction *MF, Register Reg)
MCRegister getOrCreateMBBRegister(const MachineBasicBlock &MBB)
InstrList MS[NUM_MODULE_SECTIONS]
AddressingModel::AddressingModel Addr
void setRegisterAlias(const MachineFunction *MF, Register Reg, MCRegister AliasReg)
DenseMap< const Function *, SPIRV::FPFastMathDefaultInfoVector > FPFastMathDefaultInfoMap
void addCapabilities(const CapabilityList &ToAdd)
bool isCapabilityAvailable(Capability::Capability Cap) const
void checkSatisfiable(const SPIRVSubtarget &ST) const
void getAndAddRequirements(SPIRV::OperandCategory::OperandCategory Category, uint32_t i, const SPIRVSubtarget &ST)
void addExtension(Extension::Extension ToAdd)
void initAvailableCapabilities(const SPIRVSubtarget &ST)
void removeCapabilityIf(const Capability::Capability ToRemove, const Capability::Capability IfPresent)
void addCapability(Capability::Capability ToAdd)
void addAvailableCaps(const CapabilityList &ToAdd)
void addRequirements(const Requirements &Req)
const std::optional< Capability::Capability > Cap
const VersionTuple MinVer
const VersionTuple MaxVer