20#include "llvm/IR/IntrinsicsSPIRV.h"
25#define DEBUG_TYPE "spirv-builtins"
29#define GET_BuiltinGroup_DECL
30#include "SPIRVGenTables.inc"
34 InstructionSet::InstructionSet
Set;
40#define GET_DemangledBuiltins_DECL
41#define GET_DemangledBuiltins_IMPL
63 InstructionSet::InstructionSet
Set;
67#define GET_NativeBuiltins_DECL
68#define GET_NativeBuiltins_IMPL
86#define GET_GroupBuiltins_DECL
87#define GET_GroupBuiltins_IMPL
97#define GET_IntelSubgroupsBuiltins_DECL
98#define GET_IntelSubgroupsBuiltins_IMPL
105#define GET_AtomicFloatingBuiltins_DECL
106#define GET_AtomicFloatingBuiltins_IMPL
113#define GET_GroupUniformBuiltins_DECL
114#define GET_GroupUniformBuiltins_IMPL
118 InstructionSet::InstructionSet
Set;
123#define GET_GetBuiltins_DECL
124#define GET_GetBuiltins_IMPL
128 InstructionSet::InstructionSet
Set;
132#define GET_ImageQueryBuiltins_DECL
133#define GET_ImageQueryBuiltins_IMPL
141#define GET_IntegerDotProductBuiltins_DECL
142#define GET_IntegerDotProductBuiltins_IMPL
146 InstructionSet::InstructionSet
Set;
157 InstructionSet::InstructionSet
Set;
165#define GET_ConvertBuiltins_DECL
166#define GET_ConvertBuiltins_IMPL
168using namespace InstructionSet;
169#define GET_VectorLoadStoreBuiltins_DECL
170#define GET_VectorLoadStoreBuiltins_IMPL
172#define GET_CLMemoryScope_DECL
173#define GET_CLSamplerAddressingMode_DECL
174#define GET_CLMemoryFenceFlags_DECL
175#define GET_ExtendedBuiltins_DECL
176#include "SPIRVGenTables.inc"
188 StringRef PassPrefix =
"(anonymous namespace)::";
189 std::string BuiltinName;
192 BuiltinName = DemangledCall.
substr(PassPrefix.
size());
194 BuiltinName = DemangledCall;
197 BuiltinName = BuiltinName.
substr(0, BuiltinName.find(
'('));
200 if (BuiltinName.rfind(
"__spirv_ocl_", 0) == 0)
201 BuiltinName = BuiltinName.
substr(12);
206 std::size_t Pos1 = BuiltinName.
rfind(
'<');
207 if (Pos1 != std::string::npos && BuiltinName.back() ==
'>') {
208 std::size_t Pos2 = BuiltinName.rfind(
' ', Pos1);
209 if (Pos2 == std::string::npos)
213 BuiltinName = BuiltinName.substr(Pos2, Pos1 - Pos2);
214 BuiltinName = BuiltinName.substr(BuiltinName.find_last_of(
' ') + 1);
241 static const std::regex SpvWithR(
242 "(__spirv_(ImageSampleExplicitLod|ImageRead|ImageWrite|ImageQuerySizeLod|"
244 "SDotKHR|SUDotKHR|SDotAccSatKHR|UDotAccSatKHR|SUDotAccSatKHR|"
245 "ReadClockKHR|SubgroupBlockReadINTEL|SubgroupImageBlockReadINTEL|"
246 "SubgroupImageMediaBlockReadINTEL|SubgroupImageMediaBlockWriteINTEL|"
248 "UConvert|SConvert|FConvert|SatConvert)[^_]*)(_R[^_]*_?(\\w+)?.*)?");
250 if (std::regex_match(BuiltinName, Match, SpvWithR) && Match.size() > 1) {
251 std::ssub_match SubMatch;
252 if (DecorationId && Match.size() > 3) {
257 BuiltinName = SubMatch.str();
274static std::unique_ptr<const SPIRV::IncomingCall>
276 SPIRV::InstructionSet::InstructionSet Set,
283 DemangledCall.
slice(DemangledCall.
find(
'(') + 1, DemangledCall.
find(
')'));
284 BuiltinArgs.
split(BuiltinArgumentTypes,
',', -1,
false);
289 if ((Builtin = SPIRV::lookupBuiltin(BuiltinName, Set)))
290 return std::make_unique<SPIRV::IncomingCall>(
291 BuiltinName, Builtin, ReturnRegister, ReturnType,
Arguments);
296 if (BuiltinArgumentTypes.
size() >= 1) {
297 char FirstArgumentType = BuiltinArgumentTypes[0][0];
302 switch (FirstArgumentType) {
305 if (Set == SPIRV::InstructionSet::OpenCL_std)
307 else if (Set == SPIRV::InstructionSet::GLSL_std_450)
315 if (Set == SPIRV::InstructionSet::OpenCL_std)
317 else if (Set == SPIRV::InstructionSet::GLSL_std_450)
324 if (Set == SPIRV::InstructionSet::OpenCL_std ||
325 Set == SPIRV::InstructionSet::GLSL_std_450)
331 if (!Prefix.empty() &&
332 (Builtin = SPIRV::lookupBuiltin(Prefix + BuiltinName, Set)))
333 return std::make_unique<SPIRV::IncomingCall>(
334 BuiltinName, Builtin, ReturnRegister, ReturnType,
Arguments);
341 switch (FirstArgumentType) {
362 if (!Suffix.empty() &&
363 (Builtin = SPIRV::lookupBuiltin(BuiltinName + Suffix, Set)))
364 return std::make_unique<SPIRV::IncomingCall>(
365 BuiltinName, Builtin, ReturnRegister, ReturnType,
Arguments);
380 assert(
MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST &&
381 MI->getOperand(1).isReg());
382 Register BitcastReg =
MI->getOperand(1).getReg();
396 assert(
DefMI->getOpcode() == TargetOpcode::G_CONSTANT &&
397 DefMI->getOperand(1).isCImm());
398 return DefMI->getOperand(1).getCImm()->getValue().getZExtValue();
410 Register ValueReg =
MI->getOperand(0).getReg();
416 assert(Ty &&
"Type is expected");
428 if (
MI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE)
429 return MI->getOperand(1).getGlobal()->getType();
431 "Blocks in OpenCL C must be traceable to allocation site");
443static std::tuple<Register, SPIRVType *>
449 if (ResultType->
getOpcode() == SPIRV::OpTypeVector) {
464 return std::make_tuple(ResultRegister, BoolType);
475 if (ReturnType->getOpcode() == SPIRV::OpTypeVector) {
486 return MIRBuilder.
buildSelect(ReturnRegister, SourceRegister, TrueConst,
496 if (!DestinationReg.isValid())
501 MIRBuilder.
buildLoad(DestinationReg, PtrRegister, PtrInfo,
Align());
502 return DestinationReg;
511 const std::optional<SPIRV::LinkageType::LinkageType> &LinkageTy = {
512 SPIRV::LinkageType::Import}) {
520 VariableType, MIRBuilder, SPIRV::StorageClass::Input);
526 SPIRV::StorageClass::Input,
nullptr, isConst, LinkageTy,
533 return LoadedRegister;
546static SPIRV::MemorySemantics::MemorySemantics
549 case std::memory_order_relaxed:
550 return SPIRV::MemorySemantics::None;
551 case std::memory_order_acquire:
552 return SPIRV::MemorySemantics::Acquire;
553 case std::memory_order_release:
554 return SPIRV::MemorySemantics::Release;
555 case std::memory_order_acq_rel:
556 return SPIRV::MemorySemantics::AcquireRelease;
557 case std::memory_order_seq_cst:
558 return SPIRV::MemorySemantics::SequentiallyConsistent;
566 case SPIRV::CLMemoryScope::memory_scope_work_item:
567 return SPIRV::Scope::Invocation;
568 case SPIRV::CLMemoryScope::memory_scope_work_group:
569 return SPIRV::Scope::Workgroup;
570 case SPIRV::CLMemoryScope::memory_scope_device:
571 return SPIRV::Scope::Device;
572 case SPIRV::CLMemoryScope::memory_scope_all_svm_devices:
573 return SPIRV::Scope::CrossDevice;
574 case SPIRV::CLMemoryScope::memory_scope_sub_group:
575 return SPIRV::Scope::Subgroup;
588 SPIRV::Scope::Scope Scope,
592 if (CLScopeRegister.
isValid()) {
597 if (CLScope ==
static_cast<unsigned>(Scope)) {
598 MRI->setRegClass(CLScopeRegister, &SPIRV::iIDRegClass);
599 return CLScopeRegister;
607 if (
MRI->getRegClassOrNull(
Reg))
611 SpvType ? GR->
getRegClass(SpvType) : &SPIRV::iIDRegClass);
615 Register PtrRegister,
unsigned &Semantics,
618 if (SemanticsRegister.
isValid()) {
620 std::memory_order Order =
625 if (
static_cast<unsigned>(Order) == Semantics) {
626 MRI->setRegClass(SemanticsRegister, &SPIRV::iIDRegClass);
627 return SemanticsRegister;
640 unsigned Sz =
Call->Arguments.size() - ImmArgs.size();
641 for (
unsigned i = 0; i < Sz; ++i)
642 MIB.addUse(
Call->Arguments[i]);
651 if (
Call->isSpirvOp())
655 "Need 2 arguments for atomic init translation");
667 if (
Call->isSpirvOp())
675 Call->Arguments.size() > 1
679 if (
Call->Arguments.size() > 2) {
681 MemSemanticsReg =
Call->Arguments[2];
684 SPIRV::MemorySemantics::SequentiallyConsistent |
702 if (
Call->isSpirvOp())
710 SPIRV::MemorySemantics::SequentiallyConsistent |
725 if (
Call->isSpirvOp())
729 bool IsCmpxchg =
Call->Builtin->Name.contains(
"cmpxchg");
736 LLT DesiredLLT =
MRI->getType(Desired);
739 SPIRV::OpTypePointer);
742 assert(IsCmpxchg ? ExpectedType == SPIRV::OpTypeInt
743 : ExpectedType == SPIRV::OpTypePointer);
748 auto StorageClass =
static_cast<SPIRV::StorageClass::StorageClass
>(
756 ? SPIRV::MemorySemantics::None
757 : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
760 ? SPIRV::MemorySemantics::None
761 : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
762 if (
Call->Arguments.size() >= 4) {
764 "Need 5+ args for explicit atomic cmpxchg");
771 if (
static_cast<unsigned>(MemOrdEq) == MemSemEqual)
772 MemSemEqualReg =
Call->Arguments[3];
773 if (
static_cast<unsigned>(MemOrdNeq) == MemSemEqual)
774 MemSemUnequalReg =
Call->Arguments[4];
778 if (!MemSemUnequalReg.
isValid())
782 auto Scope = IsCmpxchg ? SPIRV::Scope::Workgroup : SPIRV::Scope::Device;
783 if (
Call->Arguments.size() >= 6) {
785 "Extra args for explicit atomic cmpxchg");
786 auto ClScope =
static_cast<SPIRV::CLMemoryScope
>(
789 if (ClScope ==
static_cast<unsigned>(Scope))
790 ScopeReg =
Call->Arguments[5];
800 Register Tmp = !IsCmpxchg ?
MRI->createGenericVirtualRegister(DesiredLLT)
801 :
Call->ReturnRegister;
802 if (!
MRI->getRegClassOrNull(Tmp))
826 if (
Call->isSpirvOp())
835 "Too many args for explicit atomic RMW");
836 ScopeRegister =
buildScopeReg(ScopeRegister, SPIRV::Scope::Workgroup,
837 MIRBuilder, GR,
MRI);
840 unsigned Semantics = SPIRV::MemorySemantics::None;
844 Semantics, MIRBuilder, GR);
848 if (
Call->ReturnType->getOpcode() == SPIRV::OpTypeFloat) {
849 if (Opcode == SPIRV::OpAtomicIAdd) {
850 Opcode = SPIRV::OpAtomicFAddEXT;
851 }
else if (Opcode == SPIRV::OpAtomicISub) {
854 Opcode = SPIRV::OpAtomicFAddEXT;
856 MRI->createGenericVirtualRegister(
MRI->getType(ValueReg));
865 ValueReg = NegValueReg;
884 "Wrong number of atomic floating-type builtin");
904 bool IsSet = Opcode == SPIRV::OpAtomicFlagTestAndSet;
906 if (
Call->isSpirvOp())
912 unsigned Semantics = SPIRV::MemorySemantics::SequentiallyConsistent;
916 Semantics, MIRBuilder, GR);
918 assert((Opcode != SPIRV::OpAtomicFlagClear ||
919 (Semantics != SPIRV::MemorySemantics::Acquire &&
920 Semantics != SPIRV::MemorySemantics::AcquireRelease)) &&
921 "Invalid memory order argument!");
944 if ((Opcode == SPIRV::OpControlBarrierArriveINTEL ||
945 Opcode == SPIRV::OpControlBarrierWaitINTEL) &&
946 !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_split_barrier)) {
947 std::string DiagMsg = std::string(Builtin->
Name) +
948 ": the builtin requires the following SPIR-V "
949 "extension: SPV_INTEL_split_barrier";
953 if (
Call->isSpirvOp())
958 unsigned MemSemantics = SPIRV::MemorySemantics::None;
960 if (MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE)
961 MemSemantics |= SPIRV::MemorySemantics::WorkgroupMemory;
963 if (MemFlags & SPIRV::CLK_GLOBAL_MEM_FENCE)
964 MemSemantics |= SPIRV::MemorySemantics::CrossWorkgroupMemory;
966 if (MemFlags & SPIRV::CLK_IMAGE_MEM_FENCE)
967 MemSemantics |= SPIRV::MemorySemantics::ImageMemory;
969 if (Opcode == SPIRV::OpMemoryBarrier)
973 else if (Opcode == SPIRV::OpControlBarrierArriveINTEL)
974 MemSemantics |= SPIRV::MemorySemantics::Release;
975 else if (Opcode == SPIRV::OpControlBarrierWaitINTEL)
976 MemSemantics |= SPIRV::MemorySemantics::Acquire;
978 MemSemantics |= SPIRV::MemorySemantics::SequentiallyConsistent;
981 MemFlags == MemSemantics
985 SPIRV::Scope::Scope Scope = SPIRV::Scope::Workgroup;
986 SPIRV::Scope::Scope MemScope = Scope;
987 if (
Call->Arguments.size() >= 2) {
989 ((Opcode != SPIRV::OpMemoryBarrier &&
Call->Arguments.size() == 2) ||
990 (Opcode == SPIRV::OpMemoryBarrier &&
Call->Arguments.size() == 3)) &&
991 "Extra args for explicitly scoped barrier");
992 Register ScopeArg = (Opcode == SPIRV::OpMemoryBarrier) ?
Call->Arguments[2]
993 :
Call->Arguments[1];
994 SPIRV::CLMemoryScope CLScope =
997 if (!(MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE) ||
998 (Opcode == SPIRV::OpMemoryBarrier))
1000 if (CLScope ==
static_cast<unsigned>(Scope))
1001 ScopeReg =
Call->Arguments[1];
1008 if (Opcode != SPIRV::OpMemoryBarrier)
1010 MIB.
addUse(MemSemanticsReg);
1022 if ((Opcode == SPIRV::OpBitFieldInsert ||
1023 Opcode == SPIRV::OpBitFieldSExtract ||
1024 Opcode == SPIRV::OpBitFieldUExtract || Opcode == SPIRV::OpBitReverse) &&
1025 !ST->canUseExtension(SPIRV::Extension::SPV_KHR_bit_instructions)) {
1026 std::string DiagMsg = std::string(Builtin->
Name) +
1027 ": the builtin requires the following SPIR-V "
1028 "extension: SPV_KHR_bit_instructions";
1033 if (
Call->isSpirvOp())
1040 for (
unsigned i = 0; i <
Call->Arguments.size(); ++i)
1052 if (
Call->isSpirvOp())
1069 if (
Call->isSpirvOp())
1076 for (
unsigned i = 0; i <
Call->Arguments.size(); ++i)
1088 if (
Call->isSpirvOp())
1094 for (
unsigned i = 0; i <
Call->Arguments.size(); ++i)
1104 case SPIRV::OpCommitReadPipe:
1105 case SPIRV::OpCommitWritePipe:
1107 case SPIRV::OpGroupCommitReadPipe:
1108 case SPIRV::OpGroupCommitWritePipe:
1109 case SPIRV::OpGroupReserveReadPipePackets:
1110 case SPIRV::OpGroupReserveWritePipePackets: {
1114 MRI->setRegClass(ScopeConstReg, &SPIRV::iIDRegClass);
1118 if (Opcode == SPIRV::OpGroupReserveReadPipePackets ||
1119 Opcode == SPIRV::OpGroupReserveWritePipePackets)
1123 MIB.
addUse(ScopeConstReg);
1124 for (
unsigned int i = 0; i <
Call->Arguments.size(); ++i)
1137 case SPIRV::Dim::DIM_1D:
1138 case SPIRV::Dim::DIM_Buffer:
1140 case SPIRV::Dim::DIM_2D:
1141 case SPIRV::Dim::DIM_Cube:
1142 case SPIRV::Dim::DIM_Rect:
1144 case SPIRV::Dim::DIM_3D:
1157 return arrayed ? numComps + 1 : numComps;
1170 SPIRV::lookupExtendedBuiltin(Builtin->
Name, Builtin->
Set)->Number;
1177 if (ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2) &&
1178 (
Number == SPIRV::OpenCLExtInst::fmin_common ||
1179 Number == SPIRV::OpenCLExtInst::fmax_common)) {
1181 ? SPIRV::OpenCLExtInst::fmin
1182 : SPIRV::OpenCLExtInst::fmax;
1190 .
addImm(
static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
1196 if (OrigNumber == SPIRV::OpenCLExtInst::fmin_common ||
1197 OrigNumber == SPIRV::OpenCLExtInst::fmax_common) {
1211 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
1215 std::tie(CompareRegister, RelationType) =
1228 Call->ReturnType, GR);
1236 SPIRV::lookupGroupBuiltin(Builtin->
Name);
1239 if (
Call->isSpirvOp()) {
1242 if (GroupBuiltin->
Opcode ==
1243 SPIRV::OpSubgroupMatrixMultiplyAccumulateINTEL &&
1244 Call->Arguments.size() > 4)
1253 if (!
MI ||
MI->getOpcode() != TargetOpcode::G_CONSTANT)
1255 "Group Operation parameter must be an integer constant");
1256 uint64_t GrpOp =
MI->getOperand(1).getCImm()->getValue().getZExtValue();
1263 for (
unsigned i = 2; i <
Call->Arguments.size(); ++i)
1276 if (ArgInstruction->
getOpcode() == TargetOpcode::G_CONSTANT) {
1277 if (BoolRegType->
getOpcode() != SPIRV::OpTypeBool)
1281 if (BoolRegType->
getOpcode() == SPIRV::OpTypeInt) {
1283 MRI->setRegClass(Arg0, &SPIRV::iIDRegClass);
1290 }
else if (BoolRegType->
getOpcode() != SPIRV::OpTypeBool) {
1302 const bool HasBoolReturnTy =
1307 if (HasBoolReturnTy)
1308 std::tie(GroupResultRegister, GroupResultType) =
1311 auto Scope = Builtin->
Name.
starts_with(
"sub_group") ? SPIRV::Scope::Subgroup
1312 : SPIRV::Scope::Workgroup;
1316 if (GroupBuiltin->
Opcode == SPIRV::OpGroupBroadcast &&
1317 Call->Arguments.size() > 2) {
1325 if (!ElemType || ElemType->
getOpcode() != SPIRV::OpTypeInt)
1327 unsigned VecLen =
Call->Arguments.size() - 1;
1328 VecReg =
MRI->createGenericVirtualRegister(
1330 MRI->setRegClass(VecReg, &SPIRV::vIDRegClass);
1336 for (
unsigned i = 1; i <
Call->Arguments.size(); i++) {
1337 MIB.addUse(
Call->Arguments[i]);
1346 .
addDef(GroupResultRegister)
1352 if (
Call->Arguments.size() > 0) {
1353 MIB.addUse(Arg0.
isValid() ? Arg0 :
Call->Arguments[0]);
1358 for (
unsigned i = 1; i <
Call->Arguments.size(); i++)
1359 MIB.addUse(
Call->Arguments[i]);
1363 if (HasBoolReturnTy)
1365 Call->ReturnType, GR);
1376 SPIRV::lookupIntelSubgroupsBuiltin(Builtin->
Name);
1378 if (IntelSubgroups->
IsMedia &&
1379 !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_media_block_io)) {
1380 std::string DiagMsg = std::string(Builtin->
Name) +
1381 ": the builtin requires the following SPIR-V "
1382 "extension: SPV_INTEL_media_block_io";
1384 }
else if (!IntelSubgroups->
IsMedia &&
1385 !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_subgroups)) {
1386 std::string DiagMsg = std::string(Builtin->
Name) +
1387 ": the builtin requires the following SPIR-V "
1388 "extension: SPV_INTEL_subgroups";
1393 if (
Call->isSpirvOp()) {
1394 bool IsSet = OpCode != SPIRV::OpSubgroupBlockWriteINTEL &&
1395 OpCode != SPIRV::OpSubgroupImageBlockWriteINTEL &&
1396 OpCode != SPIRV::OpSubgroupImageMediaBlockWriteINTEL;
1402 if (IntelSubgroups->
IsBlock) {
1405 if (Arg0Type->getOpcode() == SPIRV::OpTypeImage) {
1411 case SPIRV::OpSubgroupBlockReadINTEL:
1412 OpCode = SPIRV::OpSubgroupImageBlockReadINTEL;
1414 case SPIRV::OpSubgroupBlockWriteINTEL:
1415 OpCode = SPIRV::OpSubgroupImageBlockWriteINTEL;
1438 for (
size_t i = 0; i <
Call->Arguments.size(); ++i)
1449 if (!ST->canUseExtension(
1450 SPIRV::Extension::SPV_KHR_uniform_group_instructions)) {
1451 std::string DiagMsg = std::string(Builtin->
Name) +
1452 ": the builtin requires the following SPIR-V "
1453 "extension: SPV_KHR_uniform_group_instructions";
1457 SPIRV::lookupGroupUniformBuiltin(Builtin->
Name);
1467 if (!Const || Const->getOpcode() != TargetOpcode::G_CONSTANT)
1469 "expect a constant group operation for a uniform group instruction",
1472 if (!ConstOperand.
isCImm())
1482 MIB.addUse(ValueReg);
1493 if (!ST->canUseExtension(SPIRV::Extension::SPV_KHR_shader_clock)) {
1494 std::string DiagMsg = std::string(Builtin->
Name) +
1495 ": the builtin requires the following SPIR-V "
1496 "extension: SPV_KHR_shader_clock";
1502 if (Builtin->
Name ==
"__spirv_ReadClockKHR") {
1509 SPIRV::Scope::Scope ScopeArg =
1511 .
EndsWith(
"device", SPIRV::Scope::Scope::Device)
1512 .
EndsWith(
"work_group", SPIRV::Scope::Scope::Workgroup)
1513 .
EndsWith(
"sub_group", SPIRV::Scope::Scope::Subgroup);
1554 SPIRV::BuiltIn::BuiltIn BuiltinValue,
1557 const unsigned ResultWidth =
Call->ReturnType->getOperand(1).getImm();
1568 bool IsConstantIndex =
1569 IndexInstruction->getOpcode() == TargetOpcode::G_CONSTANT;
1575 if (PointerSize != ResultWidth) {
1576 DefaultReg =
MRI->createGenericVirtualRegister(
LLT::scalar(PointerSize));
1577 MRI->setRegClass(DefaultReg, &SPIRV::iIDRegClass);
1579 MIRBuilder.
getMF());
1580 ToTruncate = DefaultReg;
1584 MIRBuilder.
buildCopy(DefaultReg, NewRegister);
1593 if (!IsConstantIndex || PointerSize != ResultWidth) {
1594 Extracted =
MRI->createGenericVirtualRegister(
LLT::scalar(PointerSize));
1595 MRI->setRegClass(Extracted, &SPIRV::iIDRegClass);
1602 ExtractInst.
addUse(LoadedVector).
addUse(IndexRegister);
1605 if (!IsConstantIndex) {
1614 MRI->setRegClass(CompareRegister, &SPIRV::iIDRegClass);
1629 if (PointerSize != ResultWidth) {
1632 MRI->setRegClass(SelectionResult, &SPIRV::iIDRegClass);
1634 MIRBuilder.
getMF());
1637 MIRBuilder.
buildSelect(SelectionResult, CompareRegister, Extracted,
1639 ToTruncate = SelectionResult;
1641 ToTruncate = Extracted;
1645 if (PointerSize != ResultWidth)
1655 SPIRV::BuiltIn::BuiltIn
Value =
1656 SPIRV::lookupGetBuiltin(Builtin->
Name, Builtin->
Set)->
Value;
1658 if (
Value == SPIRV::BuiltIn::GlobalInvocationId)
1664 if (
Call->ReturnType->getOpcode() == SPIRV::OpTypeVector)
1671 LLType,
Call->ReturnRegister);
1680 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
1683 case SPIRV::OpStore:
1685 case SPIRV::OpAtomicLoad:
1687 case SPIRV::OpAtomicStore:
1689 case SPIRV::OpAtomicCompareExchange:
1690 case SPIRV::OpAtomicCompareExchangeWeak:
1693 case SPIRV::OpAtomicIAdd:
1694 case SPIRV::OpAtomicISub:
1695 case SPIRV::OpAtomicOr:
1696 case SPIRV::OpAtomicXor:
1697 case SPIRV::OpAtomicAnd:
1698 case SPIRV::OpAtomicExchange:
1700 case SPIRV::OpMemoryBarrier:
1702 case SPIRV::OpAtomicFlagTestAndSet:
1703 case SPIRV::OpAtomicFlagClear:
1706 if (
Call->isSpirvOp())
1718 unsigned Opcode = SPIRV::lookupAtomicFloatingBuiltin(Builtin->
Name)->Opcode;
1721 case SPIRV::OpAtomicFAddEXT:
1722 case SPIRV::OpAtomicFMinEXT:
1723 case SPIRV::OpAtomicFMaxEXT:
1736 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
1747 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
1749 if (Opcode == SPIRV::OpGenericCastToPtrExplicit) {
1750 SPIRV::StorageClass::StorageClass ResSC =
1761 MIRBuilder.
buildInstr(TargetOpcode::G_ADDRSPACE_CAST)
1772 if (
Call->isSpirvOp())
1777 SPIRV::OpTypeVector;
1779 uint32_t OC = IsVec ? SPIRV::OpDot : SPIRV::OpFMulS;
1780 bool IsSwapReq =
false;
1785 (ST->canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
1789 SPIRV::lookupIntegerDotProductBuiltin(Builtin->
Name);
1799 bool IsFirstSigned = TypeStrs[0].trim()[0] !=
'u';
1800 bool IsSecondSigned = TypeStrs[1].trim()[0] !=
'u';
1802 if (
Call->BuiltinName ==
"dot") {
1803 if (IsFirstSigned && IsSecondSigned)
1805 else if (!IsFirstSigned && !IsSecondSigned)
1808 OC = SPIRV::OpSUDot;
1812 }
else if (
Call->BuiltinName ==
"dot_acc_sat") {
1813 if (IsFirstSigned && IsSecondSigned)
1814 OC = SPIRV::OpSDotAccSat;
1815 else if (!IsFirstSigned && !IsSecondSigned)
1816 OC = SPIRV::OpUDotAccSat;
1818 OC = SPIRV::OpSUDotAccSat;
1834 for (
size_t i = 2; i <
Call->Arguments.size(); ++i)
1837 for (
size_t i = 0; i <
Call->Arguments.size(); ++i)
1843 if (!IsVec && OC != SPIRV::OpFMulS)
1844 MIB.
addImm(SPIRV::PackedVectorFormat4x8Bit);
1853 SPIRV::BuiltIn::BuiltIn
Value =
1854 SPIRV::lookupGetBuiltin(Builtin->
Name, Builtin->
Set)->
Value;
1857 assert(
Call->ReturnType->getOpcode() == SPIRV::OpTypeInt);
1861 MIRBuilder,
Call->ReturnType, GR,
Value, LLType,
Call->ReturnRegister,
1862 false, std::nullopt);
1876 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
1883 if (RetType->
getOpcode() != SPIRV::OpTypeStruct)
1885 "overflow builtins");
1889 if (!OpType1 || !OpType2 || OpType1 != OpType2)
1891 if (OpType1->
getOpcode() == SPIRV::OpTypeVector)
1893 case SPIRV::OpIAddCarryS:
1894 Opcode = SPIRV::OpIAddCarryV;
1896 case SPIRV::OpISubBorrowS:
1897 Opcode = SPIRV::OpISubBorrowV;
1902 Register ResReg =
MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1904 MRI->getRegClassOrNull(
Call->Arguments[1])) {
1905 MRI->setRegClass(ResReg, DstRC);
1906 MRI->setType(ResReg,
MRI->getType(
Call->Arguments[1]));
1924 SPIRV::BuiltIn::BuiltIn
Value =
1925 SPIRV::lookupGetBuiltin(
Call->Builtin->Name,
Call->Builtin->Set)->
Value;
1926 const bool IsDefaultOne = (
Value == SPIRV::BuiltIn::GlobalSize ||
1927 Value == SPIRV::BuiltIn::NumWorkgroups ||
1928 Value == SPIRV::BuiltIn::WorkgroupSize ||
1929 Value == SPIRV::BuiltIn::EnqueuedWorkgroupSize);
1939 SPIRV::lookupImageQueryBuiltin(Builtin->
Name, Builtin->
Set)->Component;
1943 unsigned NumExpectedRetComponents =
1944 Call->ReturnType->getOpcode() == SPIRV::OpTypeVector
1945 ?
Call->ReturnType->getOperand(2).getImm()
1952 if (NumExpectedRetComponents != NumActualRetComponents) {
1953 unsigned Bitwidth =
Call->ReturnType->getOpcode() == SPIRV::OpTypeInt
1954 ?
Call->ReturnType->getOperand(1).getImm()
1961 IntTy, NumActualRetComponents, MIRBuilder,
true);
1966 IsDimBuf ? SPIRV::OpImageQuerySize : SPIRV::OpImageQuerySizeLod;
1973 if (NumExpectedRetComponents == NumActualRetComponents)
1975 if (NumExpectedRetComponents == 1) {
1977 unsigned ExtractedComposite =
1978 Component == 3 ? NumActualRetComponents - 1 : Component;
1979 assert(ExtractedComposite < NumActualRetComponents &&
1980 "Invalid composite index!");
1983 if (QueryResultType->
getOpcode() == SPIRV::OpTypeVector) {
1985 if (TypeReg != NewTypeReg &&
1987 TypeReg = NewTypeReg;
1989 MIRBuilder.
buildInstr(SPIRV::OpCompositeExtract)
1993 .
addImm(ExtractedComposite);
1994 if (NewType !=
nullptr)
1999 auto MIB = MIRBuilder.
buildInstr(SPIRV::OpVectorShuffle)
2004 for (
unsigned i = 0; i < NumExpectedRetComponents; ++i)
2005 MIB.
addImm(i < NumActualRetComponents ? i : 0xffffffff);
2013 assert(
Call->ReturnType->getOpcode() == SPIRV::OpTypeInt &&
2014 "Image samples query result must be of int type!");
2019 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
2022 SPIRV::Dim::Dim ImageDimensionality =
static_cast<SPIRV::Dim::Dim
>(
2024 (void)ImageDimensionality;
2027 case SPIRV::OpImageQuerySamples:
2028 assert(ImageDimensionality == SPIRV::Dim::DIM_2D &&
2029 "Image must be of 2D dimensionality");
2031 case SPIRV::OpImageQueryLevels:
2032 assert((ImageDimensionality == SPIRV::Dim::DIM_1D ||
2033 ImageDimensionality == SPIRV::Dim::DIM_2D ||
2034 ImageDimensionality == SPIRV::Dim::DIM_3D ||
2035 ImageDimensionality == SPIRV::Dim::DIM_Cube) &&
2036 "Image must be of 1D/2D/3D/Cube dimensionality");
2048static SPIRV::SamplerAddressingMode::SamplerAddressingMode
2050 switch (Bitmask & SPIRV::CLK_ADDRESS_MODE_MASK) {
2051 case SPIRV::CLK_ADDRESS_CLAMP:
2052 return SPIRV::SamplerAddressingMode::Clamp;
2053 case SPIRV::CLK_ADDRESS_CLAMP_TO_EDGE:
2054 return SPIRV::SamplerAddressingMode::ClampToEdge;
2055 case SPIRV::CLK_ADDRESS_REPEAT:
2056 return SPIRV::SamplerAddressingMode::Repeat;
2057 case SPIRV::CLK_ADDRESS_MIRRORED_REPEAT:
2058 return SPIRV::SamplerAddressingMode::RepeatMirrored;
2059 case SPIRV::CLK_ADDRESS_NONE:
2060 return SPIRV::SamplerAddressingMode::None;
2067 return (Bitmask & SPIRV::CLK_NORMALIZED_COORDS_TRUE) ? 1 : 0;
2070static SPIRV::SamplerFilterMode::SamplerFilterMode
2072 if (Bitmask & SPIRV::CLK_FILTER_LINEAR)
2073 return SPIRV::SamplerFilterMode::Linear;
2074 if (Bitmask & SPIRV::CLK_FILTER_NEAREST)
2075 return SPIRV::SamplerFilterMode::Nearest;
2076 return SPIRV::SamplerFilterMode::Nearest;
2083 if (
Call->isSpirvOp())
2090 if (HasOclSampler) {
2104 Register SampledImage =
MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2115 if (
Call->ReturnType->getOpcode() != SPIRV::OpTypeVector) {
2119 MRI->createGenericVirtualRegister(GR->
getRegType(TempType));
2122 MIRBuilder.
buildInstr(SPIRV::OpImageSampleExplicitLod)
2127 .
addImm(SPIRV::ImageOperand::Lod)
2129 MIRBuilder.
buildInstr(SPIRV::OpCompositeExtract)
2135 MIRBuilder.
buildInstr(SPIRV::OpImageSampleExplicitLod)
2140 .
addImm(SPIRV::ImageOperand::Lod)
2143 }
else if (HasMsaa) {
2149 .
addImm(SPIRV::ImageOperand::Sample)
2164 if (
Call->isSpirvOp())
2179 if (
Call->Builtin->Name.contains_insensitive(
2180 "__translate_sampler_initializer")) {
2187 return Sampler.isValid();
2188 }
else if (
Call->Builtin->Name.contains_insensitive(
"__spirv_SampledImage")) {
2195 Call->ReturnRegister.isValid()
2196 ?
Call->ReturnRegister
2197 :
MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2204 }
else if (
Call->Builtin->Name.contains_insensitive(
2205 "__spirv_ImageSampleExplicitLod")) {
2207 std::string ReturnType = DemangledCall.
str();
2208 if (DemangledCall.
contains(
"_R")) {
2209 ReturnType = ReturnType.substr(ReturnType.find(
"_R") + 2);
2210 ReturnType = ReturnType.substr(0, ReturnType.find(
'('));
2217 std::string DiagMsg =
2218 "Unable to recognize SPIRV type name: " + ReturnType;
2221 MIRBuilder.
buildInstr(SPIRV::OpImageSampleExplicitLod)
2226 .
addImm(SPIRV::ImageOperand::Lod)
2236 Call->Arguments[1],
Call->Arguments[2]);
2244 SPIRV::OpCompositeConstructContinuedINTEL,
2245 Call->Arguments,
Call->ReturnRegister,
2255 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
2256 bool IsSet = Opcode != SPIRV::OpCooperativeMatrixStoreKHR &&
2257 Opcode != SPIRV::OpCooperativeMatrixStoreCheckedINTEL &&
2258 Opcode != SPIRV::OpCooperativeMatrixPrefetchINTEL;
2259 unsigned ArgSz =
Call->Arguments.size();
2260 unsigned LiteralIdx = 0;
2263 case SPIRV::OpCooperativeMatrixLoadKHR:
2264 LiteralIdx = ArgSz > 3 ? 3 : 0;
2266 case SPIRV::OpCooperativeMatrixStoreKHR:
2267 LiteralIdx = ArgSz > 4 ? 4 : 0;
2269 case SPIRV::OpCooperativeMatrixLoadCheckedINTEL:
2270 LiteralIdx = ArgSz > 7 ? 7 : 0;
2272 case SPIRV::OpCooperativeMatrixStoreCheckedINTEL:
2273 LiteralIdx = ArgSz > 8 ? 8 : 0;
2276 case SPIRV::OpCooperativeMatrixMulAddKHR:
2277 LiteralIdx = ArgSz > 3 ? 3 : 0;
2283 if (Opcode == SPIRV::OpCooperativeMatrixPrefetchINTEL) {
2285 auto MIB = MIRBuilder.
buildInstr(SPIRV::OpCooperativeMatrixPrefetchINTEL)
2302 if (Opcode == SPIRV::OpCooperativeMatrixLengthKHR) {
2313 IsSet ? TypeReg :
Register(0), ImmArgs);
2322 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
2326 case SPIRV::OpSpecConstant: {
2336 (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
2337 Const->getOpcode() == TargetOpcode::G_FCONSTANT) &&
2338 "Argument should be either an int or floating-point constant");
2341 if (
Call->ReturnType->getOpcode() == SPIRV::OpTypeBool) {
2342 assert(ConstOperand.
isCImm() &&
"Int constant operand is expected");
2344 ? SPIRV::OpSpecConstantTrue
2345 : SPIRV::OpSpecConstantFalse;
2351 if (
Call->ReturnType->getOpcode() != SPIRV::OpTypeBool) {
2352 if (Const->getOpcode() == TargetOpcode::G_CONSTANT)
2359 case SPIRV::OpSpecConstantComposite: {
2361 SPIRV::OpSpecConstantCompositeContinuedINTEL,
2362 Call->Arguments,
Call->ReturnRegister,
2377 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
2388 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
2398 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
2412 const LLT ValTy =
MRI->getType(InputReg);
2413 Register ActualRetValReg =
MRI->createGenericVirtualRegister(ValTy);
2416 InputReg =
Call->Arguments[1];
2419 if (InputType->getTypeID() == llvm::Type::TypeID::TypedPointerTyID) {
2420 LLT InputLLT =
MRI->getType(InputReg);
2421 PtrInputReg =
MRI->createGenericVirtualRegister(InputLLT);
2427 MIRBuilder.
buildLoad(PtrInputReg, InputReg, *MMO1);
2428 MRI->setRegClass(PtrInputReg, &SPIRV::iIDRegClass);
2432 for (
unsigned index = 2; index < 7; index++) {
2447 unsigned Size = ValTy.getSizeInBytes();
2451 MRI->setRegClass(ActualRetValReg, &SPIRV::pIDRegClass);
2452 MIRBuilder.
buildStore(ActualRetValReg,
Call->Arguments[0], *MMO);
2455 for (
unsigned index = 1; index < 6; index++)
2468 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
2480 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
2491 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
2501 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
2503 unsigned Scope = SPIRV::Scope::Workgroup;
2505 Scope = SPIRV::Scope::Subgroup;
2515 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
2517 bool IsSet = Opcode != SPIRV::OpPredicatedStoreINTEL;
2518 unsigned ArgSz =
Call->Arguments.size();
2528 IsSet ? TypeReg :
Register(0), ImmArgs);
2541 Register TmpReg =
MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2545 unsigned NumArgs =
Call->Arguments.size();
2547 Register GlobalWorkSize =
Call->Arguments[NumArgs < 4 ? 1 : 2];
2549 NumArgs == 2 ?
Register(0) :
Call->Arguments[NumArgs < 4 ? 2 : 3];
2554 if (SpvTy->
getOpcode() == SPIRV::OpTypePointer) {
2560 unsigned Size =
Call->Builtin->Name ==
"ndrange_3D" ? 3 : 2;
2565 FieldTy, MIRBuilder, SPIRV::AccessQualifier::ReadWrite,
true);
2566 GlobalWorkSize =
MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2575 SpvFieldTy, *ST.getInstrInfo());
2580 LocalWorkSize = Const;
2581 if (!GlobalWorkOffset.
isValid())
2582 GlobalWorkOffset = Const;
2590 .
addUse(GlobalWorkOffset);
2604 SPIRV::AccessQualifier::ReadWrite,
true);
2612 bool IsSpirvOp =
Call->isSpirvOp();
2613 bool HasEvents =
Call->Builtin->Name.contains(
"events") || IsSpirvOp;
2620 if (
Call->Builtin->Name.contains(
"_varargs") || IsSpirvOp) {
2621 const unsigned LocalSizeArrayIdx = HasEvents ? 9 : 6;
2629 assert(LocalSizeTy &&
"Local size type is expected");
2635 Int32Ty, MIRBuilder, SPIRV::StorageClass::Function);
2636 for (
unsigned I = 0;
I < LocalSizeNum; ++
I) {
2638 MRI->setType(
Reg, LLType);
2652 auto MIB = MIRBuilder.
buildInstr(SPIRV::OpEnqueueKernel)
2657 const unsigned BlockFIdx = HasEvents ? 6 : 3;
2658 for (
unsigned i = 0; i < BlockFIdx; i++)
2659 MIB.addUse(
Call->Arguments[i]);
2666 MIB.addUse(NullPtr);
2667 MIB.addUse(NullPtr);
2675 Register BlockLiteralReg =
Call->Arguments[BlockFIdx + 1];
2677 MIB.addUse(BlockLiteralReg);
2687 for (
unsigned i = 0; i < LocalSizes.
size(); i++)
2688 MIB.addUse(LocalSizes[i]);
2698 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
2701 case SPIRV::OpRetainEvent:
2702 case SPIRV::OpReleaseEvent:
2704 case SPIRV::OpCreateUserEvent:
2705 case SPIRV::OpGetDefaultQueue:
2709 case SPIRV::OpIsValidEvent:
2714 case SPIRV::OpSetUserEventStatus:
2718 case SPIRV::OpCaptureEventProfilingInfo:
2723 case SPIRV::OpBuildNDRange:
2725 case SPIRV::OpEnqueueKernel:
2738 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
2740 bool IsSet = Opcode == SPIRV::OpGroupAsyncCopy;
2742 if (
Call->isSpirvOp())
2749 case SPIRV::OpGroupAsyncCopy: {
2751 Call->ReturnType->getOpcode() == SPIRV::OpTypeEvent
2755 unsigned NumArgs =
Call->Arguments.size();
2765 ?
Call->Arguments[3]
2768 if (NewType !=
nullptr)
2773 case SPIRV::OpGroupWaitEvents:
2789 SPIRV::lookupConvertBuiltin(
Call->Builtin->Name,
Call->Builtin->Set);
2791 if (!Builtin &&
Call->isSpirvOp()) {
2794 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
2799 assert(Builtin &&
"Conversion builtin not found.");
2802 SPIRV::Decoration::SaturatedConversion, {});
2805 SPIRV::Decoration::FPRoundingMode,
2806 {(unsigned)Builtin->RoundingMode});
2808 std::string NeedExtMsg;
2809 bool IsRightComponentsNumber =
true;
2810 unsigned Opcode = SPIRV::OpNop;
2817 : SPIRV::OpSatConvertSToU;
2820 : SPIRV::OpSConvert;
2822 SPIRV::OpTypeFloat)) {
2826 &MIRBuilder.
getMF().getSubtarget());
2827 if (!ST->canUseExtension(
2828 SPIRV::Extension::SPV_INTEL_bfloat16_conversion))
2829 NeedExtMsg =
"SPV_INTEL_bfloat16_conversion";
2830 IsRightComponentsNumber =
2833 Opcode = SPIRV::OpConvertBF16ToFINTEL;
2835 bool IsSourceSigned =
2837 Opcode = IsSourceSigned ? SPIRV::OpConvertSToF : SPIRV::OpConvertUToF;
2841 SPIRV::OpTypeFloat)) {
2847 &MIRBuilder.
getMF().getSubtarget());
2848 if (!ST->canUseExtension(
2849 SPIRV::Extension::SPV_INTEL_bfloat16_conversion))
2850 NeedExtMsg =
"SPV_INTEL_bfloat16_conversion";
2851 IsRightComponentsNumber =
2854 Opcode = SPIRV::OpConvertFToBF16INTEL;
2857 : SPIRV::OpConvertFToU;
2860 SPIRV::OpTypeFloat)) {
2863 &MIRBuilder.
getMF().getSubtarget());
2864 if (!ST->canUseExtension(
2865 SPIRV::Extension::SPV_INTEL_tensor_float32_conversion))
2866 NeedExtMsg =
"SPV_INTEL_tensor_float32_conversion";
2867 IsRightComponentsNumber =
2870 Opcode = SPIRV::OpRoundFToTF32INTEL;
2873 Opcode = SPIRV::OpFConvert;
2878 if (!NeedExtMsg.empty()) {
2879 std::string DiagMsg = std::string(Builtin->
Name) +
2880 ": the builtin requires the following SPIR-V "
2885 if (!IsRightComponentsNumber) {
2886 std::string DiagMsg =
2887 std::string(Builtin->
Name) +
2888 ": result and argument must have the same number of components";
2891 assert(Opcode != SPIRV::OpNop &&
2892 "Conversion between the types not implemented!");
2906 SPIRV::lookupVectorLoadStoreBuiltin(
Call->Builtin->Name,
2907 Call->Builtin->Set);
2913 .
addImm(
static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
2933 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
2934 bool IsLoad = Opcode == SPIRV::OpLoad;
2938 MIB.addDef(
Call->ReturnRegister);
2946 MIB.addUse(
Call->Arguments[1]);
2948 unsigned NumArgs =
Call->Arguments.size();
2949 if ((IsLoad && NumArgs >= 2) || NumArgs >= 3)
2951 if ((IsLoad && NumArgs >= 3) || NumArgs >= 4)
2964std::tuple<int, unsigned, unsigned>
2966 SPIRV::InstructionSet::InstructionSet Set) {
2969 std::unique_ptr<const IncomingCall>
Call =
2972 return std::make_tuple(-1, 0, 0);
2974 switch (
Call->Builtin->Group) {
2975 case SPIRV::Relational:
2977 case SPIRV::Barrier:
2978 case SPIRV::CastToPtr:
2979 case SPIRV::ImageMiscQuery:
2980 case SPIRV::SpecConstant:
2981 case SPIRV::Enqueue:
2982 case SPIRV::AsyncCopy:
2983 case SPIRV::LoadStore:
2984 case SPIRV::CoopMatr:
2986 SPIRV::lookupNativeBuiltin(
Call->Builtin->Name,
Call->Builtin->Set))
2987 return std::make_tuple(
Call->Builtin->Group, R->Opcode, 0);
2989 case SPIRV::Extended:
2990 if (
const auto *R = SPIRV::lookupExtendedBuiltin(
Call->Builtin->Name,
2991 Call->Builtin->Set))
2992 return std::make_tuple(
Call->Builtin->Group, 0, R->Number);
2994 case SPIRV::VectorLoadStore:
2995 if (
const auto *R = SPIRV::lookupVectorLoadStoreBuiltin(
Call->Builtin->Name,
2996 Call->Builtin->Set))
2997 return std::make_tuple(SPIRV::Extended, 0, R->Number);
3000 if (
const auto *R = SPIRV::lookupGroupBuiltin(
Call->Builtin->Name))
3001 return std::make_tuple(
Call->Builtin->Group, R->Opcode, 0);
3003 case SPIRV::AtomicFloating:
3004 if (
const auto *R = SPIRV::lookupAtomicFloatingBuiltin(
Call->Builtin->Name))
3005 return std::make_tuple(
Call->Builtin->Group, R->Opcode, 0);
3007 case SPIRV::IntelSubgroups:
3008 if (
const auto *R = SPIRV::lookupIntelSubgroupsBuiltin(
Call->Builtin->Name))
3009 return std::make_tuple(
Call->Builtin->Group, R->Opcode, 0);
3011 case SPIRV::GroupUniform:
3012 if (
const auto *R = SPIRV::lookupGroupUniformBuiltin(
Call->Builtin->Name))
3013 return std::make_tuple(
Call->Builtin->Group, R->Opcode, 0);
3015 case SPIRV::IntegerDot:
3017 SPIRV::lookupIntegerDotProductBuiltin(
Call->Builtin->Name))
3018 return std::make_tuple(
Call->Builtin->Group, R->Opcode, 0);
3020 case SPIRV::WriteImage:
3021 return std::make_tuple(
Call->Builtin->Group, SPIRV::OpImageWrite, 0);
3023 return std::make_tuple(
Call->Builtin->Group, TargetOpcode::G_SELECT, 0);
3024 case SPIRV::Construct:
3025 return std::make_tuple(
Call->Builtin->Group, SPIRV::OpCompositeConstruct,
3027 case SPIRV::KernelClock:
3028 return std::make_tuple(
Call->Builtin->Group, SPIRV::OpReadClockKHR, 0);
3030 return std::make_tuple(-1, 0, 0);
3032 return std::make_tuple(-1, 0, 0);
3036 SPIRV::InstructionSet::InstructionSet Set,
3041 LLVM_DEBUG(
dbgs() <<
"Lowering builtin call: " << DemangledCall <<
"\n");
3045 assert(SpvType &&
"Inconsistent return register: expected valid type info");
3046 std::unique_ptr<const IncomingCall>
Call =
3051 return std::nullopt;
3055 assert(Args.size() >=
Call->Builtin->MinNumArgs &&
3056 "Too few arguments to generate the builtin");
3057 if (
Call->Builtin->MaxNumArgs && Args.size() >
Call->Builtin->MaxNumArgs)
3058 LLVM_DEBUG(
dbgs() <<
"More arguments provided than required!\n");
3061 switch (
Call->Builtin->Group) {
3062 case SPIRV::Extended:
3064 case SPIRV::Relational:
3068 case SPIRV::Variable:
3072 case SPIRV::AtomicFloating:
3074 case SPIRV::Barrier:
3076 case SPIRV::CastToPtr:
3079 case SPIRV::IntegerDot:
3083 case SPIRV::ICarryBorrow:
3085 case SPIRV::GetQuery:
3087 case SPIRV::ImageSizeQuery:
3089 case SPIRV::ImageMiscQuery:
3091 case SPIRV::ReadImage:
3093 case SPIRV::WriteImage:
3095 case SPIRV::SampleImage:
3099 case SPIRV::Construct:
3101 case SPIRV::SpecConstant:
3103 case SPIRV::Enqueue:
3105 case SPIRV::AsyncCopy:
3107 case SPIRV::Convert:
3109 case SPIRV::VectorLoadStore:
3111 case SPIRV::LoadStore:
3113 case SPIRV::IntelSubgroups:
3115 case SPIRV::GroupUniform:
3117 case SPIRV::KernelClock:
3119 case SPIRV::CoopMatr:
3121 case SPIRV::ExtendedBitOps:
3123 case SPIRV::BindlessINTEL:
3125 case SPIRV::TernaryBitwiseINTEL:
3127 case SPIRV::Block2DLoadStore:
3131 case SPIRV::PredicatedLoadStore:
3133 case SPIRV::BlockingPipes:
3135 case SPIRV::ArbitraryPrecisionFixedPoint:
3146 [[maybe_unused]]
bool IsOCLBuiltinType = TypeStr.
consume_front(
"ocl_");
3147 assert(IsOCLBuiltinType &&
"Invalid OpenCL builtin prefix");
3164 unsigned VecElts = 0;
3175 TypeStr = TypeStr.
substr(0, TypeStr.
find(
']'));
3187 auto Pos1 = DemangledCall.
find(
'(');
3190 auto Pos2 = DemangledCall.
find(
')');
3193 DemangledCall.
slice(Pos1 + 1, Pos2)
3194 .
split(BuiltinArgsTypeStrs,
',', -1,
false);
3202 if (ArgIdx >= BuiltinArgsTypeStrs.
size())
3204 StringRef TypeStr = BuiltinArgsTypeStrs[ArgIdx].trim();
3213#define GET_BuiltinTypes_DECL
3214#define GET_BuiltinTypes_IMPL
3221#define GET_OpenCLTypes_DECL
3222#define GET_OpenCLTypes_IMPL
3224#include "SPIRVGenTables.inc"
3232 if (Name.starts_with(
"void"))
3234 else if (Name.starts_with(
"int") || Name.starts_with(
"uint"))
3236 else if (Name.starts_with(
"float"))
3238 else if (Name.starts_with(
"half"))
3251 unsigned Opcode = TypeRecord->
Opcode;
3266 "Invalid number of parameters for SPIR-V pipe builtin!");
3269 SPIRV::AccessQualifier::AccessQualifier(
3277 "Invalid number of parameters for SPIR-V coop matrices builtin!");
3279 "SPIR-V coop matrices builtin type must have a type parameter!");
3282 SPIRV::AccessQualifier::ReadWrite,
true);
3285 MIRBuilder, ExtensionType, ElemType, ExtensionType->
getIntParameter(0),
3294 OpaqueType, SPIRV::AccessQualifier::ReadOnly, MIRBuilder);
3303 "Inline SPIR-V type builtin takes an opcode, size, and alignment "
3310 if (ParamEType->getName() ==
"spirv.IntegralConstant") {
3311 assert(ParamEType->getNumTypeParameters() == 1 &&
3312 "Inline SPIR-V integral constant builtin must have a type "
3314 assert(ParamEType->getNumIntParameters() == 1 &&
3315 "Inline SPIR-V integral constant builtin must have a "
3318 auto OperandValue = ParamEType->getIntParameter(0);
3319 auto *OperandType = ParamEType->getTypeParameter(0);
3322 OperandType, MIRBuilder, SPIRV::AccessQualifier::ReadWrite,
true);
3325 OperandValue, MIRBuilder, OperandSPIRVType,
true)));
3327 }
else if (ParamEType->getName() ==
"spirv.Literal") {
3328 assert(ParamEType->getNumTypeParameters() == 0 &&
3329 "Inline SPIR-V literal builtin does not take type "
3331 assert(ParamEType->getNumIntParameters() == 1 &&
3332 "Inline SPIR-V literal builtin must have an integer "
3335 auto OperandValue = ParamEType->getIntParameter(0);
3342 Param, MIRBuilder, SPIRV::AccessQualifier::ReadWrite,
true);
3354 "Vulkan buffers have exactly one type for the type of the buffer.");
3356 "Vulkan buffer have 2 integer parameters: storage class and is "
3360 auto SC =
static_cast<SPIRV::StorageClass::StorageClass
>(
3375 StringRef NameWithParameters = TypeName;
3382 SPIRV::lookupOpenCLType(NameWithParameters);
3385 NameWithParameters);
3393 "Unknown builtin opaque type!");
3397 if (!NameWithParameters.
contains(
'_'))
3401 unsigned BaseNameLength = NameWithParameters.
find(
'_') - 1;
3405 bool HasTypeParameter = !
isDigit(Parameters[0][0]);
3406 if (HasTypeParameter)
3409 for (
unsigned i = HasTypeParameter ? 1 : 0; i < Parameters.size(); i++) {
3410 unsigned IntParameter = 0;
3411 bool ValidLiteral = !Parameters[i].getAsInteger(10, IntParameter);
3414 "Invalid format of SPIR-V builtin parameter literal!");
3418 NameWithParameters.
substr(0, BaseNameLength),
3419 TypeParameters, IntParameters);
3423 SPIRV::AccessQualifier::AccessQualifier AccessQual,
3445 if (Name ==
"spirv.Type") {
3447 }
else if (Name ==
"spirv.VulkanBuffer") {
3449 }
else if (Name ==
"spirv.Padding") {
3451 }
else if (Name ==
"spirv.Layout") {
3465 switch (TypeRecord->
Opcode) {
3466 case SPIRV::OpTypeImage:
3469 case SPIRV::OpTypePipe:
3472 case SPIRV::OpTypeDeviceEvent:
3475 case SPIRV::OpTypeSampler:
3478 case SPIRV::OpTypeSampledImage:
3481 case SPIRV::OpTypeCooperativeMatrixKHR:
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder MachineInstrBuilder & DefMI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU Lower Kernel Arguments
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Promote Memory to Register
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
static const fltSemantics & IEEEsingle()
APInt bitcastToAPInt() const
static APFloat getZero(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Zero.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
uint64_t getZExtValue() const
Get zero extended value.
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
static LLVM_ABI ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
@ ICMP_ULT
unsigned less than
const APFloat & getValueAPF() const
const APInt & getValue() const
Return the constant as an APInt value reference.
A parsed version of the target data layout string in and methods for querying it.
Tagged union holding either a T or a Error.
Class to represent fixed width SIMD vectors.
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
static constexpr LLT vector(ElementCount EC, unsigned ScalarSizeInBits)
Get a low-level vector of some number of elements and element width.
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
constexpr TypeSize getSizeInBytes() const
Returns the total size of the type in bytes, i.e.
This is an important class for using LLVM in a threaded context.
static MCOperand createReg(MCRegister Reg)
static MCOperand createImm(int64_t Val)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Helper class to build MachineInstr.
LLVMContext & getContext() const
MachineInstrBuilder buildSelect(const DstOp &Res, const SrcOp &Tst, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_SELECT Tst, Op0, Op1.
MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_ICMP Pred, Op0, Op1.
MachineBasicBlock::iterator getInsertPt()
Current insertion point for new instructions.
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef< Register > Res, bool HasSideEffects, bool isConvergent)
Build and insert a G_INTRINSIC instruction.
MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
MachineInstrBuilder buildZExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert G_STORE Val, Addr, MMO.
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineFunction & getMF()
Getter for the function we currently build.
MachineRegisterInfo * getMRI()
Getter for MRI.
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
const DataLayout & getDataLayout() const
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
LLVM_ABI void copyIRFlags(const Instruction &I)
Copy all flags to MachineInst MIFlags.
const MachineOperand & getOperand(unsigned i) const
A description of a memory reference used in the backend.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
MachineOperand class - Representation of each machine instruction operand.
const GlobalValue * getGlobal() const
const ConstantInt * getCImm() const
bool isCImm() const
isCImm - Test if this is a MO_CImmediate operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
const MDNode * getMetadata() const
Register getReg() const
getReg - Returns the register number.
const ConstantFP * getFPImm() const
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
LLVM_ABI void setType(Register VReg, LLT Ty)
Set the low-level type of VReg to Ty.
LLVM_ABI void setRegClass(Register Reg, const TargetRegisterClass *RC)
setRegClass - Set the register class of the specified virtual register.
LLVM_ABI Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
unsigned getNumVirtRegs() const
getNumVirtRegs - Return the number of virtual registers created.
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Wrapper class representing virtual and physical registers.
constexpr bool isValid() const
SPIRVType * getOrCreateOpTypePipe(MachineIRBuilder &MIRBuilder, SPIRV::AccessQualifier::AccessQualifier AccQual)
SPIRVType * getSPIRVTypeForVReg(Register VReg, const MachineFunction *MF=nullptr) const
SPIRVType * getOrCreateSPIRVBoolType(MachineIRBuilder &MIRBuilder, bool EmitIR)
SPIRVType * getOrCreatePaddingType(MachineIRBuilder &MIRBuilder)
void assignSPIRVTypeToVReg(SPIRVType *Type, Register VReg, const MachineFunction &MF)
Register buildGlobalVariable(Register Reg, SPIRVType *BaseType, StringRef Name, const GlobalValue *GV, SPIRV::StorageClass::StorageClass Storage, const MachineInstr *Init, bool IsConst, const std::optional< SPIRV::LinkageType::LinkageType > &LinkageType, MachineIRBuilder &MIRBuilder, bool IsInstSelector)
const Type * getTypeForSPIRVType(const SPIRVType *Ty) const
SPIRVType * getOrCreateUnknownType(const Type *Ty, MachineIRBuilder &MIRBuilder, unsigned Opcode, const ArrayRef< MCOperand > Operands)
unsigned getScalarOrVectorComponentCount(Register VReg) const
SPIRVType * getOrCreateSPIRVType(const Type *Type, MachineInstr &I, SPIRV::AccessQualifier::AccessQualifier AQ, bool EmitIR)
unsigned getPointerSize() const
SPIRVType * getOrCreateSPIRVPointerType(const Type *BaseType, MachineIRBuilder &MIRBuilder, SPIRV::StorageClass::StorageClass SC)
SPIRVType * getOrCreateOpTypeByOpcode(const Type *Ty, MachineIRBuilder &MIRBuilder, unsigned Opcode)
Register buildConstantFP(APFloat Val, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType=nullptr)
SPIRVType * getPointeeType(SPIRVType *PtrType)
Register getSPIRVTypeID(const SPIRVType *SpirvType) const
bool isScalarOfType(Register VReg, unsigned TypeOpcode) const
SPIRVType * getOrCreateOpTypeSampledImage(SPIRVType *ImageType, MachineIRBuilder &MIRBuilder)
SPIRVType * getOrCreateVulkanBufferType(MachineIRBuilder &MIRBuilder, Type *ElemType, SPIRV::StorageClass::StorageClass SC, bool IsWritable, bool EmitIr=false)
SPIRVType * getOrCreateSPIRVTypeByName(StringRef TypeStr, MachineIRBuilder &MIRBuilder, bool EmitIR, SPIRV::StorageClass::StorageClass SC=SPIRV::StorageClass::Function, SPIRV::AccessQualifier::AccessQualifier AQ=SPIRV::AccessQualifier::ReadWrite)
SPIRVType * getOrCreateLayoutType(MachineIRBuilder &MIRBuilder, const TargetExtType *T, bool EmitIr=false)
Register getOrCreateConsIntVector(uint64_t Val, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType, bool EmitIR)
const TargetRegisterClass * getRegClass(SPIRVType *SpvType) const
SPIRVType * getOrCreateSPIRVVectorType(SPIRVType *BaseType, unsigned NumElements, MachineIRBuilder &MIRBuilder, bool EmitIR)
SPIRVType * getOrCreateOpTypeCoopMatr(MachineIRBuilder &MIRBuilder, const TargetExtType *ExtensionType, const SPIRVType *ElemType, uint32_t Scope, uint32_t Rows, uint32_t Columns, uint32_t Use, bool EmitIR)
bool isScalarOrVectorOfType(Register VReg, unsigned TypeOpcode) const
Register getOrCreateConstIntArray(uint64_t Val, size_t Num, MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII)
SPIRVType * getOrCreateOpTypeDeviceEvent(MachineIRBuilder &MIRBuilder)
SPIRVType * getImageType(const TargetExtType *ExtensionType, const SPIRV::AccessQualifier::AccessQualifier Qualifier, MachineIRBuilder &MIRBuilder)
SPIRVType * getOrCreateSPIRVIntegerType(unsigned BitWidth, MachineIRBuilder &MIRBuilder)
Register buildConstantInt(uint64_t Val, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType, bool EmitIR, bool ZeroAsNull=true)
LLT getRegType(SPIRVType *SpvType) const
SPIRV::StorageClass::StorageClass getPointerStorageClass(Register VReg) const
SPIRVType * getOrCreateOpTypeSampler(MachineIRBuilder &MIRBuilder)
Register buildConstantSampler(Register Res, unsigned AddrMode, unsigned Param, unsigned FilerMode, MachineIRBuilder &MIRBuilder)
Register getOrCreateConstNullPtr(MachineIRBuilder &MIRBuilder, SPIRVType *SpvType)
unsigned getScalarOrVectorBitWidth(const SPIRVType *Type) const
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
static constexpr size_t npos
bool consume_back(StringRef Suffix)
Returns true if this StringRef has the given suffix and removes that suffix.
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
std::string str() const
str - Get the contents as an std::string.
constexpr StringRef substr(size_t Start, size_t N=npos) const
Return a reference to the substring from [Start, Start + N).
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
bool contains_insensitive(StringRef Other) const
Return true if the given string is a substring of *this, and false otherwise.
StringRef slice(size_t Start, size_t End) const
Return a reference to the substring from [Start, End).
constexpr size_t size() const
size - Get the string size.
bool contains(StringRef Other) const
Return true if the given string is a substring of *this, and false otherwise.
bool consume_front(StringRef Prefix)
Returns true if this StringRef has the given prefix and removes that prefix.
size_t find_first_of(char C, size_t From=0) const
Find the first character in the string that is C, or npos if not found.
size_t rfind(char C, size_t From=npos) const
Search for the last character C in the string.
size_t find(char C, size_t From=0) const
Search for the first character C in the string.
bool ends_with(StringRef Suffix) const
Check if this string ends with the given Suffix.
A switch()-like statement whose cases are string literals.
StringSwitch & EndsWith(StringLiteral S, T Value)
Class to represent struct types.
Class to represent target extensions types, which are generally unintrospectable from target-independ...
ArrayRef< Type * > type_params() const
Return the type parameters for this particular target extension type.
unsigned getNumIntParameters() const
static LLVM_ABI TargetExtType * get(LLVMContext &Context, StringRef Name, ArrayRef< Type * > Types={}, ArrayRef< unsigned > Ints={})
Return a target extension type having the specified name and optional type and integer parameters.
Type * getTypeParameter(unsigned i) const
unsigned getNumTypeParameters() const
unsigned getIntParameter(unsigned i) const
The instances of the Type class are immutable: once they are created, they are never changed.
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
LLVM_ABI StringRef getStructName() const
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
static LLVM_ABI Type * getFloatTy(LLVMContext &C)
static LLVM_ABI Type * getHalfTy(LLVMContext &C)
bool isVoidTy() const
Return true if this is 'void'.
LLVM Value Representation.
LLVM_ABI Value(Type *Ty, unsigned scid)
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
Represents a version number in the form major[.minor[.subminor[.build]]].
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
LLVM_C_ABI LLVMTypeRef LLVMVectorType(LLVMTypeRef ElementType, unsigned ElementCount)
Create a vector type that contains a defined type and has a specific number of elements.
std::string lookupBuiltinNameHelper(StringRef DemangledCall, FPDecorationId *DecorationId)
Parses the name part of the demangled builtin call.
Type * parseBuiltinCallArgumentType(StringRef TypeStr, LLVMContext &Ctx)
bool parseBuiltinTypeStr(SmallVector< StringRef, 10 > &BuiltinArgsTypeStrs, const StringRef DemangledCall, LLVMContext &Ctx)
std::optional< bool > lowerBuiltin(const StringRef DemangledCall, SPIRV::InstructionSet::InstructionSet Set, MachineIRBuilder &MIRBuilder, const Register OrigRet, const Type *OrigRetTy, const SmallVectorImpl< Register > &Args, SPIRVGlobalRegistry *GR, const CallBase &CB)
std::tuple< int, unsigned, unsigned > mapBuiltinToOpcode(const StringRef DemangledCall, SPIRV::InstructionSet::InstructionSet Set)
Helper function for finding a builtin function attributes by a demangled function name.
Type * parseBuiltinCallArgumentBaseType(const StringRef DemangledCall, unsigned ArgIdx, LLVMContext &Ctx)
Parses the provided ArgIdx argument base type in the DemangledCall skeleton.
TargetExtType * parseBuiltinTypeNameToTargetExtType(std::string TypeName, LLVMContext &Context)
Translates a string representing a SPIR-V or OpenCL builtin type to a TargetExtType that can be furth...
SPIRVType * lowerBuiltinType(const Type *OpaqueType, SPIRV::AccessQualifier::AccessQualifier AccessQual, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
This is an optimization pass for GlobalISel generic memory operations.
void buildOpName(Register Target, const StringRef &Name, MachineIRBuilder &MIRBuilder)
static bool build2DBlockIOINTELInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building Intel's 2d block io instructions.
static SPIRVType * getVulkanBufferType(const TargetExtType *ExtensionType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateExtInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR, const CallBase &CB)
void insertAssignInstr(Register Reg, Type *Ty, SPIRVType *SpirvTy, SPIRVGlobalRegistry *GR, MachineIRBuilder &MIB, MachineRegisterInfo &MRI)
Helper external function for inserting ASSIGN_TYPE instuction between Reg and its definition,...
static bool generateBindlessImageINTELInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateGetQueryInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateLoadStoreInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getInlineSpirvType(const TargetExtType *ExtensionType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateConstructInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildAtomicFlagInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building atomic flag instructions (e.g.
static bool generateImageSizeQueryInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRV::SamplerFilterMode::SamplerFilterMode getSamplerFilterModeFromBitmask(unsigned Bitmask)
static bool buildAtomicStoreInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic store instruction.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
FunctionAddr VTableAddr uintptr_t uintptr_t Int32Ty
void addNumImm(const APInt &Imm, MachineInstrBuilder &MIB)
static bool buildExtendedBitOpsInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building extended bit operations.
static const Type * getBlockStructType(Register ParamReg, MachineRegisterInfo *MRI)
static bool generateGroupInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
FPDecorationId demangledPostfixToDecorationId(const std::string &S)
static unsigned getNumComponentsForDim(SPIRV::Dim::Dim dim)
static bool generateICarryBorrowInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static Register buildScopeReg(Register CLScopeRegister, SPIRV::Scope::Scope Scope, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR, MachineRegisterInfo *MRI)
static std::tuple< Register, SPIRVType * > buildBoolRegister(MachineIRBuilder &MIRBuilder, const SPIRVType *ResultType, SPIRVGlobalRegistry *GR)
Helper function building either a resulting scalar or vector bool register depending on the expected ...
static unsigned getNumSizeComponents(SPIRVType *imgType)
Helper function for obtaining the number of size components.
uint64_t getIConstVal(Register ConstReg, const MachineRegisterInfo *MRI)
static Register buildConstantIntReg32(uint64_t Val, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getSampledImageType(const TargetExtType *OpaqueType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
SmallVector< MachineInstr *, 4 > createContinuedInstructions(MachineIRBuilder &MIRBuilder, unsigned Opcode, unsigned MinWC, unsigned ContinuedOpcode, ArrayRef< Register > Args, Register ReturnRegister, Register TypeID)
SPIRV::MemorySemantics::MemorySemantics getMemSemanticsForStorageClass(SPIRV::StorageClass::StorageClass SC)
constexpr unsigned storageClassToAddressSpace(SPIRV::StorageClass::StorageClass SC)
static bool generateSampleImageInst(const StringRef DemangledCall, const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateBarrierInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
LLVM_ABI void SplitString(StringRef Source, SmallVectorImpl< StringRef > &OutFragments, StringRef Delimiters=" \t\n\v\f\r")
SplitString - Split up the specified string according to the specified delimiters,...
static SPIRVType * getCoopMatrType(const TargetExtType *ExtensionType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateKernelClockInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static void setRegClassIfNull(Register Reg, MachineRegisterInfo *MRI, SPIRVGlobalRegistry *GR)
static bool generateGroupUniformInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateWaveInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
void buildOpDecorate(Register Reg, MachineIRBuilder &MIRBuilder, SPIRV::Decoration::Decoration Dec, const std::vector< uint32_t > &DecArgs, StringRef StrImm)
Register createVirtualRegister(SPIRVType *SpvType, SPIRVGlobalRegistry *GR, MachineRegisterInfo *MRI, const MachineFunction &MF)
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
static bool buildBarrierInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building barriers, i.e., memory/control ordering operations.
static bool generateAsyncCopy(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRV::Scope::Scope getSPIRVScope(SPIRV::CLMemoryScope ClScope)
static bool buildAPFixedPointInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getSamplerType(MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateBlockingPipesInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
static Register buildLoadInst(SPIRVType *BaseType, Register PtrRegister, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR, LLT LowLevelType, Register DestinationReg=Register(0))
Helper function for building a load instruction loading into the DestinationReg.
static bool generateEnqueueInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
static bool buildSelectInst(MachineIRBuilder &MIRBuilder, Register ReturnRegister, Register SourceRegister, const SPIRVType *ReturnType, SPIRVGlobalRegistry *GR)
Helper function for building either a vector or scalar select instruction depending on the expected return type.
static const Type * getMachineInstrType(MachineInstr *MI)
bool isDigit(char C)
Checks if character C is one of the 10 decimal digits.
static SPIRV::SamplerAddressingMode::SamplerAddressingMode getSamplerAddressingModeFromBitmask(unsigned Bitmask)
static bool generateAtomicInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
const MachineInstr SPIRVType
static SPIRVType * getLayoutType(const TargetExtType *ExtensionType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateDotOrFMulInst(const StringRef DemangledCall, const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static Register buildBuiltinVariableLoad(MachineIRBuilder &MIRBuilder, SPIRVType *VariableType, SPIRVGlobalRegistry *GR, SPIRV::BuiltIn::BuiltIn BuiltinValue, LLT LLType, Register Reg=Register(0), bool isConst=true, const std::optional< SPIRV::LinkageType::LinkageType > &LinkageTy={ SPIRV::LinkageType::Import})
Helper function for building a load instruction for loading a builtin global variable of BuiltinValue value.
static bool generateConvertInst(const StringRef DemangledCall, const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateTernaryBitwiseFunctionINTELInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateCastToPtrInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
constexpr bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC)
static Register buildMemSemanticsReg(Register SemanticsRegister, Register PtrRegister, unsigned &Semantics, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static unsigned getConstFromIntrinsic(Register Reg, MachineRegisterInfo *MRI)
static bool generateImageMiscQueryInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateSelectInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder)
static bool buildAtomicLoadInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic load instruction.
static bool generateIntelSubgroupsInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateExtendedBitOpsInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildPipeInst(const SPIRV::IncomingCall *Call, unsigned Opcode, unsigned Scope, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateSpecConstantInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getOrCreateSPIRVDeviceEventPointer(MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Type * parseBasicTypeName(StringRef &TypeName, LLVMContext &Ctx)
static bool generateVectorLoadStoreInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool genWorkgroupQuery(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR, SPIRV::BuiltIn::BuiltIn BuiltinValue, uint64_t DefaultValue)
static bool generateCoopMatrInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildBindlessImageINTELInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building Intel's bindless image instructions.
static std::unique_ptr< const SPIRV::IncomingCall > lookupBuiltin(StringRef DemangledCall, SPIRV::InstructionSet::InstructionSet Set, Register ReturnRegister, const SPIRVType *ReturnType, const SmallVectorImpl< Register > &Arguments)
Looks up the demangled builtin call in the SPIRVBuiltins.td records using the provided DemangledCall and the specified instruction Set.
static bool buildAtomicFloatingRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic floating-type instruction.
MachineInstr * getDefInstrMaybeConstant(Register &ConstReg, const MachineRegisterInfo *MRI)
constexpr unsigned BitWidth
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
static bool generate2DBlockIOINTELInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateReadImageInst(const StringRef DemangledCall, const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
bool hasBuiltinTypePrefix(StringRef Name)
static bool buildEnqueueKernel(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Type * getMDOperandAsType(const MDNode *N, unsigned I)
static bool generatePipeInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildTernaryBitwiseFunctionINTELInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building Intel's OpBitwiseFunctionINTEL instruction.
static bool generateAPFixedPointInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildAtomicRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building atomic instructions.
static SPIRV::MemorySemantics::MemorySemantics getSPIRVMemSemantics(std::memory_order MemOrder)
static bool generateRelationalInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildAtomicInitInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder)
Helper function for translating atomic init to OpStore.
static bool generateWriteImageInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getPipeType(const TargetExtType *ExtensionType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static Type * parseTypeString(const StringRef Name, LLVMContext &Context)
bool isSpvIntrinsic(const MachineInstr &MI, Intrinsic::ID IntrinsicID)
static bool generatePredicatedLoadStoreInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateAtomicFloatingInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildNDRange(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getNonParameterizedType(const TargetExtType *ExtensionType, const SPIRV::BuiltinType *TypeRecord, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static MachineInstr * getBlockStructInstr(Register ParamReg, MachineRegisterInfo *MRI)
static bool buildOpFromWrapper(MachineIRBuilder &MIRBuilder, unsigned Opcode, const SPIRV::IncomingCall *Call, Register TypeReg, ArrayRef< uint32_t > ImmArgs={})
static unsigned getSamplerParamFromBitmask(unsigned Bitmask)
static bool buildAtomicCompareExchangeInst(const SPIRV::IncomingCall *Call, const SPIRV::DemangledBuiltin *Builtin, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic compare-exchange instruction.
std::string getLinkStringForBuiltIn(SPIRV::BuiltIn::BuiltIn BuiltInValue)
static bool generateBuiltinVar(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Implement std::hash so that hash_code can be used in STL containers.
This struct is a compact representation of a valid (non-zero power of two) alignment.
This class contains a discriminated union of information about pointers in memory operands,...
FPRoundingMode::FPRoundingMode RoundingMode
InstructionSet::InstructionSet Set
InstructionSet::InstructionSet Set
InstructionSet::InstructionSet Set
InstructionSet::InstructionSet Set
const SmallVectorImpl< Register > & Arguments
const std::string BuiltinName
const SPIRVType * ReturnType
const Register ReturnRegister
const DemangledBuiltin * Builtin
IncomingCall(const std::string BuiltinName, const DemangledBuiltin *Builtin, const Register ReturnRegister, const SPIRVType *ReturnType, const SmallVectorImpl< Register > &Arguments)
InstructionSet::InstructionSet Set
StringRef SpirvTypeLiteral
InstructionSet::InstructionSet Set
FPRoundingMode::FPRoundingMode RoundingMode