20#include "llvm/IR/IntrinsicsSPIRV.h"
25#define DEBUG_TYPE "spirv-builtins"
29#define GET_BuiltinGroup_DECL
30#include "SPIRVGenTables.inc"
34 InstructionSet::InstructionSet
Set;
40#define GET_DemangledBuiltins_DECL
41#define GET_DemangledBuiltins_IMPL
63 InstructionSet::InstructionSet
Set;
67#define GET_NativeBuiltins_DECL
68#define GET_NativeBuiltins_IMPL
86#define GET_GroupBuiltins_DECL
87#define GET_GroupBuiltins_IMPL
97#define GET_IntelSubgroupsBuiltins_DECL
98#define GET_IntelSubgroupsBuiltins_IMPL
105#define GET_AtomicFloatingBuiltins_DECL
106#define GET_AtomicFloatingBuiltins_IMPL
113#define GET_GroupUniformBuiltins_DECL
114#define GET_GroupUniformBuiltins_IMPL
118 InstructionSet::InstructionSet
Set;
123#define GET_GetBuiltins_DECL
124#define GET_GetBuiltins_IMPL
128 InstructionSet::InstructionSet
Set;
132#define GET_ImageQueryBuiltins_DECL
133#define GET_ImageQueryBuiltins_IMPL
141#define GET_IntegerDotProductBuiltins_DECL
142#define GET_IntegerDotProductBuiltins_IMPL
146 InstructionSet::InstructionSet
Set;
157 InstructionSet::InstructionSet
Set;
165#define GET_ConvertBuiltins_DECL
166#define GET_ConvertBuiltins_IMPL
168using namespace InstructionSet;
169#define GET_VectorLoadStoreBuiltins_DECL
170#define GET_VectorLoadStoreBuiltins_IMPL
172#define GET_CLMemoryScope_DECL
173#define GET_CLSamplerAddressingMode_DECL
174#define GET_CLMemoryFenceFlags_DECL
175#define GET_ExtendedBuiltins_DECL
176#include "SPIRVGenTables.inc"
188 StringRef PassPrefix =
"(anonymous namespace)::";
189 std::string BuiltinName;
192 BuiltinName = DemangledCall.
substr(PassPrefix.
size());
194 BuiltinName = DemangledCall;
197 BuiltinName = BuiltinName.
substr(0, BuiltinName.find(
'('));
200 if (BuiltinName.rfind(
"__spirv_ocl_", 0) == 0)
201 BuiltinName = BuiltinName.
substr(12);
206 std::size_t Pos1 = BuiltinName.
rfind(
'<');
207 if (Pos1 != std::string::npos && BuiltinName.back() ==
'>') {
208 std::size_t Pos2 = BuiltinName.rfind(
' ', Pos1);
209 if (Pos2 == std::string::npos)
213 BuiltinName = BuiltinName.substr(Pos2, Pos1 - Pos2);
214 BuiltinName = BuiltinName.substr(BuiltinName.find_last_of(
' ') + 1);
241 static const std::regex SpvWithR(
242 "(__spirv_(ImageSampleExplicitLod|ImageRead|ImageWrite|ImageQuerySizeLod|"
244 "SDotKHR|SUDotKHR|SDotAccSatKHR|UDotAccSatKHR|SUDotAccSatKHR|"
245 "ReadClockKHR|SubgroupBlockReadINTEL|SubgroupImageBlockReadINTEL|"
246 "SubgroupImageMediaBlockReadINTEL|SubgroupImageMediaBlockWriteINTEL|"
248 "UConvert|SConvert|FConvert|SatConvert)[^_]*)(_R[^_]*_?(\\w+)?.*)?");
250 if (std::regex_match(BuiltinName, Match, SpvWithR) && Match.size() > 1) {
251 std::ssub_match SubMatch;
252 if (DecorationId && Match.size() > 3) {
257 BuiltinName = SubMatch.str();
274static std::unique_ptr<const SPIRV::IncomingCall>
276 SPIRV::InstructionSet::InstructionSet Set,
283 DemangledCall.
slice(DemangledCall.
find(
'(') + 1, DemangledCall.
find(
')'));
284 BuiltinArgs.
split(BuiltinArgumentTypes,
',', -1,
false);
289 if ((Builtin = SPIRV::lookupBuiltin(BuiltinName, Set)))
290 return std::make_unique<SPIRV::IncomingCall>(
291 BuiltinName, Builtin, ReturnRegister, ReturnType,
Arguments);
296 if (BuiltinArgumentTypes.
size() >= 1) {
297 char FirstArgumentType = BuiltinArgumentTypes[0][0];
302 switch (FirstArgumentType) {
305 if (Set == SPIRV::InstructionSet::OpenCL_std)
307 else if (Set == SPIRV::InstructionSet::GLSL_std_450)
315 if (Set == SPIRV::InstructionSet::OpenCL_std)
317 else if (Set == SPIRV::InstructionSet::GLSL_std_450)
324 if (Set == SPIRV::InstructionSet::OpenCL_std ||
325 Set == SPIRV::InstructionSet::GLSL_std_450)
331 if (!Prefix.empty() &&
332 (Builtin = SPIRV::lookupBuiltin(Prefix + BuiltinName, Set)))
333 return std::make_unique<SPIRV::IncomingCall>(
334 BuiltinName, Builtin, ReturnRegister, ReturnType,
Arguments);
341 switch (FirstArgumentType) {
362 if (!Suffix.empty() &&
363 (Builtin = SPIRV::lookupBuiltin(BuiltinName + Suffix, Set)))
364 return std::make_unique<SPIRV::IncomingCall>(
365 BuiltinName, Builtin, ReturnRegister, ReturnType,
Arguments);
380 assert(
MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST &&
381 MI->getOperand(1).isReg());
382 Register BitcastReg =
MI->getOperand(1).getReg();
396 assert(
DefMI->getOpcode() == TargetOpcode::G_CONSTANT &&
397 DefMI->getOperand(1).isCImm());
398 return DefMI->getOperand(1).getCImm()->getValue().getZExtValue();
410 Register ValueReg =
MI->getOperand(0).getReg();
416 assert(Ty &&
"Type is expected");
428 if (
MI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE)
429 return MI->getOperand(1).getGlobal()->getType();
431 "Blocks in OpenCL C must be traceable to allocation site");
443static std::tuple<Register, SPIRVType *>
449 if (ResultType->
getOpcode() == SPIRV::OpTypeVector) {
464 return std::make_tuple(ResultRegister, BoolType);
475 if (ReturnType->getOpcode() == SPIRV::OpTypeVector) {
486 return MIRBuilder.
buildSelect(ReturnRegister, SourceRegister, TrueConst,
496 if (!DestinationReg.isValid())
501 MIRBuilder.
buildLoad(DestinationReg, PtrRegister, PtrInfo,
Align());
502 return DestinationReg;
511 const std::optional<SPIRV::LinkageType::LinkageType> &LinkageTy = {
512 SPIRV::LinkageType::Import}) {
520 VariableType, MIRBuilder, SPIRV::StorageClass::Input);
526 SPIRV::StorageClass::Input,
nullptr, isConst, LinkageTy,
533 return LoadedRegister;
543static SPIRV::MemorySemantics::MemorySemantics
546 case std::memory_order_relaxed:
547 return SPIRV::MemorySemantics::None;
548 case std::memory_order_acquire:
549 return SPIRV::MemorySemantics::Acquire;
550 case std::memory_order_release:
551 return SPIRV::MemorySemantics::Release;
552 case std::memory_order_acq_rel:
553 return SPIRV::MemorySemantics::AcquireRelease;
554 case std::memory_order_seq_cst:
555 return SPIRV::MemorySemantics::SequentiallyConsistent;
563 case SPIRV::CLMemoryScope::memory_scope_work_item:
564 return SPIRV::Scope::Invocation;
565 case SPIRV::CLMemoryScope::memory_scope_work_group:
566 return SPIRV::Scope::Workgroup;
567 case SPIRV::CLMemoryScope::memory_scope_device:
568 return SPIRV::Scope::Device;
569 case SPIRV::CLMemoryScope::memory_scope_all_svm_devices:
570 return SPIRV::Scope::CrossDevice;
571 case SPIRV::CLMemoryScope::memory_scope_sub_group:
572 return SPIRV::Scope::Subgroup;
585 SPIRV::Scope::Scope Scope,
589 if (CLScopeRegister.
isValid()) {
594 if (CLScope ==
static_cast<unsigned>(Scope)) {
595 MRI->setRegClass(CLScopeRegister, &SPIRV::iIDRegClass);
596 return CLScopeRegister;
604 if (
MRI->getRegClassOrNull(
Reg))
608 SpvType ? GR->
getRegClass(SpvType) : &SPIRV::iIDRegClass);
612 Register PtrRegister,
unsigned &Semantics,
615 if (SemanticsRegister.
isValid()) {
617 std::memory_order Order =
622 if (
static_cast<unsigned>(Order) == Semantics) {
623 MRI->setRegClass(SemanticsRegister, &SPIRV::iIDRegClass);
624 return SemanticsRegister;
637 unsigned Sz =
Call->Arguments.size() - ImmArgs.size();
638 for (
unsigned i = 0; i < Sz; ++i)
639 MIB.addUse(
Call->Arguments[i]);
648 if (
Call->isSpirvOp())
652 "Need 2 arguments for atomic init translation");
664 if (
Call->isSpirvOp())
672 Call->Arguments.size() > 1
676 if (
Call->Arguments.size() > 2) {
678 MemSemanticsReg =
Call->Arguments[2];
681 SPIRV::MemorySemantics::SequentiallyConsistent |
699 if (
Call->isSpirvOp())
707 SPIRV::MemorySemantics::SequentiallyConsistent |
722 if (
Call->isSpirvOp())
726 bool IsCmpxchg =
Call->Builtin->Name.contains(
"cmpxchg");
733 LLT DesiredLLT =
MRI->getType(Desired);
736 SPIRV::OpTypePointer);
739 assert(IsCmpxchg ? ExpectedType == SPIRV::OpTypeInt
740 : ExpectedType == SPIRV::OpTypePointer);
745 auto StorageClass =
static_cast<SPIRV::StorageClass::StorageClass
>(
753 ? SPIRV::MemorySemantics::None
754 : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
757 ? SPIRV::MemorySemantics::None
758 : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
759 if (
Call->Arguments.size() >= 4) {
761 "Need 5+ args for explicit atomic cmpxchg");
768 if (
static_cast<unsigned>(MemOrdEq) == MemSemEqual)
769 MemSemEqualReg =
Call->Arguments[3];
770 if (
static_cast<unsigned>(MemOrdNeq) == MemSemEqual)
771 MemSemUnequalReg =
Call->Arguments[4];
775 if (!MemSemUnequalReg.
isValid())
779 auto Scope = IsCmpxchg ? SPIRV::Scope::Workgroup : SPIRV::Scope::Device;
780 if (
Call->Arguments.size() >= 6) {
782 "Extra args for explicit atomic cmpxchg");
783 auto ClScope =
static_cast<SPIRV::CLMemoryScope
>(
786 if (ClScope ==
static_cast<unsigned>(Scope))
787 ScopeReg =
Call->Arguments[5];
797 Register Tmp = !IsCmpxchg ?
MRI->createGenericVirtualRegister(DesiredLLT)
798 :
Call->ReturnRegister;
799 if (!
MRI->getRegClassOrNull(Tmp))
823 if (
Call->isSpirvOp())
832 "Too many args for explicit atomic RMW");
833 ScopeRegister =
buildScopeReg(ScopeRegister, SPIRV::Scope::Workgroup,
834 MIRBuilder, GR,
MRI);
837 unsigned Semantics = SPIRV::MemorySemantics::None;
841 Semantics, MIRBuilder, GR);
845 if (
Call->ReturnType->getOpcode() == SPIRV::OpTypeFloat) {
846 if (Opcode == SPIRV::OpAtomicIAdd) {
847 Opcode = SPIRV::OpAtomicFAddEXT;
848 }
else if (Opcode == SPIRV::OpAtomicISub) {
851 Opcode = SPIRV::OpAtomicFAddEXT;
853 MRI->createGenericVirtualRegister(
MRI->getType(ValueReg));
862 ValueReg = NegValueReg;
881 "Wrong number of atomic floating-type builtin");
901 bool IsSet = Opcode == SPIRV::OpAtomicFlagTestAndSet;
903 if (
Call->isSpirvOp())
909 unsigned Semantics = SPIRV::MemorySemantics::SequentiallyConsistent;
913 Semantics, MIRBuilder, GR);
915 assert((Opcode != SPIRV::OpAtomicFlagClear ||
916 (Semantics != SPIRV::MemorySemantics::Acquire &&
917 Semantics != SPIRV::MemorySemantics::AcquireRelease)) &&
918 "Invalid memory order argument!");
941 if ((Opcode == SPIRV::OpControlBarrierArriveINTEL ||
942 Opcode == SPIRV::OpControlBarrierWaitINTEL) &&
943 !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_split_barrier)) {
944 std::string DiagMsg = std::string(Builtin->
Name) +
945 ": the builtin requires the following SPIR-V "
946 "extension: SPV_INTEL_split_barrier";
950 if (
Call->isSpirvOp())
955 unsigned MemSemantics = SPIRV::MemorySemantics::None;
957 if (MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE)
958 MemSemantics |= SPIRV::MemorySemantics::WorkgroupMemory;
960 if (MemFlags & SPIRV::CLK_GLOBAL_MEM_FENCE)
961 MemSemantics |= SPIRV::MemorySemantics::CrossWorkgroupMemory;
963 if (MemFlags & SPIRV::CLK_IMAGE_MEM_FENCE)
964 MemSemantics |= SPIRV::MemorySemantics::ImageMemory;
966 if (Opcode == SPIRV::OpMemoryBarrier)
970 else if (Opcode == SPIRV::OpControlBarrierArriveINTEL)
971 MemSemantics |= SPIRV::MemorySemantics::Release;
972 else if (Opcode == SPIRV::OpControlBarrierWaitINTEL)
973 MemSemantics |= SPIRV::MemorySemantics::Acquire;
975 MemSemantics |= SPIRV::MemorySemantics::SequentiallyConsistent;
978 MemFlags == MemSemantics
982 SPIRV::Scope::Scope Scope = SPIRV::Scope::Workgroup;
983 SPIRV::Scope::Scope MemScope = Scope;
984 if (
Call->Arguments.size() >= 2) {
986 ((Opcode != SPIRV::OpMemoryBarrier &&
Call->Arguments.size() == 2) ||
987 (Opcode == SPIRV::OpMemoryBarrier &&
Call->Arguments.size() == 3)) &&
988 "Extra args for explicitly scoped barrier");
989 Register ScopeArg = (Opcode == SPIRV::OpMemoryBarrier) ?
Call->Arguments[2]
990 :
Call->Arguments[1];
991 SPIRV::CLMemoryScope CLScope =
994 if (!(MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE) ||
995 (Opcode == SPIRV::OpMemoryBarrier))
997 if (CLScope ==
static_cast<unsigned>(Scope))
998 ScopeReg =
Call->Arguments[1];
1005 if (Opcode != SPIRV::OpMemoryBarrier)
1007 MIB.
addUse(MemSemanticsReg);
1019 if ((Opcode == SPIRV::OpBitFieldInsert ||
1020 Opcode == SPIRV::OpBitFieldSExtract ||
1021 Opcode == SPIRV::OpBitFieldUExtract || Opcode == SPIRV::OpBitReverse) &&
1022 !ST->canUseExtension(SPIRV::Extension::SPV_KHR_bit_instructions)) {
1023 std::string DiagMsg = std::string(Builtin->
Name) +
1024 ": the builtin requires the following SPIR-V "
1025 "extension: SPV_KHR_bit_instructions";
1030 if (
Call->isSpirvOp())
1037 for (
unsigned i = 0; i <
Call->Arguments.size(); ++i)
1049 if (
Call->isSpirvOp())
1066 if (
Call->isSpirvOp())
1073 for (
unsigned i = 0; i <
Call->Arguments.size(); ++i)
1083 if (
Call->isSpirvOp())
1090 for (
unsigned i = 0; i <
Call->Arguments.size(); ++i)
1102 if (
Call->isSpirvOp())
1108 for (
unsigned i = 0; i <
Call->Arguments.size(); ++i)
1118 case SPIRV::OpCommitReadPipe:
1119 case SPIRV::OpCommitWritePipe:
1121 case SPIRV::OpGroupCommitReadPipe:
1122 case SPIRV::OpGroupCommitWritePipe:
1123 case SPIRV::OpGroupReserveReadPipePackets:
1124 case SPIRV::OpGroupReserveWritePipePackets: {
1128 MRI->setRegClass(ScopeConstReg, &SPIRV::iIDRegClass);
1132 if (Opcode == SPIRV::OpGroupReserveReadPipePackets ||
1133 Opcode == SPIRV::OpGroupReserveWritePipePackets)
1137 MIB.
addUse(ScopeConstReg);
1138 for (
unsigned int i = 0; i <
Call->Arguments.size(); ++i)
1151 case SPIRV::Dim::DIM_1D:
1152 case SPIRV::Dim::DIM_Buffer:
1154 case SPIRV::Dim::DIM_2D:
1155 case SPIRV::Dim::DIM_Cube:
1156 case SPIRV::Dim::DIM_Rect:
1158 case SPIRV::Dim::DIM_3D:
1171 return arrayed ? numComps + 1 : numComps;
1175 switch (BuiltinNumber) {
1176 case SPIRV::OpenCLExtInst::s_min:
1177 case SPIRV::OpenCLExtInst::u_min:
1178 case SPIRV::OpenCLExtInst::s_max:
1179 case SPIRV::OpenCLExtInst::u_max:
1180 case SPIRV::OpenCLExtInst::fmax:
1181 case SPIRV::OpenCLExtInst::fmin:
1182 case SPIRV::OpenCLExtInst::fmax_common:
1183 case SPIRV::OpenCLExtInst::fmin_common:
1184 case SPIRV::OpenCLExtInst::s_clamp:
1185 case SPIRV::OpenCLExtInst::fclamp:
1186 case SPIRV::OpenCLExtInst::u_clamp:
1187 case SPIRV::OpenCLExtInst::mix:
1188 case SPIRV::OpenCLExtInst::step:
1189 case SPIRV::OpenCLExtInst::smoothstep:
1206 unsigned ResultElementCount =
1208 bool MayNeedPromotionToVec =
1211 if (!MayNeedPromotionToVec)
1212 return {
Call->Arguments.begin(),
Call->Arguments.end()};
1218 if (ArgumentType !=
Call->ReturnType) {
1220 auto VecSplat = MIRBuilder.
buildInstr(SPIRV::OpCompositeConstruct)
1223 for (
unsigned I = 0;
I != ResultElementCount; ++
I)
1237 SPIRV::lookupExtendedBuiltin(Builtin->
Name, Builtin->
Set)->Number;
1244 if (ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2) &&
1245 (
Number == SPIRV::OpenCLExtInst::fmin_common ||
1246 Number == SPIRV::OpenCLExtInst::fmax_common)) {
1248 ? SPIRV::OpenCLExtInst::fmin
1249 : SPIRV::OpenCLExtInst::fmax;
1261 .
addImm(
static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
1268 if (OrigNumber == SPIRV::OpenCLExtInst::fmin_common ||
1269 OrigNumber == SPIRV::OpenCLExtInst::fmax_common) {
1283 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
1287 std::tie(CompareRegister, RelationType) =
1300 Call->ReturnType, GR);
1308 SPIRV::lookupGroupBuiltin(Builtin->
Name);
1311 if (
Call->isSpirvOp()) {
1314 if (GroupBuiltin->
Opcode ==
1315 SPIRV::OpSubgroupMatrixMultiplyAccumulateINTEL &&
1316 Call->Arguments.size() > 4)
1325 if (!
MI ||
MI->getOpcode() != TargetOpcode::G_CONSTANT)
1327 "Group Operation parameter must be an integer constant");
1328 uint64_t GrpOp =
MI->getOperand(1).getCImm()->getValue().getZExtValue();
1335 for (
unsigned i = 2; i <
Call->Arguments.size(); ++i)
1348 if (ArgInstruction->
getOpcode() == TargetOpcode::G_CONSTANT) {
1349 if (BoolRegType->
getOpcode() != SPIRV::OpTypeBool)
1353 if (BoolRegType->
getOpcode() == SPIRV::OpTypeInt) {
1355 MRI->setRegClass(Arg0, &SPIRV::iIDRegClass);
1362 }
else if (BoolRegType->
getOpcode() != SPIRV::OpTypeBool) {
1374 const bool HasBoolReturnTy =
1379 if (HasBoolReturnTy)
1380 std::tie(GroupResultRegister, GroupResultType) =
1383 auto Scope = Builtin->
Name.
starts_with(
"sub_group") ? SPIRV::Scope::Subgroup
1384 : SPIRV::Scope::Workgroup;
1388 if (GroupBuiltin->
Opcode == SPIRV::OpGroupBroadcast &&
1389 Call->Arguments.size() > 2) {
1397 if (!ElemType || ElemType->
getOpcode() != SPIRV::OpTypeInt)
1399 unsigned VecLen =
Call->Arguments.size() - 1;
1400 VecReg =
MRI->createGenericVirtualRegister(
1402 MRI->setRegClass(VecReg, &SPIRV::vIDRegClass);
1408 for (
unsigned i = 1; i <
Call->Arguments.size(); i++) {
1409 MIB.addUse(
Call->Arguments[i]);
1418 .
addDef(GroupResultRegister)
1424 if (
Call->Arguments.size() > 0) {
1425 MIB.addUse(Arg0.
isValid() ? Arg0 :
Call->Arguments[0]);
1430 for (
unsigned i = 1; i <
Call->Arguments.size(); i++)
1431 MIB.addUse(
Call->Arguments[i]);
1435 if (HasBoolReturnTy)
1437 Call->ReturnType, GR);
1448 SPIRV::lookupIntelSubgroupsBuiltin(Builtin->
Name);
1450 if (IntelSubgroups->
IsMedia &&
1451 !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_media_block_io)) {
1452 std::string DiagMsg = std::string(Builtin->
Name) +
1453 ": the builtin requires the following SPIR-V "
1454 "extension: SPV_INTEL_media_block_io";
1456 }
else if (!IntelSubgroups->
IsMedia &&
1457 !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_subgroups)) {
1458 std::string DiagMsg = std::string(Builtin->
Name) +
1459 ": the builtin requires the following SPIR-V "
1460 "extension: SPV_INTEL_subgroups";
1465 if (
Call->isSpirvOp()) {
1466 bool IsSet = OpCode != SPIRV::OpSubgroupBlockWriteINTEL &&
1467 OpCode != SPIRV::OpSubgroupImageBlockWriteINTEL &&
1468 OpCode != SPIRV::OpSubgroupImageMediaBlockWriteINTEL;
1474 if (IntelSubgroups->
IsBlock) {
1477 if (Arg0Type->getOpcode() == SPIRV::OpTypeImage) {
1483 case SPIRV::OpSubgroupBlockReadINTEL:
1484 OpCode = SPIRV::OpSubgroupImageBlockReadINTEL;
1486 case SPIRV::OpSubgroupBlockWriteINTEL:
1487 OpCode = SPIRV::OpSubgroupImageBlockWriteINTEL;
1510 for (
size_t i = 0; i <
Call->Arguments.size(); ++i)
1521 if (!ST->canUseExtension(
1522 SPIRV::Extension::SPV_KHR_uniform_group_instructions)) {
1523 std::string DiagMsg = std::string(Builtin->
Name) +
1524 ": the builtin requires the following SPIR-V "
1525 "extension: SPV_KHR_uniform_group_instructions";
1529 SPIRV::lookupGroupUniformBuiltin(Builtin->
Name);
1539 if (!Const || Const->getOpcode() != TargetOpcode::G_CONSTANT)
1541 "expect a constant group operation for a uniform group instruction",
1544 if (!ConstOperand.
isCImm())
1554 MIB.addUse(ValueReg);
1565 if (!ST->canUseExtension(SPIRV::Extension::SPV_KHR_shader_clock)) {
1566 std::string DiagMsg = std::string(Builtin->
Name) +
1567 ": the builtin requires the following SPIR-V "
1568 "extension: SPV_KHR_shader_clock";
1574 if (Builtin->
Name ==
"__spirv_ReadClockKHR") {
1581 SPIRV::Scope::Scope ScopeArg =
1583 .
EndsWith(
"device", SPIRV::Scope::Scope::Device)
1584 .
EndsWith(
"work_group", SPIRV::Scope::Scope::Workgroup)
1585 .
EndsWith(
"sub_group", SPIRV::Scope::Scope::Subgroup);
1626 SPIRV::BuiltIn::BuiltIn BuiltinValue,
1629 const unsigned ResultWidth =
Call->ReturnType->getOperand(1).getImm();
1640 bool IsConstantIndex =
1641 IndexInstruction->getOpcode() == TargetOpcode::G_CONSTANT;
1647 if (PointerSize != ResultWidth) {
1648 DefaultReg =
MRI->createGenericVirtualRegister(
LLT::scalar(PointerSize));
1649 MRI->setRegClass(DefaultReg, &SPIRV::iIDRegClass);
1651 MIRBuilder.
getMF());
1652 ToTruncate = DefaultReg;
1656 MIRBuilder.
buildCopy(DefaultReg, NewRegister);
1665 if (!IsConstantIndex || PointerSize != ResultWidth) {
1666 Extracted =
MRI->createGenericVirtualRegister(
LLT::scalar(PointerSize));
1667 MRI->setRegClass(Extracted, &SPIRV::iIDRegClass);
1674 ExtractInst.
addUse(LoadedVector).
addUse(IndexRegister);
1677 if (!IsConstantIndex) {
1685 MRI->setRegClass(CompareRegister, &SPIRV::iIDRegClass);
1700 if (PointerSize != ResultWidth) {
1703 MRI->setRegClass(SelectionResult, &SPIRV::iIDRegClass);
1705 MIRBuilder.
getMF());
1708 MIRBuilder.
buildSelect(SelectionResult, CompareRegister, Extracted,
1710 ToTruncate = SelectionResult;
1712 ToTruncate = Extracted;
1716 if (PointerSize != ResultWidth)
1726 SPIRV::BuiltIn::BuiltIn
Value =
1727 SPIRV::lookupGetBuiltin(Builtin->
Name, Builtin->
Set)->
Value;
1729 if (
Value == SPIRV::BuiltIn::GlobalInvocationId)
1735 if (
Call->ReturnType->getOpcode() == SPIRV::OpTypeVector)
1742 LLType,
Call->ReturnRegister);
1751 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
1754 case SPIRV::OpStore:
1756 case SPIRV::OpAtomicLoad:
1758 case SPIRV::OpAtomicStore:
1760 case SPIRV::OpAtomicCompareExchange:
1761 case SPIRV::OpAtomicCompareExchangeWeak:
1764 case SPIRV::OpAtomicIAdd:
1765 case SPIRV::OpAtomicISub:
1766 case SPIRV::OpAtomicOr:
1767 case SPIRV::OpAtomicXor:
1768 case SPIRV::OpAtomicAnd:
1769 case SPIRV::OpAtomicExchange:
1771 case SPIRV::OpMemoryBarrier:
1773 case SPIRV::OpAtomicFlagTestAndSet:
1774 case SPIRV::OpAtomicFlagClear:
1777 if (
Call->isSpirvOp())
1789 unsigned Opcode = SPIRV::lookupAtomicFloatingBuiltin(Builtin->
Name)->Opcode;
1792 case SPIRV::OpAtomicFAddEXT:
1793 case SPIRV::OpAtomicFMinEXT:
1794 case SPIRV::OpAtomicFMaxEXT:
1807 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
1818 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
1820 if (Opcode == SPIRV::OpGenericCastToPtrExplicit) {
1821 SPIRV::StorageClass::StorageClass ResSC =
1832 MIRBuilder.
buildInstr(TargetOpcode::G_ADDRSPACE_CAST)
1843 if (
Call->isSpirvOp())
1848 SPIRV::OpTypeVector;
1850 uint32_t OC = IsVec ? SPIRV::OpDot : SPIRV::OpFMulS;
1851 bool IsSwapReq =
false;
1856 (ST->canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
1860 SPIRV::lookupIntegerDotProductBuiltin(Builtin->
Name);
1870 bool IsFirstSigned = TypeStrs[0].trim()[0] !=
'u';
1871 bool IsSecondSigned = TypeStrs[1].trim()[0] !=
'u';
1873 if (
Call->BuiltinName ==
"dot") {
1874 if (IsFirstSigned && IsSecondSigned)
1876 else if (!IsFirstSigned && !IsSecondSigned)
1879 OC = SPIRV::OpSUDot;
1883 }
else if (
Call->BuiltinName ==
"dot_acc_sat") {
1884 if (IsFirstSigned && IsSecondSigned)
1885 OC = SPIRV::OpSDotAccSat;
1886 else if (!IsFirstSigned && !IsSecondSigned)
1887 OC = SPIRV::OpUDotAccSat;
1889 OC = SPIRV::OpSUDotAccSat;
1905 for (
size_t i = 2; i <
Call->Arguments.size(); ++i)
1908 for (
size_t i = 0; i <
Call->Arguments.size(); ++i)
1914 if (!IsVec && OC != SPIRV::OpFMulS)
1915 MIB.
addImm(SPIRV::PackedVectorFormat4x8Bit);
1924 SPIRV::BuiltIn::BuiltIn
Value =
1925 SPIRV::lookupGetBuiltin(Builtin->
Name, Builtin->
Set)->
Value;
1928 assert(
Call->ReturnType->getOpcode() == SPIRV::OpTypeInt);
1932 MIRBuilder,
Call->ReturnType, GR,
Value, LLType,
Call->ReturnRegister,
1933 false, std::nullopt);
1947 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
1954 if (RetType->
getOpcode() != SPIRV::OpTypeStruct)
1956 "overflow builtins");
1960 if (!OpType1 || !OpType2 || OpType1 != OpType2)
1962 if (OpType1->
getOpcode() == SPIRV::OpTypeVector)
1964 case SPIRV::OpIAddCarryS:
1965 Opcode = SPIRV::OpIAddCarryV;
1967 case SPIRV::OpISubBorrowS:
1968 Opcode = SPIRV::OpISubBorrowV;
1973 Register ResReg =
MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1975 MRI->getRegClassOrNull(
Call->Arguments[1])) {
1976 MRI->setRegClass(ResReg, DstRC);
1977 MRI->setType(ResReg,
MRI->getType(
Call->Arguments[1]));
1995 SPIRV::BuiltIn::BuiltIn
Value =
1996 SPIRV::lookupGetBuiltin(
Call->Builtin->Name,
Call->Builtin->Set)->
Value;
1997 const bool IsDefaultOne = (
Value == SPIRV::BuiltIn::GlobalSize ||
1998 Value == SPIRV::BuiltIn::NumWorkgroups ||
1999 Value == SPIRV::BuiltIn::WorkgroupSize ||
2000 Value == SPIRV::BuiltIn::EnqueuedWorkgroupSize);
2010 SPIRV::lookupImageQueryBuiltin(Builtin->
Name, Builtin->
Set)->Component;
2014 unsigned NumExpectedRetComponents =
2015 Call->ReturnType->getOpcode() == SPIRV::OpTypeVector
2016 ?
Call->ReturnType->getOperand(2).getImm()
2023 if (NumExpectedRetComponents != NumActualRetComponents) {
2024 unsigned Bitwidth =
Call->ReturnType->getOpcode() == SPIRV::OpTypeInt
2025 ?
Call->ReturnType->getOperand(1).getImm()
2032 IntTy, NumActualRetComponents, MIRBuilder,
true);
2037 IsDimBuf ? SPIRV::OpImageQuerySize : SPIRV::OpImageQuerySizeLod;
2044 if (NumExpectedRetComponents == NumActualRetComponents)
2046 if (NumExpectedRetComponents == 1) {
2048 unsigned ExtractedComposite =
2049 Component == 3 ? NumActualRetComponents - 1 : Component;
2050 assert(ExtractedComposite < NumActualRetComponents &&
2051 "Invalid composite index!");
2054 if (QueryResultType->
getOpcode() == SPIRV::OpTypeVector) {
2056 if (TypeReg != NewTypeReg &&
2058 TypeReg = NewTypeReg;
2060 MIRBuilder.
buildInstr(SPIRV::OpCompositeExtract)
2064 .
addImm(ExtractedComposite);
2065 if (NewType !=
nullptr)
2070 auto MIB = MIRBuilder.
buildInstr(SPIRV::OpVectorShuffle)
2075 for (
unsigned i = 0; i < NumExpectedRetComponents; ++i)
2076 MIB.
addImm(i < NumActualRetComponents ? i : 0xffffffff);
2084 assert(
Call->ReturnType->getOpcode() == SPIRV::OpTypeInt &&
2085 "Image samples query result must be of int type!");
2090 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
2093 SPIRV::Dim::Dim ImageDimensionality =
static_cast<SPIRV::Dim::Dim
>(
2095 (void)ImageDimensionality;
2098 case SPIRV::OpImageQuerySamples:
2099 assert(ImageDimensionality == SPIRV::Dim::DIM_2D &&
2100 "Image must be of 2D dimensionality");
2102 case SPIRV::OpImageQueryLevels:
2103 assert((ImageDimensionality == SPIRV::Dim::DIM_1D ||
2104 ImageDimensionality == SPIRV::Dim::DIM_2D ||
2105 ImageDimensionality == SPIRV::Dim::DIM_3D ||
2106 ImageDimensionality == SPIRV::Dim::DIM_Cube) &&
2107 "Image must be of 1D/2D/3D/Cube dimensionality");
2119static SPIRV::SamplerAddressingMode::SamplerAddressingMode
2121 switch (Bitmask & SPIRV::CLK_ADDRESS_MODE_MASK) {
2122 case SPIRV::CLK_ADDRESS_CLAMP:
2123 return SPIRV::SamplerAddressingMode::Clamp;
2124 case SPIRV::CLK_ADDRESS_CLAMP_TO_EDGE:
2125 return SPIRV::SamplerAddressingMode::ClampToEdge;
2126 case SPIRV::CLK_ADDRESS_REPEAT:
2127 return SPIRV::SamplerAddressingMode::Repeat;
2128 case SPIRV::CLK_ADDRESS_MIRRORED_REPEAT:
2129 return SPIRV::SamplerAddressingMode::RepeatMirrored;
2130 case SPIRV::CLK_ADDRESS_NONE:
2131 return SPIRV::SamplerAddressingMode::None;
2138 return (Bitmask & SPIRV::CLK_NORMALIZED_COORDS_TRUE) ? 1 : 0;
2141static SPIRV::SamplerFilterMode::SamplerFilterMode
2143 if (Bitmask & SPIRV::CLK_FILTER_LINEAR)
2144 return SPIRV::SamplerFilterMode::Linear;
2145 if (Bitmask & SPIRV::CLK_FILTER_NEAREST)
2146 return SPIRV::SamplerFilterMode::Nearest;
2147 return SPIRV::SamplerFilterMode::Nearest;
2154 if (
Call->isSpirvOp())
2161 if (HasOclSampler) {
2175 Register SampledImage =
MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2186 if (
Call->ReturnType->getOpcode() != SPIRV::OpTypeVector) {
2190 MRI->createGenericVirtualRegister(GR->
getRegType(TempType));
2193 MIRBuilder.
buildInstr(SPIRV::OpImageSampleExplicitLod)
2198 .
addImm(SPIRV::ImageOperand::Lod)
2200 MIRBuilder.
buildInstr(SPIRV::OpCompositeExtract)
2206 MIRBuilder.
buildInstr(SPIRV::OpImageSampleExplicitLod)
2211 .
addImm(SPIRV::ImageOperand::Lod)
2214 }
else if (HasMsaa) {
2220 .
addImm(SPIRV::ImageOperand::Sample)
2235 if (
Call->isSpirvOp())
2250 if (
Call->Builtin->Name.contains_insensitive(
2251 "__translate_sampler_initializer")) {
2258 return Sampler.isValid();
2259 }
else if (
Call->Builtin->Name.contains_insensitive(
"__spirv_SampledImage")) {
2266 Call->ReturnRegister.isValid()
2267 ?
Call->ReturnRegister
2268 :
MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2275 }
else if (
Call->Builtin->Name.contains_insensitive(
2276 "__spirv_ImageSampleExplicitLod")) {
2278 std::string ReturnType = DemangledCall.
str();
2279 if (DemangledCall.
contains(
"_R")) {
2280 ReturnType = ReturnType.substr(ReturnType.find(
"_R") + 2);
2281 ReturnType = ReturnType.substr(0, ReturnType.find(
'('));
2288 std::string DiagMsg =
2289 "Unable to recognize SPIRV type name: " + ReturnType;
2292 MIRBuilder.
buildInstr(SPIRV::OpImageSampleExplicitLod)
2297 .
addImm(SPIRV::ImageOperand::Lod)
2307 Call->Arguments[1],
Call->Arguments[2]);
2315 SPIRV::OpCompositeConstructContinuedINTEL,
2316 Call->Arguments,
Call->ReturnRegister,
2326 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
2327 bool IsSet = Opcode != SPIRV::OpCooperativeMatrixStoreKHR &&
2328 Opcode != SPIRV::OpCooperativeMatrixStoreCheckedINTEL &&
2329 Opcode != SPIRV::OpCooperativeMatrixPrefetchINTEL;
2330 unsigned ArgSz =
Call->Arguments.size();
2331 unsigned LiteralIdx = 0;
2334 case SPIRV::OpCooperativeMatrixLoadKHR:
2335 LiteralIdx = ArgSz > 3 ? 3 : 0;
2337 case SPIRV::OpCooperativeMatrixStoreKHR:
2338 LiteralIdx = ArgSz > 4 ? 4 : 0;
2340 case SPIRV::OpCooperativeMatrixLoadCheckedINTEL:
2341 LiteralIdx = ArgSz > 7 ? 7 : 0;
2343 case SPIRV::OpCooperativeMatrixStoreCheckedINTEL:
2344 LiteralIdx = ArgSz > 8 ? 8 : 0;
2347 case SPIRV::OpCooperativeMatrixMulAddKHR:
2348 LiteralIdx = ArgSz > 3 ? 3 : 0;
2354 if (Opcode == SPIRV::OpCooperativeMatrixPrefetchINTEL) {
2356 auto MIB = MIRBuilder.
buildInstr(SPIRV::OpCooperativeMatrixPrefetchINTEL)
2373 if (Opcode == SPIRV::OpCooperativeMatrixLengthKHR) {
2384 IsSet ? TypeReg :
Register(0), ImmArgs);
2393 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
2397 case SPIRV::OpSpecConstant: {
2407 (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
2408 Const->getOpcode() == TargetOpcode::G_FCONSTANT) &&
2409 "Argument should be either an int or floating-point constant");
2412 if (
Call->ReturnType->getOpcode() == SPIRV::OpTypeBool) {
2413 assert(ConstOperand.
isCImm() &&
"Int constant operand is expected");
2415 ? SPIRV::OpSpecConstantTrue
2416 : SPIRV::OpSpecConstantFalse;
2422 if (
Call->ReturnType->getOpcode() != SPIRV::OpTypeBool) {
2423 if (Const->getOpcode() == TargetOpcode::G_CONSTANT)
2430 case SPIRV::OpSpecConstantComposite: {
2432 SPIRV::OpSpecConstantCompositeContinuedINTEL,
2433 Call->Arguments,
Call->ReturnRegister,
2448 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
2459 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
2469 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
2483 const LLT ValTy =
MRI->getType(InputReg);
2484 Register ActualRetValReg =
MRI->createGenericVirtualRegister(ValTy);
2487 InputReg =
Call->Arguments[1];
2490 if (InputType->getTypeID() == llvm::Type::TypeID::TypedPointerTyID) {
2491 LLT InputLLT =
MRI->getType(InputReg);
2492 PtrInputReg =
MRI->createGenericVirtualRegister(InputLLT);
2498 MIRBuilder.
buildLoad(PtrInputReg, InputReg, *MMO1);
2499 MRI->setRegClass(PtrInputReg, &SPIRV::iIDRegClass);
2503 for (
unsigned index = 2; index < 7; index++) {
2518 unsigned Size = ValTy.getSizeInBytes();
2522 MRI->setRegClass(ActualRetValReg, &SPIRV::pIDRegClass);
2523 MIRBuilder.
buildStore(ActualRetValReg,
Call->Arguments[0], *MMO);
2526 for (
unsigned index = 1; index < 6; index++)
2539 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
2551 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
2561 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
2572 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
2582 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
2584 unsigned Scope = SPIRV::Scope::Workgroup;
2586 Scope = SPIRV::Scope::Subgroup;
2596 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
2598 bool IsSet = Opcode != SPIRV::OpPredicatedStoreINTEL;
2599 unsigned ArgSz =
Call->Arguments.size();
2609 IsSet ? TypeReg :
Register(0), ImmArgs);
2622 Register TmpReg =
MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2626 unsigned NumArgs =
Call->Arguments.size();
2628 Register GlobalWorkSize =
Call->Arguments[NumArgs < 4 ? 1 : 2];
2630 NumArgs == 2 ?
Register(0) :
Call->Arguments[NumArgs < 4 ? 2 : 3];
2635 if (SpvTy->
getOpcode() == SPIRV::OpTypePointer) {
2641 unsigned Size =
Call->Builtin->Name ==
"ndrange_3D" ? 3 : 2;
2646 FieldTy, MIRBuilder, SPIRV::AccessQualifier::ReadWrite,
true);
2647 GlobalWorkSize =
MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2656 SpvFieldTy, *ST.getInstrInfo());
2661 LocalWorkSize = Const;
2662 if (!GlobalWorkOffset.
isValid())
2663 GlobalWorkOffset = Const;
2671 .
addUse(GlobalWorkOffset);
2685 SPIRV::AccessQualifier::ReadWrite,
true);
2693 bool IsSpirvOp =
Call->isSpirvOp();
2694 bool HasEvents =
Call->Builtin->Name.contains(
"events") || IsSpirvOp;
2701 if (
Call->Builtin->Name.contains(
"_varargs") || IsSpirvOp) {
2702 const unsigned LocalSizeArrayIdx = HasEvents ? 9 : 6;
2710 assert(LocalSizeTy &&
"Local size type is expected");
2716 Int32Ty, MIRBuilder, SPIRV::StorageClass::Function);
2717 for (
unsigned I = 0;
I < LocalSizeNum; ++
I) {
2719 MRI->setType(
Reg, LLType);
2733 auto MIB = MIRBuilder.
buildInstr(SPIRV::OpEnqueueKernel)
2738 const unsigned BlockFIdx = HasEvents ? 6 : 3;
2739 for (
unsigned i = 0; i < BlockFIdx; i++)
2740 MIB.addUse(
Call->Arguments[i]);
2747 MIB.addUse(NullPtr);
2748 MIB.addUse(NullPtr);
2756 Register BlockLiteralReg =
Call->Arguments[BlockFIdx + 1];
2758 MIB.addUse(BlockLiteralReg);
2768 for (
unsigned i = 0; i < LocalSizes.
size(); i++)
2769 MIB.addUse(LocalSizes[i]);
2779 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
2782 case SPIRV::OpRetainEvent:
2783 case SPIRV::OpReleaseEvent:
2785 case SPIRV::OpCreateUserEvent:
2786 case SPIRV::OpGetDefaultQueue:
2790 case SPIRV::OpIsValidEvent:
2795 case SPIRV::OpSetUserEventStatus:
2799 case SPIRV::OpCaptureEventProfilingInfo:
2804 case SPIRV::OpBuildNDRange:
2806 case SPIRV::OpEnqueueKernel:
2819 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
2821 bool IsSet = Opcode == SPIRV::OpGroupAsyncCopy;
2823 if (
Call->isSpirvOp())
2830 case SPIRV::OpGroupAsyncCopy: {
2832 Call->ReturnType->getOpcode() == SPIRV::OpTypeEvent
2836 unsigned NumArgs =
Call->Arguments.size();
2846 ?
Call->Arguments[3]
2849 if (NewType !=
nullptr)
2854 case SPIRV::OpGroupWaitEvents:
2870 SPIRV::lookupConvertBuiltin(
Call->Builtin->Name,
Call->Builtin->Set);
2872 if (!Builtin &&
Call->isSpirvOp()) {
2875 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
2880 assert(Builtin &&
"Conversion builtin not found.");
2883 SPIRV::Decoration::SaturatedConversion, {});
2886 SPIRV::Decoration::FPRoundingMode,
2887 {(unsigned)Builtin->RoundingMode});
2889 std::string NeedExtMsg;
2890 bool IsRightComponentsNumber =
true;
2891 unsigned Opcode = SPIRV::OpNop;
2898 : SPIRV::OpSatConvertSToU;
2901 : SPIRV::OpSConvert;
2903 SPIRV::OpTypeFloat)) {
2907 &MIRBuilder.
getMF().getSubtarget());
2908 if (!ST->canUseExtension(
2909 SPIRV::Extension::SPV_INTEL_bfloat16_conversion))
2910 NeedExtMsg =
"SPV_INTEL_bfloat16_conversion";
2911 IsRightComponentsNumber =
2914 Opcode = SPIRV::OpConvertBF16ToFINTEL;
2916 bool IsSourceSigned =
2918 Opcode = IsSourceSigned ? SPIRV::OpConvertSToF : SPIRV::OpConvertUToF;
2922 SPIRV::OpTypeFloat)) {
2928 &MIRBuilder.
getMF().getSubtarget());
2929 if (!ST->canUseExtension(
2930 SPIRV::Extension::SPV_INTEL_bfloat16_conversion))
2931 NeedExtMsg =
"SPV_INTEL_bfloat16_conversion";
2932 IsRightComponentsNumber =
2935 Opcode = SPIRV::OpConvertFToBF16INTEL;
2938 : SPIRV::OpConvertFToU;
2941 SPIRV::OpTypeFloat)) {
2944 &MIRBuilder.
getMF().getSubtarget());
2945 if (!ST->canUseExtension(
2946 SPIRV::Extension::SPV_INTEL_tensor_float32_conversion))
2947 NeedExtMsg =
"SPV_INTEL_tensor_float32_conversion";
2948 IsRightComponentsNumber =
2951 Opcode = SPIRV::OpRoundFToTF32INTEL;
2954 Opcode = SPIRV::OpFConvert;
2959 if (!NeedExtMsg.empty()) {
2960 std::string DiagMsg = std::string(Builtin->
Name) +
2961 ": the builtin requires the following SPIR-V "
2966 if (!IsRightComponentsNumber) {
2967 std::string DiagMsg =
2968 std::string(Builtin->
Name) +
2969 ": result and argument must have the same number of components";
2972 assert(Opcode != SPIRV::OpNop &&
2973 "Conversion between the types not implemented!");
2987 SPIRV::lookupVectorLoadStoreBuiltin(
Call->Builtin->Name,
2988 Call->Builtin->Set);
2994 .
addImm(
static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
3014 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
3015 bool IsLoad = Opcode == SPIRV::OpLoad;
3019 MIB.addDef(
Call->ReturnRegister);
3027 MIB.addUse(
Call->Arguments[1]);
3029 unsigned NumArgs =
Call->Arguments.size();
3030 if ((IsLoad && NumArgs >= 2) || NumArgs >= 3)
3032 if ((IsLoad && NumArgs >= 3) || NumArgs >= 4)
3045std::tuple<int, unsigned, unsigned>
3047 SPIRV::InstructionSet::InstructionSet Set) {
3050 std::unique_ptr<const IncomingCall>
Call =
3053 return std::make_tuple(-1, 0, 0);
3055 switch (
Call->Builtin->Group) {
3056 case SPIRV::Relational:
3058 case SPIRV::Barrier:
3059 case SPIRV::CastToPtr:
3060 case SPIRV::ImageMiscQuery:
3061 case SPIRV::SpecConstant:
3062 case SPIRV::Enqueue:
3063 case SPIRV::AsyncCopy:
3064 case SPIRV::LoadStore:
3065 case SPIRV::CoopMatr:
3067 SPIRV::lookupNativeBuiltin(
Call->Builtin->Name,
Call->Builtin->Set))
3068 return std::make_tuple(
Call->Builtin->Group, R->Opcode, 0);
3070 case SPIRV::Extended:
3071 if (
const auto *R = SPIRV::lookupExtendedBuiltin(
Call->Builtin->Name,
3072 Call->Builtin->Set))
3073 return std::make_tuple(
Call->Builtin->Group, 0, R->Number);
3075 case SPIRV::VectorLoadStore:
3076 if (
const auto *R = SPIRV::lookupVectorLoadStoreBuiltin(
Call->Builtin->Name,
3077 Call->Builtin->Set))
3078 return std::make_tuple(SPIRV::Extended, 0, R->Number);
3081 if (
const auto *R = SPIRV::lookupGroupBuiltin(
Call->Builtin->Name))
3082 return std::make_tuple(
Call->Builtin->Group, R->Opcode, 0);
3084 case SPIRV::AtomicFloating:
3085 if (
const auto *R = SPIRV::lookupAtomicFloatingBuiltin(
Call->Builtin->Name))
3086 return std::make_tuple(
Call->Builtin->Group, R->Opcode, 0);
3088 case SPIRV::IntelSubgroups:
3089 if (
const auto *R = SPIRV::lookupIntelSubgroupsBuiltin(
Call->Builtin->Name))
3090 return std::make_tuple(
Call->Builtin->Group, R->Opcode, 0);
3092 case SPIRV::GroupUniform:
3093 if (
const auto *R = SPIRV::lookupGroupUniformBuiltin(
Call->Builtin->Name))
3094 return std::make_tuple(
Call->Builtin->Group, R->Opcode, 0);
3096 case SPIRV::IntegerDot:
3098 SPIRV::lookupIntegerDotProductBuiltin(
Call->Builtin->Name))
3099 return std::make_tuple(
Call->Builtin->Group, R->Opcode, 0);
3101 case SPIRV::WriteImage:
3102 return std::make_tuple(
Call->Builtin->Group, SPIRV::OpImageWrite, 0);
3104 return std::make_tuple(
Call->Builtin->Group, TargetOpcode::G_SELECT, 0);
3105 case SPIRV::Construct:
3106 return std::make_tuple(
Call->Builtin->Group, SPIRV::OpCompositeConstruct,
3108 case SPIRV::KernelClock:
3109 return std::make_tuple(
Call->Builtin->Group, SPIRV::OpReadClockKHR, 0);
3111 return std::make_tuple(-1, 0, 0);
3113 return std::make_tuple(-1, 0, 0);
3117 SPIRV::InstructionSet::InstructionSet Set,
3122 LLVM_DEBUG(
dbgs() <<
"Lowering builtin call: " << DemangledCall <<
"\n");
3126 assert(SpvType &&
"Inconsistent return register: expected valid type info");
3127 std::unique_ptr<const IncomingCall>
Call =
3132 return std::nullopt;
3136 assert(Args.size() >=
Call->Builtin->MinNumArgs &&
3137 "Too few arguments to generate the builtin");
3138 if (
Call->Builtin->MaxNumArgs && Args.size() >
Call->Builtin->MaxNumArgs)
3139 LLVM_DEBUG(
dbgs() <<
"More arguments provided than required!\n");
3142 switch (
Call->Builtin->Group) {
3143 case SPIRV::Extended:
3145 case SPIRV::Relational:
3149 case SPIRV::Variable:
3153 case SPIRV::AtomicFloating:
3155 case SPIRV::Barrier:
3157 case SPIRV::CastToPtr:
3160 case SPIRV::IntegerDot:
3164 case SPIRV::ICarryBorrow:
3166 case SPIRV::GetQuery:
3168 case SPIRV::ImageSizeQuery:
3170 case SPIRV::ImageMiscQuery:
3172 case SPIRV::ReadImage:
3174 case SPIRV::WriteImage:
3176 case SPIRV::SampleImage:
3180 case SPIRV::Construct:
3182 case SPIRV::SpecConstant:
3184 case SPIRV::Enqueue:
3186 case SPIRV::AsyncCopy:
3188 case SPIRV::Convert:
3190 case SPIRV::VectorLoadStore:
3192 case SPIRV::LoadStore:
3194 case SPIRV::IntelSubgroups:
3196 case SPIRV::GroupUniform:
3198 case SPIRV::KernelClock:
3200 case SPIRV::CoopMatr:
3202 case SPIRV::ExtendedBitOps:
3204 case SPIRV::BindlessINTEL:
3206 case SPIRV::TernaryBitwiseINTEL:
3208 case SPIRV::Block2DLoadStore:
3212 case SPIRV::PredicatedLoadStore:
3214 case SPIRV::BlockingPipes:
3216 case SPIRV::ArbitraryPrecisionFixedPoint:
3218 case SPIRV::ImageChannelDataTypes:
3229 [[maybe_unused]]
bool IsOCLBuiltinType = TypeStr.
consume_front(
"ocl_");
3230 assert(IsOCLBuiltinType &&
"Invalid OpenCL builtin prefix");
3247 unsigned VecElts = 0;
3258 TypeStr = TypeStr.
substr(0, TypeStr.
find(
']'));
3270 auto Pos1 = DemangledCall.
find(
'(');
3273 auto Pos2 = DemangledCall.
find(
')');
3276 DemangledCall.
slice(Pos1 + 1, Pos2)
3277 .
split(BuiltinArgsTypeStrs,
',', -1,
false);
3285 if (ArgIdx >= BuiltinArgsTypeStrs.
size())
3287 StringRef TypeStr = BuiltinArgsTypeStrs[ArgIdx].trim();
3296#define GET_BuiltinTypes_DECL
3297#define GET_BuiltinTypes_IMPL
3304#define GET_OpenCLTypes_DECL
3305#define GET_OpenCLTypes_IMPL
3307#include "SPIRVGenTables.inc"
3315 if (Name.starts_with(
"void"))
3317 else if (Name.starts_with(
"int") || Name.starts_with(
"uint"))
3319 else if (Name.starts_with(
"float"))
3321 else if (Name.starts_with(
"half"))
3334 unsigned Opcode = TypeRecord->
Opcode;
3349 "Invalid number of parameters for SPIR-V pipe builtin!");
3352 SPIRV::AccessQualifier::AccessQualifier(
3360 "Invalid number of parameters for SPIR-V coop matrices builtin!");
3362 "SPIR-V coop matrices builtin type must have a type parameter!");
3365 SPIRV::AccessQualifier::ReadWrite,
true);
3368 MIRBuilder, ExtensionType, ElemType, ExtensionType->
getIntParameter(0),
3377 OpaqueType, SPIRV::AccessQualifier::ReadOnly, MIRBuilder);
3386 "Inline SPIR-V type builtin takes an opcode, size, and alignment "
3393 if (ParamEType->getName() ==
"spirv.IntegralConstant") {
3394 assert(ParamEType->getNumTypeParameters() == 1 &&
3395 "Inline SPIR-V integral constant builtin must have a type "
3397 assert(ParamEType->getNumIntParameters() == 1 &&
3398 "Inline SPIR-V integral constant builtin must have a "
3401 auto OperandValue = ParamEType->getIntParameter(0);
3402 auto *OperandType = ParamEType->getTypeParameter(0);
3405 OperandType, MIRBuilder, SPIRV::AccessQualifier::ReadWrite,
true);
3408 OperandValue, MIRBuilder, OperandSPIRVType,
true)));
3410 }
else if (ParamEType->getName() ==
"spirv.Literal") {
3411 assert(ParamEType->getNumTypeParameters() == 0 &&
3412 "Inline SPIR-V literal builtin does not take type "
3414 assert(ParamEType->getNumIntParameters() == 1 &&
3415 "Inline SPIR-V literal builtin must have an integer "
3418 auto OperandValue = ParamEType->getIntParameter(0);
3425 Param, MIRBuilder, SPIRV::AccessQualifier::ReadWrite,
true);
3437 "Vulkan buffers have exactly one type for the type of the buffer.");
3439 "Vulkan buffer have 2 integer parameters: storage class and is "
3443 auto SC =
static_cast<SPIRV::StorageClass::StorageClass
>(
3458 StringRef NameWithParameters = TypeName;
3465 SPIRV::lookupOpenCLType(NameWithParameters);
3468 NameWithParameters);
3476 "Unknown builtin opaque type!");
3480 if (!NameWithParameters.
contains(
'_'))
3484 unsigned BaseNameLength = NameWithParameters.
find(
'_') - 1;
3488 bool HasTypeParameter = !
isDigit(Parameters[0][0]);
3489 if (HasTypeParameter)
3492 for (
unsigned i = HasTypeParameter ? 1 : 0; i < Parameters.size(); i++) {
3493 unsigned IntParameter = 0;
3494 bool ValidLiteral = !Parameters[i].getAsInteger(10, IntParameter);
3497 "Invalid format of SPIR-V builtin parameter literal!");
3501 NameWithParameters.
substr(0, BaseNameLength),
3502 TypeParameters, IntParameters);
3506 SPIRV::AccessQualifier::AccessQualifier AccessQual,
3528 if (Name ==
"spirv.Type") {
3530 }
else if (Name ==
"spirv.VulkanBuffer") {
3532 }
else if (Name ==
"spirv.Padding") {
3534 }
else if (Name ==
"spirv.Layout") {
3548 switch (TypeRecord->
Opcode) {
3549 case SPIRV::OpTypeImage:
3552 case SPIRV::OpTypePipe:
3555 case SPIRV::OpTypeDeviceEvent:
3558 case SPIRV::OpTypeSampler:
3561 case SPIRV::OpTypeSampledImage:
3564 case SPIRV::OpTypeCooperativeMatrixKHR:
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder MachineInstrBuilder & DefMI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU Lower Kernel Arguments
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Promote Memory to Register
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
static const fltSemantics & IEEEsingle()
APInt bitcastToAPInt() const
static APFloat getZero(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Zero.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
uint64_t getZExtValue() const
Get zero extended value.
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
static LLVM_ABI ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
@ ICMP_ULT
unsigned less than
const APFloat & getValueAPF() const
const APInt & getValue() const
Return the constant as an APInt value reference.
A parsed version of the target data layout string in and methods for querying it.
Tagged union holding either a T or a Error.
Class to represent fixed width SIMD vectors.
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
static constexpr LLT vector(ElementCount EC, unsigned ScalarSizeInBits)
Get a low-level vector of some number of elements and element width.
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
constexpr TypeSize getSizeInBytes() const
Returns the total size of the type in bytes, i.e.
This is an important class for using LLVM in a threaded context.
static MCOperand createReg(MCRegister Reg)
static MCOperand createImm(int64_t Val)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Helper class to build MachineInstr.
LLVMContext & getContext() const
MachineInstrBuilder buildSelect(const DstOp &Res, const SrcOp &Tst, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_SELECT Tst, Op0, Op1.
MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_ICMP Pred, Op0, Op1.
MachineBasicBlock::iterator getInsertPt()
Current insertion point for new instructions.
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef< Register > Res, bool HasSideEffects, bool isConvergent)
Build and insert a G_INTRINSIC instruction.
MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
MachineInstrBuilder buildZExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert G_STORE Val, Addr, MMO.
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineFunction & getMF()
Getter for the function we currently build.
MachineRegisterInfo * getMRI()
Getter for MRI.
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
const DataLayout & getDataLayout() const
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
LLVM_ABI void copyIRFlags(const Instruction &I)
Copy all flags to MachineInst MIFlags.
const MachineOperand & getOperand(unsigned i) const
A description of a memory reference used in the backend.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
MachineOperand class - Representation of each machine instruction operand.
const GlobalValue * getGlobal() const
const ConstantInt * getCImm() const
bool isCImm() const
isCImm - Test if this is a MO_CImmediate operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
const MDNode * getMetadata() const
Register getReg() const
getReg - Returns the register number.
const ConstantFP * getFPImm() const
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
LLVM_ABI void setType(Register VReg, LLT Ty)
Set the low-level type of VReg to Ty.
LLVM_ABI void setRegClass(Register Reg, const TargetRegisterClass *RC)
setRegClass - Set the register class of the specified virtual register.
LLVM_ABI Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
unsigned getNumVirtRegs() const
getNumVirtRegs - Return the number of virtual registers created.
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Wrapper class representing virtual and physical registers.
constexpr bool isValid() const
SPIRVType * getOrCreateOpTypePipe(MachineIRBuilder &MIRBuilder, SPIRV::AccessQualifier::AccessQualifier AccQual)
SPIRVType * getSPIRVTypeForVReg(Register VReg, const MachineFunction *MF=nullptr) const
SPIRVType * getOrCreateSPIRVBoolType(MachineIRBuilder &MIRBuilder, bool EmitIR)
SPIRVType * getOrCreatePaddingType(MachineIRBuilder &MIRBuilder)
void assignSPIRVTypeToVReg(SPIRVType *Type, Register VReg, const MachineFunction &MF)
Register buildGlobalVariable(Register Reg, SPIRVType *BaseType, StringRef Name, const GlobalValue *GV, SPIRV::StorageClass::StorageClass Storage, const MachineInstr *Init, bool IsConst, const std::optional< SPIRV::LinkageType::LinkageType > &LinkageType, MachineIRBuilder &MIRBuilder, bool IsInstSelector)
const Type * getTypeForSPIRVType(const SPIRVType *Ty) const
SPIRVType * getOrCreateUnknownType(const Type *Ty, MachineIRBuilder &MIRBuilder, unsigned Opcode, const ArrayRef< MCOperand > Operands)
unsigned getScalarOrVectorComponentCount(Register VReg) const
SPIRVType * getOrCreateSPIRVType(const Type *Type, MachineInstr &I, SPIRV::AccessQualifier::AccessQualifier AQ, bool EmitIR)
unsigned getPointerSize() const
SPIRVType * getOrCreateSPIRVPointerType(const Type *BaseType, MachineIRBuilder &MIRBuilder, SPIRV::StorageClass::StorageClass SC)
SPIRVType * getOrCreateOpTypeByOpcode(const Type *Ty, MachineIRBuilder &MIRBuilder, unsigned Opcode)
Register buildConstantFP(APFloat Val, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType=nullptr)
SPIRVType * getPointeeType(SPIRVType *PtrType)
Register getSPIRVTypeID(const SPIRVType *SpirvType) const
bool isScalarOfType(Register VReg, unsigned TypeOpcode) const
SPIRVType * getOrCreateOpTypeSampledImage(SPIRVType *ImageType, MachineIRBuilder &MIRBuilder)
SPIRVType * getOrCreateVulkanBufferType(MachineIRBuilder &MIRBuilder, Type *ElemType, SPIRV::StorageClass::StorageClass SC, bool IsWritable, bool EmitIr=false)
SPIRVType * getOrCreateSPIRVTypeByName(StringRef TypeStr, MachineIRBuilder &MIRBuilder, bool EmitIR, SPIRV::StorageClass::StorageClass SC=SPIRV::StorageClass::Function, SPIRV::AccessQualifier::AccessQualifier AQ=SPIRV::AccessQualifier::ReadWrite)
SPIRVType * getOrCreateLayoutType(MachineIRBuilder &MIRBuilder, const TargetExtType *T, bool EmitIr=false)
Register getOrCreateConsIntVector(uint64_t Val, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType, bool EmitIR)
const TargetRegisterClass * getRegClass(SPIRVType *SpvType) const
SPIRVType * getOrCreateSPIRVVectorType(SPIRVType *BaseType, unsigned NumElements, MachineIRBuilder &MIRBuilder, bool EmitIR)
SPIRVType * getOrCreateOpTypeCoopMatr(MachineIRBuilder &MIRBuilder, const TargetExtType *ExtensionType, const SPIRVType *ElemType, uint32_t Scope, uint32_t Rows, uint32_t Columns, uint32_t Use, bool EmitIR)
bool isScalarOrVectorOfType(Register VReg, unsigned TypeOpcode) const
Register getOrCreateConstIntArray(uint64_t Val, size_t Num, MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII)
SPIRVType * getOrCreateOpTypeDeviceEvent(MachineIRBuilder &MIRBuilder)
SPIRVType * getImageType(const TargetExtType *ExtensionType, const SPIRV::AccessQualifier::AccessQualifier Qualifier, MachineIRBuilder &MIRBuilder)
SPIRVType * getOrCreateSPIRVIntegerType(unsigned BitWidth, MachineIRBuilder &MIRBuilder)
Register buildConstantInt(uint64_t Val, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType, bool EmitIR, bool ZeroAsNull=true)
LLT getRegType(SPIRVType *SpvType) const
SPIRV::StorageClass::StorageClass getPointerStorageClass(Register VReg) const
SPIRVType * getOrCreateOpTypeSampler(MachineIRBuilder &MIRBuilder)
Register buildConstantSampler(Register Res, unsigned AddrMode, unsigned Param, unsigned FilerMode, MachineIRBuilder &MIRBuilder)
Register getOrCreateConstNullPtr(MachineIRBuilder &MIRBuilder, SPIRVType *SpvType)
unsigned getScalarOrVectorBitWidth(const SPIRVType *Type) const
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
static constexpr size_t npos
bool consume_back(StringRef Suffix)
Returns true if this StringRef has the given suffix and removes that suffix.
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
std::string str() const
str - Get the contents as an std::string.
constexpr StringRef substr(size_t Start, size_t N=npos) const
Return a reference to the substring from [Start, Start + N).
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
bool contains_insensitive(StringRef Other) const
Return true if the given string is a substring of *this, and false otherwise.
StringRef slice(size_t Start, size_t End) const
Return a reference to the substring from [Start, End).
constexpr size_t size() const
size - Get the string size.
bool contains(StringRef Other) const
Return true if the given string is a substring of *this, and false otherwise.
bool consume_front(StringRef Prefix)
Returns true if this StringRef has the given prefix and removes that prefix.
size_t find_first_of(char C, size_t From=0) const
Find the first character in the string that is C, or npos if not found.
size_t rfind(char C, size_t From=npos) const
Search for the last character C in the string.
size_t find(char C, size_t From=0) const
Search for the first character C in the string.
bool ends_with(StringRef Suffix) const
Check if this string ends with the given Suffix.
A switch()-like statement whose cases are string literals.
StringSwitch & EndsWith(StringLiteral S, T Value)
Class to represent struct types.
Class to represent target extensions types, which are generally unintrospectable from target-independ...
ArrayRef< Type * > type_params() const
Return the type parameters for this particular target extension type.
unsigned getNumIntParameters() const
static LLVM_ABI TargetExtType * get(LLVMContext &Context, StringRef Name, ArrayRef< Type * > Types={}, ArrayRef< unsigned > Ints={})
Return a target extension type having the specified name and optional type and integer parameters.
Type * getTypeParameter(unsigned i) const
unsigned getNumTypeParameters() const
unsigned getIntParameter(unsigned i) const
The instances of the Type class are immutable: once they are created, they are never changed.
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
LLVM_ABI StringRef getStructName() const
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
static LLVM_ABI Type * getFloatTy(LLVMContext &C)
static LLVM_ABI Type * getHalfTy(LLVMContext &C)
bool isVoidTy() const
Return true if this is 'void'.
LLVM Value Representation.
LLVM_ABI Value(Type *Ty, unsigned scid)
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
Represents a version number in the form major[.minor[.subminor[.build]]].
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
LLVM_C_ABI LLVMTypeRef LLVMVectorType(LLVMTypeRef ElementType, unsigned ElementCount)
Create a vector type that contains a defined type and has a specific number of elements.
std::string lookupBuiltinNameHelper(StringRef DemangledCall, FPDecorationId *DecorationId)
Parses the name part of the demangled builtin call.
Type * parseBuiltinCallArgumentType(StringRef TypeStr, LLVMContext &Ctx)
bool parseBuiltinTypeStr(SmallVector< StringRef, 10 > &BuiltinArgsTypeStrs, const StringRef DemangledCall, LLVMContext &Ctx)
std::optional< bool > lowerBuiltin(const StringRef DemangledCall, SPIRV::InstructionSet::InstructionSet Set, MachineIRBuilder &MIRBuilder, const Register OrigRet, const Type *OrigRetTy, const SmallVectorImpl< Register > &Args, SPIRVGlobalRegistry *GR, const CallBase &CB)
std::tuple< int, unsigned, unsigned > mapBuiltinToOpcode(const StringRef DemangledCall, SPIRV::InstructionSet::InstructionSet Set)
Helper function for finding a builtin function attributes by a demangled function name.
Type * parseBuiltinCallArgumentBaseType(const StringRef DemangledCall, unsigned ArgIdx, LLVMContext &Ctx)
Parses the provided ArgIdx argument base type in the DemangledCall skeleton.
TargetExtType * parseBuiltinTypeNameToTargetExtType(std::string TypeName, LLVMContext &Context)
Translates a string representing a SPIR-V or OpenCL builtin type to a TargetExtType that can be furth...
SPIRVType * lowerBuiltinType(const Type *OpaqueType, SPIRV::AccessQualifier::AccessQualifier AccessQual, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
This is an optimization pass for GlobalISel generic memory operations.
void buildOpName(Register Target, const StringRef &Name, MachineIRBuilder &MIRBuilder)
static bool build2DBlockIOINTELInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building Intel's 2d block io instructions.
static SPIRVType * getVulkanBufferType(const TargetExtType *ExtensionType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateExtInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR, const CallBase &CB)
static bool generateBindlessImageINTELInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateGetQueryInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateLoadStoreInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getInlineSpirvType(const TargetExtType *ExtensionType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateConstructInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildAtomicFlagInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building atomic flag instructions (e.g.
static bool generateImageSizeQueryInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRV::SamplerFilterMode::SamplerFilterMode getSamplerFilterModeFromBitmask(unsigned Bitmask)
static bool buildAtomicStoreInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic store instruction.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
FunctionAddr VTableAddr uintptr_t uintptr_t Int32Ty
void addNumImm(const APInt &Imm, MachineInstrBuilder &MIB)
static bool buildExtendedBitOpsInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building extended bit operations.
static const Type * getBlockStructType(Register ParamReg, MachineRegisterInfo *MRI)
static bool generateGroupInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
FPDecorationId demangledPostfixToDecorationId(const std::string &S)
void updateRegType(Register Reg, Type *Ty, SPIRVType *SpirvTy, SPIRVGlobalRegistry *GR, MachineIRBuilder &MIB, MachineRegisterInfo &MRI)
Helper external function for assigning SPIRVType to a register, ensuring the register class and type ...
static unsigned getNumComponentsForDim(SPIRV::Dim::Dim dim)
static bool generateImageChannelDataTypeInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool builtinMayNeedPromotionToVec(uint32_t BuiltinNumber)
static bool generateICarryBorrowInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static Register buildScopeReg(Register CLScopeRegister, SPIRV::Scope::Scope Scope, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR, MachineRegisterInfo *MRI)
static std::tuple< Register, SPIRVType * > buildBoolRegister(MachineIRBuilder &MIRBuilder, const SPIRVType *ResultType, SPIRVGlobalRegistry *GR)
Helper function building either a resulting scalar or vector bool register depending on the expected ...
static unsigned getNumSizeComponents(SPIRVType *imgType)
Helper function for obtaining the number of size components.
uint64_t getIConstVal(Register ConstReg, const MachineRegisterInfo *MRI)
static Register buildConstantIntReg32(uint64_t Val, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getSampledImageType(const TargetExtType *OpaqueType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
SmallVector< MachineInstr *, 4 > createContinuedInstructions(MachineIRBuilder &MIRBuilder, unsigned Opcode, unsigned MinWC, unsigned ContinuedOpcode, ArrayRef< Register > Args, Register ReturnRegister, Register TypeID)
SPIRV::MemorySemantics::MemorySemantics getMemSemanticsForStorageClass(SPIRV::StorageClass::StorageClass SC)
constexpr unsigned storageClassToAddressSpace(SPIRV::StorageClass::StorageClass SC)
static bool generateSampleImageInst(const StringRef DemangledCall, const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateBarrierInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
LLVM_ABI void SplitString(StringRef Source, SmallVectorImpl< StringRef > &OutFragments, StringRef Delimiters=" \t\n\v\f\r")
SplitString - Split up the specified string according to the specified delimiters, appending the result fragments to the output list.
static SPIRVType * getCoopMatrType(const TargetExtType *ExtensionType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildImageChannelDataTypeInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateKernelClockInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static void setRegClassIfNull(Register Reg, MachineRegisterInfo *MRI, SPIRVGlobalRegistry *GR)
static bool generateGroupUniformInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateWaveInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
void buildOpDecorate(Register Reg, MachineIRBuilder &MIRBuilder, SPIRV::Decoration::Decoration Dec, const std::vector< uint32_t > &DecArgs, StringRef StrImm)
Register createVirtualRegister(SPIRVType *SpvType, SPIRVGlobalRegistry *GR, MachineRegisterInfo *MRI, const MachineFunction &MF)
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
static bool buildBarrierInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building barriers, i.e., memory/control ordering operations.
static bool generateAsyncCopy(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRV::Scope::Scope getSPIRVScope(SPIRV::CLMemoryScope ClScope)
static bool buildAPFixedPointInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getSamplerType(MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateBlockingPipesInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
static Register buildLoadInst(SPIRVType *BaseType, Register PtrRegister, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR, LLT LowLevelType, Register DestinationReg=Register(0))
Helper function for building a load instruction loading into the DestinationReg.
static bool generateEnqueueInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
static bool buildSelectInst(MachineIRBuilder &MIRBuilder, Register ReturnRegister, Register SourceRegister, const SPIRVType *ReturnType, SPIRVGlobalRegistry *GR)
Helper function for building either a vector or scalar select instruction depending on the expected return type.
static const Type * getMachineInstrType(MachineInstr *MI)
bool isDigit(char C)
Checks if character C is one of the 10 decimal digits.
static SPIRV::SamplerAddressingMode::SamplerAddressingMode getSamplerAddressingModeFromBitmask(unsigned Bitmask)
static bool generateAtomicInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
const MachineInstr SPIRVType
static SPIRVType * getLayoutType(const TargetExtType *ExtensionType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateDotOrFMulInst(const StringRef DemangledCall, const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static Register buildBuiltinVariableLoad(MachineIRBuilder &MIRBuilder, SPIRVType *VariableType, SPIRVGlobalRegistry *GR, SPIRV::BuiltIn::BuiltIn BuiltinValue, LLT LLType, Register Reg=Register(0), bool isConst=true, const std::optional< SPIRV::LinkageType::LinkageType > &LinkageTy={ SPIRV::LinkageType::Import})
Helper function for building a load instruction for loading a builtin global variable of BuiltinValue value.
static bool generateConvertInst(const StringRef DemangledCall, const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateTernaryBitwiseFunctionINTELInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateCastToPtrInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
constexpr bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC)
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector<T, 0>).
static Register buildMemSemanticsReg(Register SemanticsRegister, Register PtrRegister, unsigned &Semantics, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static unsigned getConstFromIntrinsic(Register Reg, MachineRegisterInfo *MRI)
static bool generateImageMiscQueryInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateSelectInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder)
static bool buildAtomicLoadInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic load instruction.
static bool generateIntelSubgroupsInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateExtendedBitOpsInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildPipeInst(const SPIRV::IncomingCall *Call, unsigned Opcode, unsigned Scope, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateSpecConstantInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getOrCreateSPIRVDeviceEventPointer(MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Type * parseBasicTypeName(StringRef &TypeName, LLVMContext &Ctx)
static bool generateVectorLoadStoreInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool genWorkgroupQuery(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR, SPIRV::BuiltIn::BuiltIn BuiltinValue, uint64_t DefaultValue)
static bool generateCoopMatrInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SmallVector< Register > getBuiltinCallArguments(const SPIRV::IncomingCall *Call, uint32_t BuiltinNumber, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildBindlessImageINTELInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building Intel's bindless image instructions.
static std::unique_ptr< const SPIRV::IncomingCall > lookupBuiltin(StringRef DemangledCall, SPIRV::InstructionSet::InstructionSet Set, Register ReturnRegister, const SPIRVType *ReturnType, const SmallVectorImpl< Register > &Arguments)
Looks up the demangled builtin call in the SPIRVBuiltins.td records using the provided DemangledCall skeleton and the specified instruction Set.
static bool buildAtomicFloatingRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic floating-type instruction.
MachineInstr * getDefInstrMaybeConstant(Register &ConstReg, const MachineRegisterInfo *MRI)
constexpr unsigned BitWidth
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
static bool generate2DBlockIOINTELInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateReadImageInst(const StringRef DemangledCall, const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
bool hasBuiltinTypePrefix(StringRef Name)
static bool buildEnqueueKernel(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Type * getMDOperandAsType(const MDNode *N, unsigned I)
static bool generatePipeInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildTernaryBitwiseFunctionINTELInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building Intel's OpBitwiseFunctionINTEL instruction.
static bool generateAPFixedPointInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildAtomicRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building atomic instructions.
static SPIRV::MemorySemantics::MemorySemantics getSPIRVMemSemantics(std::memory_order MemOrder)
static bool generateRelationalInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildAtomicInitInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder)
Helper function for translating atomic init to OpStore.
static bool generateWriteImageInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getPipeType(const TargetExtType *ExtensionType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static Type * parseTypeString(const StringRef Name, LLVMContext &Context)
bool isSpvIntrinsic(const MachineInstr &MI, Intrinsic::ID IntrinsicID)
static bool generatePredicatedLoadStoreInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateAtomicFloatingInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildNDRange(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getNonParameterizedType(const TargetExtType *ExtensionType, const SPIRV::BuiltinType *TypeRecord, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static MachineInstr * getBlockStructInstr(Register ParamReg, MachineRegisterInfo *MRI)
static bool buildOpFromWrapper(MachineIRBuilder &MIRBuilder, unsigned Opcode, const SPIRV::IncomingCall *Call, Register TypeReg, ArrayRef< uint32_t > ImmArgs={})
static unsigned getSamplerParamFromBitmask(unsigned Bitmask)
static bool buildAtomicCompareExchangeInst(const SPIRV::IncomingCall *Call, const SPIRV::DemangledBuiltin *Builtin, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic compare-exchange instruction.
std::string getLinkStringForBuiltIn(SPIRV::BuiltIn::BuiltIn BuiltInValue)
static bool generateBuiltinVar(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Implement std::hash so that hash_code can be used in STL containers.
This struct is a compact representation of a valid (non-zero power of two) alignment.
This class contains a discriminated union of information about pointers in memory operands,...
FPRoundingMode::FPRoundingMode RoundingMode
InstructionSet::InstructionSet Set
InstructionSet::InstructionSet Set
InstructionSet::InstructionSet Set
InstructionSet::InstructionSet Set
const SmallVectorImpl< Register > & Arguments
const std::string BuiltinName
const SPIRVType * ReturnType
const Register ReturnRegister
const DemangledBuiltin * Builtin
IncomingCall(const std::string BuiltinName, const DemangledBuiltin *Builtin, const Register ReturnRegister, const SPIRVType *ReturnType, const SmallVectorImpl< Register > &Arguments)
InstructionSet::InstructionSet Set
StringRef SpirvTypeLiteral
InstructionSet::InstructionSet Set
FPRoundingMode::FPRoundingMode RoundingMode