20#include "llvm/IR/IntrinsicsSPIRV.h"
25#define DEBUG_TYPE "spirv-builtins"
29#define GET_BuiltinGroup_DECL
30#include "SPIRVGenTables.inc"
34 InstructionSet::InstructionSet
Set;
40#define GET_DemangledBuiltins_DECL
41#define GET_DemangledBuiltins_IMPL
63 InstructionSet::InstructionSet
Set;
67#define GET_NativeBuiltins_DECL
68#define GET_NativeBuiltins_IMPL
86#define GET_GroupBuiltins_DECL
87#define GET_GroupBuiltins_IMPL
97#define GET_IntelSubgroupsBuiltins_DECL
98#define GET_IntelSubgroupsBuiltins_IMPL
105#define GET_AtomicFloatingBuiltins_DECL
106#define GET_AtomicFloatingBuiltins_IMPL
113#define GET_GroupUniformBuiltins_DECL
114#define GET_GroupUniformBuiltins_IMPL
118 InstructionSet::InstructionSet
Set;
123#define GET_GetBuiltins_DECL
124#define GET_GetBuiltins_IMPL
128 InstructionSet::InstructionSet
Set;
132#define GET_ImageQueryBuiltins_DECL
133#define GET_ImageQueryBuiltins_IMPL
141#define GET_IntegerDotProductBuiltins_DECL
142#define GET_IntegerDotProductBuiltins_IMPL
146 InstructionSet::InstructionSet
Set;
157 InstructionSet::InstructionSet
Set;
165#define GET_ConvertBuiltins_DECL
166#define GET_ConvertBuiltins_IMPL
168using namespace InstructionSet;
169#define GET_VectorLoadStoreBuiltins_DECL
170#define GET_VectorLoadStoreBuiltins_IMPL
172#define GET_CLMemoryScope_DECL
173#define GET_CLSamplerAddressingMode_DECL
174#define GET_CLMemoryFenceFlags_DECL
175#define GET_ExtendedBuiltins_DECL
176#include "SPIRVGenTables.inc"
188 StringRef PassPrefix =
"(anonymous namespace)::";
190 std::string BuiltinName = DemangledCall.
str();
195 std::size_t Pos = BuiltinName.find(
">(");
196 if (Pos != std::string::npos) {
197 BuiltinName = BuiltinName.substr(0, BuiltinName.rfind(
'<', Pos));
199 Pos = BuiltinName.find(
'(');
200 if (Pos != std::string::npos)
201 BuiltinName = BuiltinName.substr(0, Pos);
203 BuiltinName = BuiltinName.substr(BuiltinName.find_last_of(
' ') + 1);
207 if (BuiltinName.find(PassPrefix) == 0)
208 BuiltinName = BuiltinName.substr(PassPrefix.
size());
209 else if (BuiltinName.find(SpvPrefix) == 0)
210 BuiltinName = BuiltinName.substr(SpvPrefix.
size());
213 if (BuiltinName.rfind(
"__spirv_ocl_", 0) == 0)
214 BuiltinName = BuiltinName.substr(12);
240 static const std::regex SpvWithR(
241 "(__spirv_(ImageSampleExplicitLod|ImageRead|ImageWrite|ImageQuerySizeLod|"
243 "SDotKHR|SUDotKHR|SDotAccSatKHR|UDotAccSatKHR|SUDotAccSatKHR|"
244 "ReadClockKHR|SubgroupBlockReadINTEL|SubgroupImageBlockReadINTEL|"
245 "SubgroupImageMediaBlockReadINTEL|SubgroupImageMediaBlockWriteINTEL|"
247 "UConvert|SConvert|FConvert|SatConvert)[^_]*)(_R[^_]*_?(\\w+)?.*)?");
249 if (std::regex_match(BuiltinName, Match, SpvWithR) && Match.size() > 1) {
250 std::ssub_match SubMatch;
251 if (DecorationId && Match.size() > 3) {
256 BuiltinName = SubMatch.str();
273static std::unique_ptr<const SPIRV::IncomingCall>
275 SPIRV::InstructionSet::InstructionSet Set,
282 DemangledCall.
slice(DemangledCall.
find(
'(') + 1, DemangledCall.
find(
')'));
283 BuiltinArgs.
split(BuiltinArgumentTypes,
',', -1,
false);
288 if ((Builtin = SPIRV::lookupBuiltin(BuiltinName, Set)))
289 return std::make_unique<SPIRV::IncomingCall>(
290 BuiltinName, Builtin, ReturnRegister, ReturnType,
Arguments);
295 if (BuiltinArgumentTypes.
size() >= 1) {
296 char FirstArgumentType = BuiltinArgumentTypes[0][0];
301 switch (FirstArgumentType) {
304 if (Set == SPIRV::InstructionSet::OpenCL_std)
306 else if (Set == SPIRV::InstructionSet::GLSL_std_450)
314 if (Set == SPIRV::InstructionSet::OpenCL_std)
316 else if (Set == SPIRV::InstructionSet::GLSL_std_450)
323 if (Set == SPIRV::InstructionSet::OpenCL_std ||
324 Set == SPIRV::InstructionSet::GLSL_std_450)
330 if (!Prefix.empty() &&
331 (Builtin = SPIRV::lookupBuiltin(Prefix + BuiltinName, Set)))
332 return std::make_unique<SPIRV::IncomingCall>(
333 BuiltinName, Builtin, ReturnRegister, ReturnType,
Arguments);
340 switch (FirstArgumentType) {
361 if (!Suffix.empty() &&
362 (Builtin = SPIRV::lookupBuiltin(BuiltinName + Suffix, Set)))
363 return std::make_unique<SPIRV::IncomingCall>(
364 BuiltinName, Builtin, ReturnRegister, ReturnType,
Arguments);
379 assert(
MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST &&
380 MI->getOperand(1).isReg());
381 Register BitcastReg =
MI->getOperand(1).getReg();
395 assert(
DefMI->getOpcode() == TargetOpcode::G_CONSTANT &&
396 DefMI->getOperand(1).isCImm());
397 return DefMI->getOperand(1).getCImm()->getValue().getZExtValue();
409 Register ValueReg =
MI->getOperand(0).getReg();
415 assert(Ty &&
"Type is expected");
427 if (
MI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE)
428 return MI->getOperand(1).getGlobal()->getType();
430 "Blocks in OpenCL C must be traceable to allocation site");
442static std::tuple<Register, SPIRVTypeInst>
448 if (ResultType->
getOpcode() == SPIRV::OpTypeVector) {
463 return std::make_tuple(ResultRegister, BoolType);
473 if (ReturnType->getOpcode() == SPIRV::OpTypeVector) {
484 return MIRBuilder.
buildSelect(ReturnRegister, SourceRegister, TrueConst,
494 if (!DestinationReg.isValid())
499 MIRBuilder.
buildLoad(DestinationReg, PtrRegister, PtrInfo,
Align());
500 return DestinationReg;
509 const std::optional<SPIRV::LinkageType::LinkageType> &LinkageTy = {
510 SPIRV::LinkageType::Import}) {
518 VariableType, MIRBuilder, SPIRV::StorageClass::Input);
524 SPIRV::StorageClass::Input,
nullptr, isConst, LinkageTy,
531 return LoadedRegister;
542static SPIRV::MemorySemantics::MemorySemantics
545 case std::memory_order_relaxed:
546 return SPIRV::MemorySemantics::None;
547 case std::memory_order_acquire:
548 return SPIRV::MemorySemantics::Acquire;
549 case std::memory_order_release:
550 return SPIRV::MemorySemantics::Release;
551 case std::memory_order_acq_rel:
552 return SPIRV::MemorySemantics::AcquireRelease;
553 case std::memory_order_seq_cst:
554 return SPIRV::MemorySemantics::SequentiallyConsistent;
562 case SPIRV::CLMemoryScope::memory_scope_work_item:
563 return SPIRV::Scope::Invocation;
564 case SPIRV::CLMemoryScope::memory_scope_work_group:
565 return SPIRV::Scope::Workgroup;
566 case SPIRV::CLMemoryScope::memory_scope_device:
567 return SPIRV::Scope::Device;
568 case SPIRV::CLMemoryScope::memory_scope_all_svm_devices:
569 return SPIRV::Scope::CrossDevice;
570 case SPIRV::CLMemoryScope::memory_scope_sub_group:
571 return SPIRV::Scope::Subgroup;
584 SPIRV::Scope::Scope Scope,
588 if (CLScopeRegister.
isValid()) {
590 static_cast<SPIRV::CLMemoryScope
>(
getIConstVal(CLScopeRegister, MRI));
593 if (CLScope ==
static_cast<unsigned>(Scope)) {
594 MRI->
setRegClass(CLScopeRegister, &SPIRV::iIDRegClass);
595 return CLScopeRegister;
607 SpvType ? GR->
getRegClass(SpvType) : &SPIRV::iIDRegClass);
611 Register PtrRegister,
unsigned &Semantics,
614 if (SemanticsRegister.
isValid()) {
616 std::memory_order Order =
617 static_cast<std::memory_order
>(
getIConstVal(SemanticsRegister, MRI));
621 if (
static_cast<unsigned>(Order) == Semantics) {
622 MRI->
setRegClass(SemanticsRegister, &SPIRV::iIDRegClass);
623 return SemanticsRegister;
636 unsigned Sz =
Call->Arguments.size() - ImmArgs.size();
637 for (
unsigned i = 0; i < Sz; ++i)
638 MIB.addUse(
Call->Arguments[i]);
647 if (
Call->isSpirvOp())
651 "Need 2 arguments for atomic init translation");
663 if (
Call->isSpirvOp())
671 Call->Arguments.size() > 1
675 if (
Call->Arguments.size() > 2) {
677 MemSemanticsReg =
Call->Arguments[2];
680 SPIRV::MemorySemantics::SequentiallyConsistent |
698 if (
Call->isSpirvOp())
706 SPIRV::MemorySemantics::SequentiallyConsistent |
721 if (
Call->isSpirvOp())
725 bool IsCmpxchg =
Call->Builtin->Name.contains(
"cmpxchg");
735 SPIRV::OpTypePointer);
738 assert(IsCmpxchg ? ExpectedType == SPIRV::OpTypeInt
739 : ExpectedType == SPIRV::OpTypePointer);
744 auto StorageClass =
static_cast<SPIRV::StorageClass::StorageClass
>(
752 ? SPIRV::MemorySemantics::None
753 : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
756 ? SPIRV::MemorySemantics::None
757 : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
758 if (
Call->Arguments.size() >= 4) {
760 "Need 5+ args for explicit atomic cmpxchg");
767 if (
static_cast<unsigned>(MemOrdEq) == MemSemEqual)
768 MemSemEqualReg =
Call->Arguments[3];
769 if (
static_cast<unsigned>(MemOrdNeq) == MemSemEqual)
770 MemSemUnequalReg =
Call->Arguments[4];
774 if (!MemSemUnequalReg.
isValid())
778 auto Scope = IsCmpxchg ? SPIRV::Scope::Workgroup : SPIRV::Scope::Device;
779 if (
Call->Arguments.size() >= 6) {
781 "Extra args for explicit atomic cmpxchg");
782 auto ClScope =
static_cast<SPIRV::CLMemoryScope
>(
785 if (ClScope ==
static_cast<unsigned>(Scope))
786 ScopeReg =
Call->Arguments[5];
797 :
Call->ReturnRegister;
822 if (
Call->isSpirvOp())
831 "Too many args for explicit atomic RMW");
832 ScopeRegister =
buildScopeReg(ScopeRegister, SPIRV::Scope::Workgroup,
833 MIRBuilder, GR, MRI);
836 unsigned Semantics = SPIRV::MemorySemantics::None;
840 Semantics, MIRBuilder, GR);
844 if (
Call->ReturnType->getOpcode() == SPIRV::OpTypeFloat) {
845 if (Opcode == SPIRV::OpAtomicIAdd) {
846 Opcode = SPIRV::OpAtomicFAddEXT;
847 }
else if (Opcode == SPIRV::OpAtomicISub) {
850 Opcode = SPIRV::OpAtomicFAddEXT;
861 ValueReg = NegValueReg;
880 "Wrong number of atomic floating-type builtin");
900 bool IsSet = Opcode == SPIRV::OpAtomicFlagTestAndSet;
902 if (
Call->isSpirvOp())
908 unsigned Semantics = SPIRV::MemorySemantics::SequentiallyConsistent;
912 Semantics, MIRBuilder, GR);
914 assert((Opcode != SPIRV::OpAtomicFlagClear ||
915 (Semantics != SPIRV::MemorySemantics::Acquire &&
916 Semantics != SPIRV::MemorySemantics::AcquireRelease)) &&
917 "Invalid memory order argument!");
922 buildScopeReg(ScopeRegister, SPIRV::Scope::Device, MIRBuilder, GR, MRI);
940 if ((Opcode == SPIRV::OpControlBarrierArriveINTEL ||
941 Opcode == SPIRV::OpControlBarrierWaitINTEL) &&
942 !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_split_barrier)) {
943 std::string DiagMsg = std::string(Builtin->
Name) +
944 ": the builtin requires the following SPIR-V "
945 "extension: SPV_INTEL_split_barrier";
949 if (
Call->isSpirvOp())
954 unsigned MemSemantics = SPIRV::MemorySemantics::None;
956 if (MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE)
957 MemSemantics |= SPIRV::MemorySemantics::WorkgroupMemory;
959 if (MemFlags & SPIRV::CLK_GLOBAL_MEM_FENCE)
960 MemSemantics |= SPIRV::MemorySemantics::CrossWorkgroupMemory;
962 if (MemFlags & SPIRV::CLK_IMAGE_MEM_FENCE)
963 MemSemantics |= SPIRV::MemorySemantics::ImageMemory;
965 if (Opcode == SPIRV::OpMemoryBarrier)
969 else if (Opcode == SPIRV::OpControlBarrierArriveINTEL)
970 MemSemantics |= SPIRV::MemorySemantics::Release;
971 else if (Opcode == SPIRV::OpControlBarrierWaitINTEL)
972 MemSemantics |= SPIRV::MemorySemantics::Acquire;
974 MemSemantics |= SPIRV::MemorySemantics::SequentiallyConsistent;
977 MemFlags == MemSemantics
981 SPIRV::Scope::Scope Scope = SPIRV::Scope::Workgroup;
982 SPIRV::Scope::Scope MemScope = Scope;
983 if (
Call->Arguments.size() >= 2) {
985 ((Opcode != SPIRV::OpMemoryBarrier &&
Call->Arguments.size() == 2) ||
986 (Opcode == SPIRV::OpMemoryBarrier &&
Call->Arguments.size() == 3)) &&
987 "Extra args for explicitly scoped barrier");
988 Register ScopeArg = (Opcode == SPIRV::OpMemoryBarrier) ?
Call->Arguments[2]
989 :
Call->Arguments[1];
990 SPIRV::CLMemoryScope CLScope =
991 static_cast<SPIRV::CLMemoryScope
>(
getIConstVal(ScopeArg, MRI));
993 if (!(MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE) ||
994 (Opcode == SPIRV::OpMemoryBarrier))
996 if (CLScope ==
static_cast<unsigned>(Scope))
997 ScopeReg =
Call->Arguments[1];
1004 if (Opcode != SPIRV::OpMemoryBarrier)
1006 MIB.
addUse(MemSemanticsReg);
1018 if ((Opcode == SPIRV::OpBitFieldInsert ||
1019 Opcode == SPIRV::OpBitFieldSExtract ||
1020 Opcode == SPIRV::OpBitFieldUExtract || Opcode == SPIRV::OpBitReverse) &&
1021 !ST->canUseExtension(SPIRV::Extension::SPV_KHR_bit_instructions)) {
1022 std::string DiagMsg = std::string(Builtin->
Name) +
1023 ": the builtin requires the following SPIR-V "
1024 "extension: SPV_KHR_bit_instructions";
1029 if (
Call->isSpirvOp())
1036 for (
unsigned i = 0; i <
Call->Arguments.size(); ++i)
1048 if (
Call->isSpirvOp())
1065 if (
Call->isSpirvOp())
1072 for (
unsigned i = 0; i <
Call->Arguments.size(); ++i)
1082 if (
Call->isSpirvOp())
1089 for (
unsigned i = 0; i <
Call->Arguments.size(); ++i)
1101 if (
Call->isSpirvOp())
1107 for (
unsigned i = 0; i <
Call->Arguments.size(); ++i)
1117 case SPIRV::OpCommitReadPipe:
1118 case SPIRV::OpCommitWritePipe:
1120 case SPIRV::OpGroupCommitReadPipe:
1121 case SPIRV::OpGroupCommitWritePipe:
1122 case SPIRV::OpGroupReserveReadPipePackets:
1123 case SPIRV::OpGroupReserveWritePipePackets: {
1127 MRI->
setRegClass(ScopeConstReg, &SPIRV::iIDRegClass);
1131 if (Opcode == SPIRV::OpGroupReserveReadPipePackets ||
1132 Opcode == SPIRV::OpGroupReserveWritePipePackets)
1136 MIB.
addUse(ScopeConstReg);
1137 for (
unsigned int i = 0; i <
Call->Arguments.size(); ++i)
1150 case SPIRV::Dim::DIM_1D:
1151 case SPIRV::Dim::DIM_Buffer:
1153 case SPIRV::Dim::DIM_2D:
1154 case SPIRV::Dim::DIM_Cube:
1155 case SPIRV::Dim::DIM_Rect:
1157 case SPIRV::Dim::DIM_3D:
1170 return arrayed ? numComps + 1 : numComps;
1174 switch (BuiltinNumber) {
1175 case SPIRV::OpenCLExtInst::s_min:
1176 case SPIRV::OpenCLExtInst::u_min:
1177 case SPIRV::OpenCLExtInst::s_max:
1178 case SPIRV::OpenCLExtInst::u_max:
1179 case SPIRV::OpenCLExtInst::fmax:
1180 case SPIRV::OpenCLExtInst::fmin:
1181 case SPIRV::OpenCLExtInst::fmax_common:
1182 case SPIRV::OpenCLExtInst::fmin_common:
1183 case SPIRV::OpenCLExtInst::s_clamp:
1184 case SPIRV::OpenCLExtInst::fclamp:
1185 case SPIRV::OpenCLExtInst::u_clamp:
1186 case SPIRV::OpenCLExtInst::mix:
1187 case SPIRV::OpenCLExtInst::step:
1188 case SPIRV::OpenCLExtInst::smoothstep:
1189 case SPIRV::OpenCLExtInst::ldexp:
1190 case SPIRV::OpenCLExtInst::pown:
1191 case SPIRV::OpenCLExtInst::rootn:
1208 unsigned ResultElementCount =
1210 bool MayNeedPromotionToVec =
1213 if (!MayNeedPromotionToVec)
1214 return {
Call->Arguments.begin(),
Call->Arguments.end()};
1221 ArgumentType !=
Call->ReturnType) {
1223 ArgumentType, ResultElementCount, MIRBuilder,
true);
1226 auto VecSplat = MIRBuilder.
buildInstr(SPIRV::OpCompositeConstruct)
1229 for (
unsigned I = 0;
I != ResultElementCount; ++
I)
1243 SPIRV::lookupExtendedBuiltin(Builtin->
Name, Builtin->
Set)->Number;
1250 if (ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2) &&
1251 (
Number == SPIRV::OpenCLExtInst::fmin_common ||
1252 Number == SPIRV::OpenCLExtInst::fmax_common)) {
1254 ? SPIRV::OpenCLExtInst::fmin
1255 : SPIRV::OpenCLExtInst::fmax;
1263 if (ST.canUseExtension(SPIRV::Extension::SPV_KHR_fma) &&
1264 Number == SPIRV::OpenCLExtInst::fma) {
1272 MIB = MIRBuilder.
buildInstr(SPIRV::OpExtInst)
1275 .
addImm(
static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
1283 if (OrigNumber == SPIRV::OpenCLExtInst::fmin_common ||
1284 OrigNumber == SPIRV::OpenCLExtInst::fmax_common) {
1293 if (ST.isKernel() ||
1294 ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2)) {
1300 I !=
E && (AddNoNan || AddNoInf); ++
I) {
1304 AddNoNan = AddNoNan && ArgTest &
fcNan;
1305 AddNoInf = AddNoInf && ArgTest &
fcInf;
1323 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
1327 std::tie(CompareRegister, RelationType) =
1333 Call->Arguments.end());
1334 if ((Opcode == SPIRV::OpAny || Opcode == SPIRV::OpAll) &&
1363 Call->ReturnType, GR);
1371 SPIRV::lookupGroupBuiltin(Builtin->
Name);
1374 if (
Call->isSpirvOp()) {
1377 if (GroupBuiltin->
Opcode ==
1378 SPIRV::OpSubgroupMatrixMultiplyAccumulateINTEL &&
1379 Call->Arguments.size() > 4)
1388 if (!
MI ||
MI->getOpcode() != TargetOpcode::G_CONSTANT)
1390 "Group Operation parameter must be an integer constant");
1391 uint64_t GrpOp =
MI->getOperand(1).getCImm()->getValue().getZExtValue();
1398 for (
unsigned i = 2; i <
Call->Arguments.size(); ++i)
1411 if (ArgInstruction->
getOpcode() == TargetOpcode::G_CONSTANT) {
1412 if (BoolRegType->
getOpcode() != SPIRV::OpTypeBool)
1416 if (BoolRegType->
getOpcode() == SPIRV::OpTypeInt) {
1425 }
else if (BoolRegType->
getOpcode() != SPIRV::OpTypeBool) {
1437 const bool HasBoolReturnTy =
1442 if (HasBoolReturnTy)
1443 std::tie(GroupResultRegister, GroupResultType) =
1446 auto Scope = Builtin->
Name.
starts_with(
"sub_group") ? SPIRV::Scope::Subgroup
1447 : SPIRV::Scope::Workgroup;
1451 if (GroupBuiltin->
Opcode == SPIRV::OpGroupBroadcast &&
1452 Call->Arguments.size() > 2) {
1460 if (!ElemType || ElemType->
getOpcode() != SPIRV::OpTypeInt)
1462 unsigned VecLen =
Call->Arguments.size() - 1;
1471 for (
unsigned i = 1; i <
Call->Arguments.size(); i++) {
1472 MIB.addUse(
Call->Arguments[i]);
1481 .
addDef(GroupResultRegister)
1487 if (
Call->Arguments.size() > 0) {
1488 MIB.addUse(Arg0.
isValid() ? Arg0 :
Call->Arguments[0]);
1493 for (
unsigned i = 1; i <
Call->Arguments.size(); i++)
1494 MIB.addUse(
Call->Arguments[i]);
1498 if (HasBoolReturnTy)
1500 Call->ReturnType, GR);
1511 SPIRV::lookupIntelSubgroupsBuiltin(Builtin->
Name);
1513 if (IntelSubgroups->
IsMedia &&
1514 !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_media_block_io)) {
1515 std::string DiagMsg = std::string(Builtin->
Name) +
1516 ": the builtin requires the following SPIR-V "
1517 "extension: SPV_INTEL_media_block_io";
1519 }
else if (!IntelSubgroups->
IsMedia &&
1520 !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_subgroups)) {
1521 std::string DiagMsg = std::string(Builtin->
Name) +
1522 ": the builtin requires the following SPIR-V "
1523 "extension: SPV_INTEL_subgroups";
1528 if (
Call->isSpirvOp()) {
1529 bool IsSet = OpCode != SPIRV::OpSubgroupBlockWriteINTEL &&
1530 OpCode != SPIRV::OpSubgroupImageBlockWriteINTEL &&
1531 OpCode != SPIRV::OpSubgroupImageMediaBlockWriteINTEL;
1537 if (IntelSubgroups->
IsBlock) {
1540 if (Arg0Type->getOpcode() == SPIRV::OpTypeImage) {
1546 case SPIRV::OpSubgroupBlockReadINTEL:
1547 OpCode = SPIRV::OpSubgroupImageBlockReadINTEL;
1549 case SPIRV::OpSubgroupBlockWriteINTEL:
1550 OpCode = SPIRV::OpSubgroupImageBlockWriteINTEL;
1573 for (
size_t i = 0; i <
Call->Arguments.size(); ++i)
1584 if (!ST->canUseExtension(
1585 SPIRV::Extension::SPV_KHR_uniform_group_instructions)) {
1586 std::string DiagMsg = std::string(Builtin->
Name) +
1587 ": the builtin requires the following SPIR-V "
1588 "extension: SPV_KHR_uniform_group_instructions";
1592 SPIRV::lookupGroupUniformBuiltin(Builtin->
Name);
1602 if (!Const || Const->getOpcode() != TargetOpcode::G_CONSTANT)
1604 "expect a constant group operation for a uniform group instruction",
1607 if (!ConstOperand.
isCImm())
1617 MIB.addUse(ValueReg);
1628 if (!ST->canUseExtension(SPIRV::Extension::SPV_KHR_shader_clock)) {
1629 std::string DiagMsg = std::string(Builtin->
Name) +
1630 ": the builtin requires the following SPIR-V "
1631 "extension: SPV_KHR_shader_clock";
1637 if (Builtin->
Name ==
"__spirv_ReadClockKHR") {
1644 SPIRV::Scope::Scope ScopeArg =
1646 .
EndsWith(
"device", SPIRV::Scope::Scope::Device)
1647 .
EndsWith(
"work_group", SPIRV::Scope::Scope::Workgroup)
1648 .
EndsWith(
"sub_group", SPIRV::Scope::Scope::Subgroup);
1689 SPIRV::BuiltIn::BuiltIn BuiltinValue,
1692 const unsigned ResultWidth =
Call->ReturnType->getOperand(1).getImm();
1703 bool IsConstantIndex =
1704 IndexInstruction->getOpcode() == TargetOpcode::G_CONSTANT;
1708 if (IsConstantIndex &&
getIConstVal(IndexRegister, MRI) >= 3) {
1710 if (PointerSize != ResultWidth) {
1712 MRI->
setRegClass(DefaultReg, &SPIRV::iIDRegClass);
1714 MIRBuilder.
getMF());
1715 ToTruncate = DefaultReg;
1719 MIRBuilder.
buildCopy(DefaultReg, NewRegister);
1728 if (!IsConstantIndex || PointerSize != ResultWidth) {
1737 ExtractInst.
addUse(LoadedVector).
addUse(IndexRegister);
1740 if (!IsConstantIndex) {
1741 updateRegType(Extracted,
nullptr, PointerSizeType, GR, MIRBuilder, *MRI);
1748 MRI->
setRegClass(CompareRegister, &SPIRV::iIDRegClass);
1763 if (PointerSize != ResultWidth) {
1766 MRI->
setRegClass(SelectionResult, &SPIRV::iIDRegClass);
1768 MIRBuilder.
getMF());
1771 MIRBuilder.
buildSelect(SelectionResult, CompareRegister, Extracted,
1773 ToTruncate = SelectionResult;
1775 ToTruncate = Extracted;
1779 if (PointerSize != ResultWidth)
1789 SPIRV::BuiltIn::BuiltIn
Value =
1790 SPIRV::lookupGetBuiltin(Builtin->
Name, Builtin->
Set)->
Value;
1792 if (
Value == SPIRV::BuiltIn::GlobalInvocationId)
1798 if (
Call->ReturnType->getOpcode() == SPIRV::OpTypeVector)
1805 LLType,
Call->ReturnRegister);
1814 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
1817 case SPIRV::OpStore:
1819 case SPIRV::OpAtomicLoad:
1821 case SPIRV::OpAtomicStore:
1823 case SPIRV::OpAtomicCompareExchange:
1824 case SPIRV::OpAtomicCompareExchangeWeak:
1827 case SPIRV::OpAtomicIAdd:
1828 case SPIRV::OpAtomicISub:
1829 case SPIRV::OpAtomicOr:
1830 case SPIRV::OpAtomicXor:
1831 case SPIRV::OpAtomicAnd:
1832 case SPIRV::OpAtomicExchange:
1833 case SPIRV::OpAtomicSMax:
1834 case SPIRV::OpAtomicSMin:
1835 case SPIRV::OpAtomicUMax:
1836 case SPIRV::OpAtomicUMin:
1838 case SPIRV::OpMemoryBarrier:
1840 case SPIRV::OpAtomicFlagTestAndSet:
1841 case SPIRV::OpAtomicFlagClear:
1844 if (
Call->isSpirvOp())
1856 unsigned Opcode = SPIRV::lookupAtomicFloatingBuiltin(Builtin->
Name)->Opcode;
1859 case SPIRV::OpAtomicFAddEXT:
1860 case SPIRV::OpAtomicFMinEXT:
1861 case SPIRV::OpAtomicFMaxEXT:
1874 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
1885 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
1887 if (Opcode == SPIRV::OpGenericCastToPtrExplicit) {
1888 SPIRV::StorageClass::StorageClass ResSC =
1899 MIRBuilder.
buildInstr(TargetOpcode::G_ADDRSPACE_CAST)
1910 if (
Call->isSpirvOp())
1915 SPIRV::OpTypeVector;
1917 uint32_t OC = IsVec ? SPIRV::OpDot : SPIRV::OpFMulS;
1918 bool IsSwapReq =
false;
1923 (ST->canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
1927 SPIRV::lookupIntegerDotProductBuiltin(Builtin->
Name);
1937 bool IsFirstSigned = TypeStrs[0].trim()[0] !=
'u';
1938 bool IsSecondSigned = TypeStrs[1].trim()[0] !=
'u';
1940 if (
Call->BuiltinName ==
"dot") {
1941 if (IsFirstSigned && IsSecondSigned)
1943 else if (!IsFirstSigned && !IsSecondSigned)
1946 OC = SPIRV::OpSUDot;
1950 }
else if (
Call->BuiltinName ==
"dot_acc_sat") {
1951 if (IsFirstSigned && IsSecondSigned)
1952 OC = SPIRV::OpSDotAccSat;
1953 else if (!IsFirstSigned && !IsSecondSigned)
1954 OC = SPIRV::OpUDotAccSat;
1956 OC = SPIRV::OpSUDotAccSat;
1972 for (
size_t i = 2; i <
Call->Arguments.size(); ++i)
1975 for (
size_t i = 0; i <
Call->Arguments.size(); ++i)
1981 if (!IsVec && OC != SPIRV::OpFMulS)
1982 MIB.
addImm(SPIRV::PackedVectorFormat4x8Bit);
1991 SPIRV::BuiltIn::BuiltIn
Value =
1992 SPIRV::lookupGetBuiltin(Builtin->
Name, Builtin->
Set)->
Value;
1995 assert(
Call->ReturnType->getOpcode() == SPIRV::OpTypeInt);
1999 MIRBuilder,
Call->ReturnType, GR,
Value, LLType,
Call->ReturnRegister,
2000 false, std::nullopt);
2036 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
2043 if (RetType->
getOpcode() != SPIRV::OpTypeStruct)
2045 "overflow builtins");
2049 if (!OpType1 || !OpType2 || OpType1 != OpType2)
2051 if (OpType1->
getOpcode() == SPIRV::OpTypeVector)
2053 case SPIRV::OpIAddCarryS:
2054 Opcode = SPIRV::OpIAddCarryV;
2056 case SPIRV::OpISubBorrowS:
2057 Opcode = SPIRV::OpISubBorrowV;
2062 RetType, MIRBuilder, GR);
2083 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
2084 assert((Opcode == SPIRV::OpUMulExtended || Opcode == SPIRV::OpSMulExtended) &&
2085 "Expected OpUMulExtended or OpSMulExtended");
2088 !
Call->ReturnType ||
Call->ReturnType->getOpcode() == SPIRV::OpTypeVoid;
2100 RetType =
Call->ReturnType;
2103 if (!RetType || RetType->
getOpcode() != SPIRV::OpTypeStruct)
2105 "multiplication builtins");
2108 "extended multiplication builtins");
2113 if (!Member0Type || !Member1Type || Member0Type != Member1Type)
2118 if (!OpType1 || !OpType2 || OpType1 != OpType2)
2120 if (OpType1 != Member0Type)
2147 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
2161 SPIRV::BuiltIn::BuiltIn
Value =
2162 SPIRV::lookupGetBuiltin(
Call->Builtin->Name,
Call->Builtin->Set)->
Value;
2163 const bool IsDefaultOne = (
Value == SPIRV::BuiltIn::GlobalSize ||
2164 Value == SPIRV::BuiltIn::NumWorkgroups ||
2165 Value == SPIRV::BuiltIn::WorkgroupSize ||
2166 Value == SPIRV::BuiltIn::EnqueuedWorkgroupSize);
2176 SPIRV::lookupImageQueryBuiltin(Builtin->
Name, Builtin->
Set)->Component;
2180 unsigned NumExpectedRetComponents =
2187 if (NumExpectedRetComponents != NumActualRetComponents) {
2188 unsigned Bitwidth =
Call->ReturnType->getOpcode() == SPIRV::OpTypeInt
2189 ?
Call->ReturnType->getOperand(1).getImm()
2196 IntTy, NumActualRetComponents, MIRBuilder,
true);
2201 bool UseQuerySize = IsDimBuf || IsMultisampled;
2203 UseQuerySize ? SPIRV::OpImageQuerySize : SPIRV::OpImageQuerySizeLod;
2210 if (NumExpectedRetComponents == NumActualRetComponents)
2212 if (NumExpectedRetComponents == 1) {
2214 unsigned ExtractedComposite =
2215 Component == 3 ? NumActualRetComponents - 1 : Component;
2216 assert(ExtractedComposite < NumActualRetComponents &&
2217 "Invalid composite index!");
2220 if (QueryResultType->
getOpcode() == SPIRV::OpTypeVector) {
2223 if (TypeReg != NewTypeReg)
2224 TypeReg = NewTypeReg;
2228 MIRBuilder.
buildInstr(SPIRV::OpCompositeExtract)
2232 .
addImm(ExtractedComposite);
2238 auto MIB = MIRBuilder.
buildInstr(SPIRV::OpVectorShuffle)
2243 for (
unsigned i = 0; i < NumExpectedRetComponents; ++i)
2244 MIB.
addImm(i < NumActualRetComponents ? i : 0xffffffff);
2252 assert(
Call->ReturnType->getOpcode() == SPIRV::OpTypeInt &&
2253 "Image samples query result must be of int type!");
2258 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
2261 SPIRV::Dim::Dim ImageDimensionality =
static_cast<SPIRV::Dim::Dim
>(
2263 (void)ImageDimensionality;
2266 case SPIRV::OpImageQuerySamples:
2267 assert(ImageDimensionality == SPIRV::Dim::DIM_2D &&
2268 "Image must be of 2D dimensionality");
2270 case SPIRV::OpImageQueryLevels:
2271 assert((ImageDimensionality == SPIRV::Dim::DIM_1D ||
2272 ImageDimensionality == SPIRV::Dim::DIM_2D ||
2273 ImageDimensionality == SPIRV::Dim::DIM_3D ||
2274 ImageDimensionality == SPIRV::Dim::DIM_Cube) &&
2275 "Image must be of 1D/2D/3D/Cube dimensionality");
2287static SPIRV::SamplerAddressingMode::SamplerAddressingMode
2289 switch (Bitmask & SPIRV::CLK_ADDRESS_MODE_MASK) {
2290 case SPIRV::CLK_ADDRESS_CLAMP:
2291 return SPIRV::SamplerAddressingMode::Clamp;
2292 case SPIRV::CLK_ADDRESS_CLAMP_TO_EDGE:
2293 return SPIRV::SamplerAddressingMode::ClampToEdge;
2294 case SPIRV::CLK_ADDRESS_REPEAT:
2295 return SPIRV::SamplerAddressingMode::Repeat;
2296 case SPIRV::CLK_ADDRESS_MIRRORED_REPEAT:
2297 return SPIRV::SamplerAddressingMode::RepeatMirrored;
2298 case SPIRV::CLK_ADDRESS_NONE:
2299 return SPIRV::SamplerAddressingMode::None;
2306 return (Bitmask & SPIRV::CLK_NORMALIZED_COORDS_TRUE) ? 1 : 0;
2309static SPIRV::SamplerFilterMode::SamplerFilterMode
2311 if (Bitmask & SPIRV::CLK_FILTER_LINEAR)
2312 return SPIRV::SamplerFilterMode::Linear;
2313 if (Bitmask & SPIRV::CLK_FILTER_NEAREST)
2314 return SPIRV::SamplerFilterMode::Nearest;
2315 return SPIRV::SamplerFilterMode::Nearest;
2322 if (
Call->isSpirvOp())
2329 if (HasOclSampler) {
2354 if (
Call->ReturnType->getOpcode() != SPIRV::OpTypeVector) {
2361 MIRBuilder.
buildInstr(SPIRV::OpImageSampleExplicitLod)
2366 .
addImm(SPIRV::ImageOperand::Lod)
2368 MIRBuilder.
buildInstr(SPIRV::OpCompositeExtract)
2374 MIRBuilder.
buildInstr(SPIRV::OpImageSampleExplicitLod)
2379 .
addImm(SPIRV::ImageOperand::Lod)
2382 }
else if (HasMsaa) {
2388 .
addImm(SPIRV::ImageOperand::Sample)
2403 if (
Call->isSpirvOp())
2418 if (
Call->Builtin->Name.contains_insensitive(
2419 "__translate_sampler_initializer")) {
2427 }
else if (
Call->Builtin->Name.contains_insensitive(
"__spirv_SampledImage")) {
2434 Call->ReturnRegister.isValid()
2435 ?
Call->ReturnRegister
2443 }
else if (
Call->Builtin->Name.contains_insensitive(
2444 "__spirv_ImageSampleExplicitLod")) {
2446 std::string ReturnType = DemangledCall.
str();
2447 if (DemangledCall.
contains(
"_R")) {
2448 ReturnType = ReturnType.substr(ReturnType.find(
"_R") + 2);
2449 ReturnType = ReturnType.substr(0, ReturnType.find(
'('));
2454 ReturnType, MIRBuilder,
true));
2456 std::string DiagMsg =
2457 "Unable to recognize SPIRV type name: " + ReturnType;
2460 MIRBuilder.
buildInstr(SPIRV::OpImageSampleExplicitLod)
2465 .
addImm(SPIRV::ImageOperand::Lod)
2477 if (!ResTy.
isVector() && CondTy.isVector())
2479 "boolean condition");
2481 Call->Arguments[1],
Call->Arguments[2]);
2489 SPIRV::OpCompositeConstructContinuedINTEL,
2490 Call->Arguments,
Call->ReturnRegister,
2500 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
2501 bool IsSet = Opcode != SPIRV::OpCooperativeMatrixStoreKHR &&
2502 Opcode != SPIRV::OpCooperativeMatrixStoreCheckedINTEL &&
2503 Opcode != SPIRV::OpCooperativeMatrixPrefetchINTEL;
2504 unsigned ArgSz =
Call->Arguments.size();
2505 unsigned LiteralIdx = 0;
2508 case SPIRV::OpCooperativeMatrixLoadKHR:
2509 LiteralIdx = ArgSz > 3 ? 3 : 0;
2511 case SPIRV::OpCooperativeMatrixStoreKHR:
2512 LiteralIdx = ArgSz > 4 ? 4 : 0;
2514 case SPIRV::OpCooperativeMatrixLoadCheckedINTEL:
2515 LiteralIdx = ArgSz > 7 ? 7 : 0;
2517 case SPIRV::OpCooperativeMatrixStoreCheckedINTEL:
2518 LiteralIdx = ArgSz > 8 ? 8 : 0;
2521 case SPIRV::OpCooperativeMatrixMulAddKHR:
2522 LiteralIdx = ArgSz > 3 ? 3 : 0;
2528 if (Opcode == SPIRV::OpCooperativeMatrixPrefetchINTEL) {
2530 auto MIB = MIRBuilder.
buildInstr(SPIRV::OpCooperativeMatrixPrefetchINTEL)
2547 if (Opcode == SPIRV::OpCooperativeMatrixLengthKHR) {
2558 IsSet ? TypeReg :
Register(0), ImmArgs);
2567 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
2571 case SPIRV::OpSpecConstant: {
2576 (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
2577 Const->getOpcode() == TargetOpcode::G_FCONSTANT) &&
2578 "Argument should be either an int or floating-point constant");
2581 if (
Call->ReturnType->getOpcode() == SPIRV::OpTypeBool) {
2582 assert(ConstOperand.
isCImm() &&
"Int constant operand is expected");
2584 ? SPIRV::OpSpecConstantTrue
2585 : SPIRV::OpSpecConstantFalse;
2591 if (
Call->ReturnType->getOpcode() != SPIRV::OpTypeBool) {
2592 if (Const->getOpcode() == TargetOpcode::G_CONSTANT)
2604 case SPIRV::OpSpecConstantComposite: {
2606 SPIRV::OpSpecConstantCompositeContinuedINTEL,
2607 Call->Arguments,
Call->ReturnRegister,
2622 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
2633 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
2643 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
2661 InputReg =
Call->Arguments[1];
2664 if (InputType->getTypeID() == llvm::Type::TypeID::TypedPointerTyID) {
2672 MIRBuilder.
buildLoad(PtrInputReg, InputReg, *MMO1);
2673 MRI->
setRegClass(PtrInputReg, &SPIRV::iIDRegClass);
2677 for (
unsigned index = 2; index < 7; index++) {
2696 MRI->
setRegClass(ActualRetValReg, &SPIRV::pIDRegClass);
2697 MIRBuilder.
buildStore(ActualRetValReg,
Call->Arguments[0], *MMO);
2700 for (
unsigned index = 1; index < 6; index++)
2713 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
2725 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
2735 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
2746 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
2756 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
2758 unsigned Scope = SPIRV::Scope::Workgroup;
2760 Scope = SPIRV::Scope::Subgroup;
2770 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
2772 bool IsSet = Opcode != SPIRV::OpPredicatedStoreINTEL;
2773 unsigned ArgSz =
Call->Arguments.size();
2783 IsSet ? TypeReg :
Register(0), ImmArgs);
2808 const unsigned NumCallArgs =
Call->Arguments.size();
2809 const unsigned MaxCallArgs =
Call->Builtin->MaxNumArgs;
2810 const unsigned IncorrectArgIdx = MaxCallArgs + 1;
2813 bool HasSRetArg = RetTy->
isVoidTy();
2815 const unsigned SRetArgIdx = HasSRetArg ? 0 : IncorrectArgIdx;
2816 const unsigned ArgBase = HasSRetArg ? 1 : 0;
2817 const unsigned MaxNDRangeArgs = 3;
2818 const unsigned NumNDRangeArgs = NumCallArgs - ArgBase;
2820 const unsigned GlobalWorkSizeArgIdx =
2821 NumNDRangeArgs < MaxNDRangeArgs ? ArgBase : ArgBase + 1;
2822 const unsigned LocalWorkSizeArgIdx =
2823 (NumNDRangeArgs == 1)
2825 : (NumNDRangeArgs == MaxNDRangeArgs ? ArgBase + 2 : ArgBase + 1);
2826 const unsigned GlobalWorkOffsetArgIdx =
2827 NumNDRangeArgs == MaxNDRangeArgs ? ArgBase : IncorrectArgIdx;
2832 assert(AddressModelBits == 64 || AddressModelBits == 32);
2836 unsigned Dimension = 0;
2837 Call->Builtin->Name.substr(8, 1).getAsInteger(10, Dimension);
2838 assert(Dimension <= 3 && Dimension >= 1);
2845 if (Dimension == 1) {
2848 "Expected scalar integer type");
2850 if (NumNDRangeArgs < MaxNDRangeArgs)
2857 FieldTy, MIRBuilder, SPIRV::AccessQualifier::ReadOnly,
true);
2859 if (NumNDRangeArgs < MaxNDRangeArgs) {
2865 SpvFieldTy, *ST.getInstrInfo());
2871 auto CreateDataRegister = [&](
unsigned Idx) ->
Register {
2872 Register Reg = (Idx == IncorrectArgIdx) ? ConstZero :
Call->Arguments[Idx];
2880 "Only pointer types are supported for loading values");
2894 Register GlobalWorkSize = CreateDataRegister(GlobalWorkSizeArgIdx);
2895 Register LocalWorkSize = CreateDataRegister(LocalWorkSizeArgIdx);
2896 Register GlobalWorkOffset = CreateDataRegister(GlobalWorkOffsetArgIdx);
2899 return MIRBuilder.
buildInstr(SPIRV::OpBuildNDRange)
2904 .
addUse(GlobalWorkOffset);
2921 .
addUse(GlobalWorkOffset);
2935 SPIRV::AccessQualifier::ReadWrite,
true);
2943 bool IsSpirvOp =
Call->isSpirvOp();
2944 bool HasEvents =
Call->Builtin->Name.contains(
"events") || IsSpirvOp;
2951 if (
Call->Builtin->Name.contains(
"_varargs") || IsSpirvOp) {
2952 const unsigned LocalSizeArrayIdx = HasEvents ? 9 : 6;
2960 assert(LocalSizeTy &&
"Local size type is expected");
2966 Int32Ty, MIRBuilder, SPIRV::StorageClass::Function);
2967 for (
unsigned I = 0;
I < LocalSizeNum; ++
I) {
2983 auto MIB = MIRBuilder.
buildInstr(SPIRV::OpEnqueueKernel)
2988 const unsigned BlockFIdx = HasEvents ? 6 : 3;
2989 for (
unsigned i = 0; i < BlockFIdx; i++)
2990 MIB.addUse(
Call->Arguments[i]);
2997 MIB.addUse(NullPtr);
2998 MIB.addUse(NullPtr);
3006 Register BlockLiteralReg =
Call->Arguments[BlockFIdx + 1];
3008 MIB.addUse(BlockLiteralReg);
3018 for (
unsigned i = 0; i < LocalSizes.
size(); i++)
3019 MIB.addUse(LocalSizes[i]);
3029 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
3032 case SPIRV::OpRetainEvent:
3033 case SPIRV::OpReleaseEvent:
3035 case SPIRV::OpCreateUserEvent:
3036 case SPIRV::OpGetDefaultQueue:
3040 case SPIRV::OpIsValidEvent:
3045 case SPIRV::OpSetUserEventStatus:
3049 case SPIRV::OpCaptureEventProfilingInfo:
3054 case SPIRV::OpBuildNDRange:
3056 case SPIRV::OpEnqueueKernel:
3069 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
3071 bool IsSet = Opcode == SPIRV::OpGroupAsyncCopy;
3073 if (
Call->isSpirvOp())
3080 case SPIRV::OpGroupAsyncCopy: {
3082 Call->ReturnType->getOpcode() == SPIRV::OpTypeEvent
3086 unsigned NumArgs =
Call->Arguments.size();
3096 ?
Call->Arguments[3]
3104 case SPIRV::OpGroupWaitEvents:
3120 SPIRV::lookupConvertBuiltin(
Call->Builtin->Name,
Call->Builtin->Set);
3122 if (!Builtin &&
Call->isSpirvOp()) {
3125 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
3130 assert(Builtin &&
"Conversion builtin not found.");
3133 SPIRV::Decoration::SaturatedConversion, {});
3136 bool AnyTypeIsFloat =
3143 if (AnyTypeIsFloat) {
3145 SPIRV::Decoration::FPRoundingMode,
3146 {(unsigned)Builtin->RoundingMode});
3150 std::string NeedExtMsg;
3151 bool IsRightComponentsNumber =
true;
3152 unsigned Opcode = SPIRV::OpNop;
3159 : SPIRV::OpSatConvertSToU;
3162 : SPIRV::OpSConvert;
3164 SPIRV::OpTypeFloat)) {
3168 &MIRBuilder.
getMF().getSubtarget());
3169 if (!ST->canUseExtension(
3170 SPIRV::Extension::SPV_INTEL_bfloat16_conversion))
3171 NeedExtMsg =
"SPV_INTEL_bfloat16_conversion";
3172 IsRightComponentsNumber =
3175 Opcode = SPIRV::OpConvertBF16ToFINTEL;
3177 bool IsSourceSigned =
3179 Opcode = IsSourceSigned ? SPIRV::OpConvertSToF : SPIRV::OpConvertUToF;
3183 SPIRV::OpTypeFloat)) {
3189 &MIRBuilder.
getMF().getSubtarget());
3190 if (!ST->canUseExtension(
3191 SPIRV::Extension::SPV_INTEL_bfloat16_conversion))
3192 NeedExtMsg =
"SPV_INTEL_bfloat16_conversion";
3193 IsRightComponentsNumber =
3196 Opcode = SPIRV::OpConvertFToBF16INTEL;
3199 : SPIRV::OpConvertFToU;
3202 SPIRV::OpTypeFloat)) {
3205 &MIRBuilder.
getMF().getSubtarget());
3206 if (!ST->canUseExtension(
3207 SPIRV::Extension::SPV_INTEL_tensor_float32_conversion))
3208 NeedExtMsg =
"SPV_INTEL_tensor_float32_conversion";
3209 IsRightComponentsNumber =
3212 Opcode = SPIRV::OpRoundFToTF32INTEL;
3215 Opcode = SPIRV::OpFConvert;
3220 if (!NeedExtMsg.empty()) {
3221 std::string DiagMsg = std::string(Builtin->
Name) +
3222 ": the builtin requires the following SPIR-V "
3227 if (!IsRightComponentsNumber) {
3228 std::string DiagMsg =
3229 std::string(Builtin->
Name) +
3230 ": result and argument must have the same number of components";
3233 assert(Opcode != SPIRV::OpNop &&
3234 "Conversion between the types not implemented!");
3248 SPIRV::lookupVectorLoadStoreBuiltin(
Call->Builtin->Name,
3249 Call->Builtin->Set);
3255 .
addImm(
static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
3272 const auto *Builtin =
Call->Builtin;
3273 auto *MRI = MIRBuilder.
getMRI();
3275 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
3281 LLT PtrTy = MRI->getType(
Call->Arguments[0]);
3282 DestReg = MRI->createGenericVirtualRegister(PtrTy);
3283 MRI->setRegClass(DestReg, &SPIRV::pIDRegClass);
3286 MIB.addDef(DestReg);
3289 MIB.addDef(
Call->ReturnRegister);
3292 for (
unsigned i = IsVoid ? 1 : 0; i <
Call->Arguments.size(); ++i) {
3295 if (
DefMI->getOpcode() == TargetOpcode::G_CONSTANT &&
3296 DefMI->getOperand(1).isCImm()) {
3303 LLT PtrTy = MRI->getType(
Call->Arguments[0]);
3318 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
3319 bool IsLoad = Opcode == SPIRV::OpLoad;
3323 MIB.addDef(
Call->ReturnRegister);
3331 MIB.addUse(
Call->Arguments[1]);
3333 unsigned NumArgs =
Call->Arguments.size();
3334 if ((IsLoad && NumArgs >= 2) || NumArgs >= 3)
3336 if ((IsLoad && NumArgs >= 3) || NumArgs >= 4)
3349std::tuple<int, unsigned, unsigned>
3351 SPIRV::InstructionSet::InstructionSet Set) {
3354 std::unique_ptr<const IncomingCall>
Call =
3357 return std::make_tuple(-1, 0, 0);
3359 switch (
Call->Builtin->Group) {
3360 case SPIRV::Relational:
3362 case SPIRV::Barrier:
3363 case SPIRV::CastToPtr:
3364 case SPIRV::ImageMiscQuery:
3365 case SPIRV::SpecConstant:
3366 case SPIRV::Enqueue:
3367 case SPIRV::AsyncCopy:
3368 case SPIRV::LoadStore:
3369 case SPIRV::CoopMatr:
3370 case SPIRV::Arithmetic:
3372 SPIRV::lookupNativeBuiltin(
Call->Builtin->Name,
Call->Builtin->Set))
3373 return std::make_tuple(
Call->Builtin->Group, R->Opcode, 0);
3375 case SPIRV::Extended:
3376 if (
const auto *R = SPIRV::lookupExtendedBuiltin(
Call->Builtin->Name,
3377 Call->Builtin->Set))
3378 return std::make_tuple(
Call->Builtin->Group, 0, R->Number);
3380 case SPIRV::VectorLoadStore:
3381 if (
const auto *R = SPIRV::lookupVectorLoadStoreBuiltin(
Call->Builtin->Name,
3382 Call->Builtin->Set))
3383 return std::make_tuple(SPIRV::Extended, 0, R->Number);
3386 if (
const auto *R = SPIRV::lookupGroupBuiltin(
Call->Builtin->Name))
3387 return std::make_tuple(
Call->Builtin->Group, R->Opcode, 0);
3389 case SPIRV::AtomicFloating:
3390 if (
const auto *R = SPIRV::lookupAtomicFloatingBuiltin(
Call->Builtin->Name))
3391 return std::make_tuple(
Call->Builtin->Group, R->Opcode, 0);
3393 case SPIRV::IntelSubgroups:
3394 if (
const auto *R = SPIRV::lookupIntelSubgroupsBuiltin(
Call->Builtin->Name))
3395 return std::make_tuple(
Call->Builtin->Group, R->Opcode, 0);
3397 case SPIRV::GroupUniform:
3398 if (
const auto *R = SPIRV::lookupGroupUniformBuiltin(
Call->Builtin->Name))
3399 return std::make_tuple(
Call->Builtin->Group, R->Opcode, 0);
3401 case SPIRV::IntegerDot:
3403 SPIRV::lookupIntegerDotProductBuiltin(
Call->Builtin->Name))
3404 return std::make_tuple(
Call->Builtin->Group, R->Opcode, 0);
3406 case SPIRV::WriteImage:
3407 return std::make_tuple(
Call->Builtin->Group, SPIRV::OpImageWrite, 0);
3409 return std::make_tuple(
Call->Builtin->Group, TargetOpcode::G_SELECT, 0);
3410 case SPIRV::Construct:
3411 return std::make_tuple(
Call->Builtin->Group, SPIRV::OpCompositeConstruct,
3413 case SPIRV::KernelClock:
3414 return std::make_tuple(
Call->Builtin->Group, SPIRV::OpReadClockKHR, 0);
3416 return std::make_tuple(-1, 0, 0);
3418 return std::make_tuple(-1, 0, 0);
3422 SPIRV::InstructionSet::InstructionSet Set,
3427 LLVM_DEBUG(
dbgs() <<
"Lowering builtin call: " << DemangledCall <<
"\n");
3431 assert(SpvType &&
"Inconsistent return register: expected valid type info");
3432 std::unique_ptr<const IncomingCall>
Call =
3437 return std::nullopt;
3442 if (Args.size() <
Call->Builtin->MinNumArgs) {
3443 LLVM_DEBUG(
dbgs() <<
"Too few arguments for builtin " << DemangledCall
3444 <<
": expected at least " <<
Call->Builtin->MinNumArgs
3445 <<
", got " << Args.size()
3446 <<
"; treating as a normal function\n");
3447 return std::nullopt;
3449 if (
Call->Builtin->MaxNumArgs && Args.size() >
Call->Builtin->MaxNumArgs) {
3450 LLVM_DEBUG(
dbgs() <<
"Too many arguments for builtin " << DemangledCall
3451 <<
": expected at most " <<
Call->Builtin->MaxNumArgs
3452 <<
", got " << Args.size()
3453 <<
"; treating as a normal function\n");
3454 return std::nullopt;
3458 switch (
Call->Builtin->Group) {
3459 case SPIRV::Extended:
3461 case SPIRV::Relational:
3465 case SPIRV::Variable:
3469 case SPIRV::AtomicFloating:
3471 case SPIRV::Barrier:
3473 case SPIRV::CastToPtr:
3476 case SPIRV::IntegerDot:
3480 case SPIRV::ICarryBorrow:
3482 case SPIRV::MulExtended:
3484 case SPIRV::Arithmetic:
3486 case SPIRV::GetQuery:
3488 case SPIRV::ImageSizeQuery:
3490 case SPIRV::ImageMiscQuery:
3492 case SPIRV::ReadImage:
3494 case SPIRV::WriteImage:
3496 case SPIRV::SampleImage:
3500 case SPIRV::Construct:
3502 case SPIRV::SpecConstant:
3504 case SPIRV::Enqueue:
3506 case SPIRV::AsyncCopy:
3508 case SPIRV::Convert:
3510 case SPIRV::VectorLoadStore:
3512 case SPIRV::LoadStore:
3514 case SPIRV::IntelSubgroups:
3516 case SPIRV::GroupUniform:
3518 case SPIRV::KernelClock:
3520 case SPIRV::CoopMatr:
3522 case SPIRV::ExtendedBitOps:
3524 case SPIRV::BindlessINTEL:
3526 case SPIRV::TernaryBitwiseINTEL:
3528 case SPIRV::Block2DLoadStore:
3532 case SPIRV::PredicatedLoadStore:
3534 case SPIRV::BlockingPipes:
3536 case SPIRV::ArbitraryPrecisionFixedPoint:
3538 case SPIRV::ImageChannelDataTypes:
3540 case SPIRV::ArbitraryFloatingPoint:
3551 [[maybe_unused]]
bool IsOCLBuiltinType = TypeStr.
consume_front(
"ocl_");
3552 assert(IsOCLBuiltinType &&
"Invalid OpenCL builtin prefix");
3569 unsigned VecElts = 0;
3580 TypeStr = TypeStr.
substr(0, TypeStr.
find(
']'));
3592 auto Pos1 = DemangledCall.
find(
'(');
3595 auto Pos2 = DemangledCall.
find(
')');
3598 DemangledCall.
slice(Pos1 + 1, Pos2)
3599 .
split(BuiltinArgsTypeStrs,
',', -1,
false);
3607 if (ArgIdx >= BuiltinArgsTypeStrs.
size())
3609 StringRef TypeStr = BuiltinArgsTypeStrs[ArgIdx].trim();
3618#define GET_BuiltinTypes_DECL
3619#define GET_BuiltinTypes_IMPL
3626#define GET_OpenCLTypes_DECL
3627#define GET_OpenCLTypes_IMPL
3629#include "SPIRVGenTables.inc"
3637 if (Name.starts_with(
"void"))
3639 else if (Name.starts_with(
"int") || Name.starts_with(
"uint"))
3641 else if (Name.starts_with(
"float"))
3643 else if (Name.starts_with(
"half"))
3656 unsigned Opcode = TypeRecord->
Opcode;
3671 "Invalid number of parameters for SPIR-V pipe builtin!");
3674 SPIRV::AccessQualifier::AccessQualifier(
3682 "Invalid number of parameters for SPIR-V coop matrices builtin!");
3684 "SPIR-V coop matrices builtin type must have a type parameter!");
3687 SPIRV::AccessQualifier::ReadWrite,
true);
3690 MIRBuilder, ExtensionType, ElemType, ExtensionType->
getIntParameter(0),
3699 OpaqueType, SPIRV::AccessQualifier::ReadOnly, MIRBuilder);
3708 "Inline SPIR-V type builtin takes an opcode, size, and alignment "
3715 if (ParamEType->getName() ==
"spirv.IntegralConstant") {
3716 assert(ParamEType->getNumTypeParameters() == 1 &&
3717 "Inline SPIR-V integral constant builtin must have a type "
3719 assert(ParamEType->getNumIntParameters() == 1 &&
3720 "Inline SPIR-V integral constant builtin must have a "
3723 auto OperandValue = ParamEType->getIntParameter(0);
3724 auto *OperandType = ParamEType->getTypeParameter(0);
3727 OperandType, MIRBuilder, SPIRV::AccessQualifier::ReadWrite,
true);
3730 OperandValue, MIRBuilder, OperandSPIRVType,
true)));
3732 }
else if (ParamEType->getName() ==
"spirv.Literal") {
3733 assert(ParamEType->getNumTypeParameters() == 0 &&
3734 "Inline SPIR-V literal builtin does not take type "
3736 assert(ParamEType->getNumIntParameters() == 1 &&
3737 "Inline SPIR-V literal builtin must have an integer "
3740 auto OperandValue = ParamEType->getIntParameter(0);
3747 Param, MIRBuilder, SPIRV::AccessQualifier::ReadWrite,
true);
3759 "Vulkan buffers have exactly one type for the type of the buffer.");
3761 "Vulkan buffer have 2 integer parameters: storage class and is "
3765 auto SC =
static_cast<SPIRV::StorageClass::StorageClass
>(
3776 "Vulkan push constants have exactly one type as argument.");
3790 StringRef NameWithParameters = TypeName;
3797 SPIRV::lookupOpenCLType(NameWithParameters);
3800 NameWithParameters);
3808 "Unknown builtin opaque type!");
3812 if (!NameWithParameters.
contains(
'_'))
3816 unsigned BaseNameLength = NameWithParameters.
find(
'_') - 1;
3820 bool HasTypeParameter = !
isDigit(Parameters[0][0]);
3821 if (HasTypeParameter)
3824 for (
unsigned i = HasTypeParameter ? 1 : 0; i < Parameters.size(); i++) {
3825 unsigned IntParameter = 0;
3826 bool ValidLiteral = !Parameters[i].getAsInteger(10, IntParameter);
3829 "Invalid format of SPIR-V builtin parameter literal!");
3833 NameWithParameters.
substr(0, BaseNameLength),
3834 TypeParameters, IntParameters);
3839 SPIRV::AccessQualifier::AccessQualifier AccessQual,
3860 if (Name ==
"spirv.Type") {
3862 }
else if (Name ==
"spirv.VulkanBuffer") {
3864 }
else if (Name ==
"spirv.Padding") {
3866 }
else if (Name ==
"spirv.PushConstant") {
3868 }
else if (Name ==
"spirv.Layout") {
3882 switch (TypeRecord->
Opcode) {
3883 case SPIRV::OpTypeImage:
3886 case SPIRV::OpTypePipe:
3889 case SPIRV::OpTypeDeviceEvent:
3892 case SPIRV::OpTypeSampler:
3895 case SPIRV::OpTypeSampledImage:
3898 case SPIRV::OpTypeCooperativeMatrixKHR:
MachineInstrBuilder MachineInstrBuilder & DefMI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU Lower Kernel Arguments
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Promote Memory to Register
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
static const fltSemantics & IEEEsingle()
APInt bitcastToAPInt() const
static APFloat getZero(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Zero.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
uint64_t getZExtValue() const
Get zero extended value.
This class represents an incoming formal argument to a Function.
Represent a constant reference to an array (0 or more elements consecutively in memory),...
static LLVM_ABI ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
LLVM_ABI FPClassTest getParamNoFPClass(unsigned i) const
Extract a test mask for disallowed floating-point value classes for the parameter.
LLVM_ABI FPClassTest getRetNoFPClass() const
Extract a test mask for disallowed floating-point value classes for the return value.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
@ ICMP_ULT
unsigned less than
const APFloat & getValueAPF() const
const APInt & getValue() const
Return the constant as an APInt value reference.
A parsed version of the target data layout string in and methods for querying it.
Tagged union holding either a T or a Error.
Class to represent fixed width SIMD vectors.
Class to represent function types.
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Type * getParamType(unsigned i) const
Parameter type accessors.
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
static constexpr LLT vector(ElementCount EC, unsigned ScalarSizeInBits)
Get a low-level vector of some number of elements and element width.
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr bool isVector() const
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
constexpr TypeSize getSizeInBytes() const
Returns the total size of the type in bytes, i.e.
This is an important class for using LLVM in a threaded context.
static MCOperand createReg(MCRegister Reg)
static MCOperand createImm(int64_t Val)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Helper class to build MachineInstr.
LLVMContext & getContext() const
MachineInstrBuilder buildSelect(const DstOp &Res, const SrcOp &Tst, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_SELECT Tst, Op0, Op1.
MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_ICMP Pred, Op0, Op1.
MachineBasicBlock::iterator getInsertPt()
Current insertion point for new instructions.
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef< Register > Res, bool HasSideEffects, bool isConvergent)
Build and insert a G_INTRINSIC instruction.
MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
MachineInstrBuilder buildZExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert G_STORE Val, Addr, MMO.
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineFunction & getMF()
Getter for the function we currently build.
const MachineBasicBlock & getMBB() const
Getter for the basic block we currently build.
MachineRegisterInfo * getMRI()
Getter for MRI.
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
const DataLayout & getDataLayout() const
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addUse(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addDef(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a virtual register definition operand.
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
unsigned getNumOperands() const
Retuns the total number of operands.
LLVM_ABI void copyIRFlags(const Instruction &I)
Copy all flags to MachineInst MIFlags.
void setFlag(MIFlag Flag)
Set a MI flag.
const MachineOperand & getOperand(unsigned i) const
A description of a memory reference used in the backend.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
MachineOperand class - Representation of each machine instruction operand.
const GlobalValue * getGlobal() const
const ConstantInt * getCImm() const
bool isCImm() const
isCImm - Test if this is a MO_CImmediate operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
const MDNode * getMetadata() const
Register getReg() const
getReg - Returns the register number.
const ConstantFP * getFPImm() const
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
LLT getType(Register Reg) const
Get the low-level type of Reg or LLT{} if Reg is not a generic (target independent) virtual register.
LLVM_ABI void setType(Register VReg, LLT Ty)
Set the low-level type of VReg to Ty.
LLVM_ABI void setRegClass(Register Reg, const TargetRegisterClass *RC)
setRegClass - Set the register class of the specified virtual register.
LLVM_ABI Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
const TargetRegisterClass * getRegClassOrNull(Register Reg) const
Return the register class of Reg, or null if Reg has not been assigned a register class yet.
unsigned getNumVirtRegs() const
getNumVirtRegs - Return the number of virtual registers created.
LLVM_ABI MachineInstr * getUniqueVRegDef(Register Reg) const
getUniqueVRegDef - Return the unique machine instr that defines the specified virtual register or nul...
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Wrapper class representing virtual and physical registers.
constexpr bool isValid() const
SPIRVTypeInst getImageType(const TargetExtType *ExtensionType, const SPIRV::AccessQualifier::AccessQualifier Qualifier, MachineIRBuilder &MIRBuilder)
SPIRVTypeInst getOrCreateOpTypeSampledImage(SPIRVTypeInst ImageType, MachineIRBuilder &MIRBuilder)
void assignSPIRVTypeToVReg(SPIRVTypeInst Type, Register VReg, const MachineFunction &MF)
const TargetRegisterClass * getRegClass(SPIRVTypeInst SpvType) const
unsigned getScalarOrVectorBitWidth(SPIRVTypeInst Type) const
SPIRVTypeInst getOrCreateSPIRVIntegerType(unsigned BitWidth, MachineIRBuilder &MIRBuilder)
SPIRVTypeInst getOrCreateSPIRVVectorType(SPIRVTypeInst BaseType, unsigned NumElements, MachineIRBuilder &MIRBuilder, bool EmitIR)
SPIRVTypeInst getOrCreateSPIRVTypeByName(StringRef TypeStr, MachineIRBuilder &MIRBuilder, bool EmitIR, SPIRV::StorageClass::StorageClass SC=SPIRV::StorageClass::Function, SPIRV::AccessQualifier::AccessQualifier AQ=SPIRV::AccessQualifier::ReadWrite)
Register buildGlobalVariable(Register Reg, SPIRVTypeInst BaseType, StringRef Name, const GlobalValue *GV, SPIRV::StorageClass::StorageClass Storage, const MachineInstr *Init, bool IsConst, const std::optional< SPIRV::LinkageType::LinkageType > &LinkageType, MachineIRBuilder &MIRBuilder, bool IsInstSelector)
SPIRVTypeInst getOrCreateOpTypeByOpcode(const Type *Ty, MachineIRBuilder &MIRBuilder, unsigned Opcode)
unsigned getScalarOrVectorComponentCount(Register VReg) const
const Type * getTypeForSPIRVType(SPIRVTypeInst Ty) const
SPIRVTypeInst getOrCreatePaddingType(MachineIRBuilder &MIRBuilder)
unsigned getPointerSize() const
LLT getRegType(SPIRVTypeInst SpvType) const
SPIRVTypeInst getOrCreateSPIRVBoolType(MachineIRBuilder &MIRBuilder, bool EmitIR)
SPIRVTypeInst getOrCreateSPIRVPointerType(const Type *BaseType, MachineIRBuilder &MIRBuilder, SPIRV::StorageClass::StorageClass SC)
bool isScalarOfType(Register VReg, unsigned TypeOpcode) const
Register getSPIRVTypeID(SPIRVTypeInst SpirvType) const
Register getOrCreateConstIntArray(uint64_t Val, size_t Num, MachineInstr &I, SPIRVTypeInst SpvType, const SPIRVInstrInfo &TII)
SPIRVTypeInst getOrCreateOpTypeCoopMatr(MachineIRBuilder &MIRBuilder, const TargetExtType *ExtensionType, SPIRVTypeInst ElemType, uint32_t Scope, uint32_t Rows, uint32_t Columns, uint32_t Use, bool EmitIR)
SPIRVTypeInst getOrCreateUnknownType(const Type *Ty, MachineIRBuilder &MIRBuilder, unsigned Opcode, const ArrayRef< MCOperand > Operands)
Register buildConstantFP(APFloat Val, MachineIRBuilder &MIRBuilder, SPIRVTypeInst SpvType=nullptr)
SPIRVTypeInst getOrCreateOpTypePipe(MachineIRBuilder &MIRBuilder, SPIRV::AccessQualifier::AccessQualifier AccQual)
SPIRVTypeInst getScalarOrVectorComponentType(SPIRVTypeInst Type) const
SPIRVTypeInst getOrCreateVulkanBufferType(MachineIRBuilder &MIRBuilder, Type *ElemType, SPIRV::StorageClass::StorageClass SC, bool IsWritable, bool EmitIr=false)
SPIRVTypeInst getPointeeType(SPIRVTypeInst PtrType)
SPIRVTypeInst getOrCreateSPIRVType(const Type *Type, MachineInstr &I, SPIRV::AccessQualifier::AccessQualifier AQ, bool EmitIR)
Register getOrCreateConsIntVector(uint64_t Val, MachineIRBuilder &MIRBuilder, SPIRVTypeInst SpvType, bool EmitIR)
bool isScalarOrVectorOfType(Register VReg, unsigned TypeOpcode) const
SPIRVTypeInst getOrCreateLayoutType(MachineIRBuilder &MIRBuilder, const TargetExtType *T, bool EmitIr=false)
Register getOrCreateConstNullPtr(MachineIRBuilder &MIRBuilder, SPIRVTypeInst SpvType)
SPIRVTypeInst getSPIRVTypeForVReg(Register VReg, const MachineFunction *MF=nullptr) const
SPIRVTypeInst getOrCreateOpTypeSampler(MachineIRBuilder &MIRBuilder)
SPIRV::StorageClass::StorageClass getPointerStorageClass(Register VReg) const
Register buildConstantSampler(Register Res, unsigned AddrMode, unsigned Param, unsigned FilerMode, MachineIRBuilder &MIRBuilder)
Register buildConstantInt(uint64_t Val, MachineIRBuilder &MIRBuilder, SPIRVTypeInst SpvType, bool EmitIR, bool ZeroAsNull=true)
SPIRVTypeInst getOrCreateVulkanPushConstantType(MachineIRBuilder &MIRBuilder, Type *ElemType)
SPIRVTypeInst getOrCreateOpTypeDeviceEvent(MachineIRBuilder &MIRBuilder)
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Represent a constant reference to a string, i.e.
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
static constexpr size_t npos
bool consume_back(StringRef Suffix)
Returns true if this StringRef has the given suffix and removes that suffix.
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
std::string str() const
Get the contents as an std::string.
constexpr StringRef substr(size_t Start, size_t N=npos) const
Return a reference to the substring from [Start, Start + N).
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
bool contains_insensitive(StringRef Other) const
Return true if the given string is a substring of *this, and false otherwise.
StringRef slice(size_t Start, size_t End) const
Return a reference to the substring from [Start, End).
constexpr size_t size() const
Get the string size.
bool contains(StringRef Other) const
Return true if the given string is a substring of *this, and false otherwise.
size_t find_first_of(char C, size_t From=0) const
Find the first character in the string that is C, or npos if not found.
size_t find(char C, size_t From=0) const
Search for the first character C in the string.
bool ends_with(StringRef Suffix) const
Check if this string ends with the given Suffix.
bool consume_front(char Prefix)
Returns true if this StringRef has the given prefix and removes that prefix.
A switch()-like statement whose cases are string literals.
StringSwitch & EndsWith(StringLiteral S, T Value)
Class to represent target extensions types, which are generally unintrospectable from target-independ...
ArrayRef< Type * > type_params() const
Return the type parameters for this particular target extension type.
unsigned getNumIntParameters() const
static LLVM_ABI TargetExtType * get(LLVMContext &Context, StringRef Name, ArrayRef< Type * > Types={}, ArrayRef< unsigned > Ints={})
Return a target extension type having the specified name and optional type and integer parameters.
Type * getTypeParameter(unsigned i) const
unsigned getNumTypeParameters() const
unsigned getIntParameter(unsigned i) const
The instances of the Type class are immutable: once they are created, they are never changed.
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
LLVM_ABI StringRef getStructName() const
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
static LLVM_ABI Type * getFloatTy(LLVMContext &C)
static LLVM_ABI Type * getHalfTy(LLVMContext &C)
bool isVoidTy() const
Return true if this is 'void'.
LLVM Value Representation.
LLVM_ABI Value(Type *Ty, unsigned scid)
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
Represents a version number in the form major[.minor[.subminor[.build]]].
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
LLVM_C_ABI LLVMTypeRef LLVMVectorType(LLVMTypeRef ElementType, unsigned ElementCount)
Create a vector type that contains a defined type and has a specific number of elements.
std::string lookupBuiltinNameHelper(StringRef DemangledCall, FPDecorationId *DecorationId)
Parses the name part of the demangled builtin call.
Type * parseBuiltinCallArgumentType(StringRef TypeStr, LLVMContext &Ctx)
bool parseBuiltinTypeStr(SmallVector< StringRef, 10 > &BuiltinArgsTypeStrs, const StringRef DemangledCall, LLVMContext &Ctx)
std::optional< bool > lowerBuiltin(const StringRef DemangledCall, SPIRV::InstructionSet::InstructionSet Set, MachineIRBuilder &MIRBuilder, const Register OrigRet, const Type *OrigRetTy, const SmallVectorImpl< Register > &Args, SPIRVGlobalRegistry *GR, const CallBase &CB)
std::tuple< int, unsigned, unsigned > mapBuiltinToOpcode(const StringRef DemangledCall, SPIRV::InstructionSet::InstructionSet Set)
Helper function for finding a builtin function attributes by a demangled function name.
Type * parseBuiltinCallArgumentBaseType(const StringRef DemangledCall, unsigned ArgIdx, LLVMContext &Ctx)
Parses the provided ArgIdx argument base type in the DemangledCall skeleton.
TargetExtType * parseBuiltinTypeNameToTargetExtType(std::string TypeName, LLVMContext &Context)
Translates a string representing a SPIR-V or OpenCL builtin type to a TargetExtType that can be furth...
SPIRVTypeInst lowerBuiltinType(const Type *OpaqueType, SPIRV::AccessQualifier::AccessQualifier AccessQual, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
This is an optimization pass for GlobalISel generic memory operations.
void buildOpName(Register Target, const StringRef &Name, MachineIRBuilder &MIRBuilder)
static bool build2DBlockIOINTELInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building Intel's 2d block io instructions.
static bool generateExtInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR, const CallBase &CB)
static void buildSRetInst(unsigned Opcode, Register SRetReg, Register Op1Reg, Register Op2Reg, SPIRVTypeInst RetType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateBindlessImageINTELInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateGetQueryInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateLoadStoreInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateConstructInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildAtomicFlagInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building atomic flag instructions (e.g. OpAtomicFlagTestAndSet).
static bool generateImageSizeQueryInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRV::SamplerFilterMode::SamplerFilterMode getSamplerFilterModeFromBitmask(unsigned Bitmask)
static bool buildAtomicStoreInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic store instruction.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
FunctionAddr VTableAddr uintptr_t uintptr_t Int32Ty
void addNumImm(const APInt &Imm, MachineInstrBuilder &MIB)
static bool buildExtendedBitOpsInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building extended bit operations.
static const Type * getBlockStructType(Register ParamReg, MachineRegisterInfo *MRI)
static bool generateGroupInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
FPDecorationId demangledPostfixToDecorationId(const std::string &S)
static SPIRVTypeInst getSamplerType(MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static unsigned getNumComponentsForDim(SPIRV::Dim::Dim dim)
static bool generateImageChannelDataTypeInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool builtinMayNeedPromotionToVec(uint32_t BuiltinNumber)
static std::tuple< Register, SPIRVTypeInst > buildBoolRegister(MachineIRBuilder &MIRBuilder, SPIRVTypeInst ResultType, SPIRVGlobalRegistry *GR)
Helper function building either a resulting scalar or vector bool register depending on the expected return type.
Register createVirtualRegister(SPIRVTypeInst SpvType, SPIRVGlobalRegistry *GR, MachineRegisterInfo *MRI, const MachineFunction &MF)
static bool generateICarryBorrowInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static Register buildScopeReg(Register CLScopeRegister, SPIRV::Scope::Scope Scope, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR, MachineRegisterInfo *MRI)
void updateRegType(Register Reg, Type *Ty, SPIRVTypeInst SpirvTy, SPIRVGlobalRegistry *GR, MachineIRBuilder &MIB, MachineRegisterInfo &MRI)
Helper external function for assigning a SPIRV type to a register, ensuring the register class and ty...
static SPIRVTypeInst getInlineSpirvType(const TargetExtType *ExtensionType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
uint64_t getIConstVal(Register ConstReg, const MachineRegisterInfo *MRI)
static Register buildConstantIntReg32(uint64_t Val, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
SmallVector< MachineInstr *, 4 > createContinuedInstructions(MachineIRBuilder &MIRBuilder, unsigned Opcode, unsigned MinWC, unsigned ContinuedOpcode, ArrayRef< Register > Args, Register ReturnRegister, Register TypeID)
static unsigned getNumSizeComponents(SPIRVTypeInst imgType)
Helper function for obtaining the number of size components.
SPIRV::MemorySemantics::MemorySemantics getMemSemanticsForStorageClass(SPIRV::StorageClass::StorageClass SC)
constexpr unsigned storageClassToAddressSpace(SPIRV::StorageClass::StorageClass SC)
static bool generateSampleImageInst(const StringRef DemangledCall, const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateBarrierInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVTypeInst getLayoutType(const TargetExtType *ExtensionType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
LLVM_ABI void SplitString(StringRef Source, SmallVectorImpl< StringRef > &OutFragments, StringRef Delimiters=" \t\n\v\f\r")
SplitString - Split up the specified string according to the specified delimiters, appending the result fragments to the output list.
static SPIRVTypeInst getVulkanPushConstantType(const TargetExtType *ExtensionType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildImageChannelDataTypeInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateKernelClockInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static void setRegClassIfNull(Register Reg, MachineRegisterInfo *MRI, SPIRVGlobalRegistry *GR)
static bool generateGroupUniformInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateWaveInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
void buildOpDecorate(Register Reg, MachineIRBuilder &MIRBuilder, SPIRV::Decoration::Decoration Dec, const std::vector< uint32_t > &DecArgs, StringRef StrImm)
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
static bool generateMulExtendedInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildBarrierInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building barriers, i.e., memory/control ordering operations.
static bool generateAsyncCopy(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static Register buildBuiltinVariableLoad(MachineIRBuilder &MIRBuilder, SPIRVTypeInst VariableType, SPIRVGlobalRegistry *GR, SPIRV::BuiltIn::BuiltIn BuiltinValue, LLT LLType, Register Reg=Register(0), bool isConst=true, const std::optional< SPIRV::LinkageType::LinkageType > &LinkageTy={ SPIRV::LinkageType::Import})
Helper function for building a load instruction for loading a builtin global variable of BuiltinValue...
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
static SPIRV::Scope::Scope getSPIRVScope(SPIRV::CLMemoryScope ClScope)
static bool buildAPFixedPointInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateBlockingPipesInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
static bool generateEnqueueInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
static const Type * getMachineInstrType(MachineInstr *MI)
bool isDigit(char C)
Checks if character C is one of the 10 decimal digits.
static SPIRV::SamplerAddressingMode::SamplerAddressingMode getSamplerAddressingModeFromBitmask(unsigned Bitmask)
static bool generateAtomicInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static Register buildLoadInst(SPIRVTypeInst BaseType, Register PtrRegister, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR, LLT LowLevelType, Register DestinationReg=Register(0))
Helper function for building a load instruction loading into the DestinationReg.
static bool generateDotOrFMulInst(const StringRef DemangledCall, const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateConvertInst(const StringRef DemangledCall, const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateTernaryBitwiseFunctionINTELInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static std::unique_ptr< const SPIRV::IncomingCall > lookupBuiltin(StringRef DemangledCall, SPIRV::InstructionSet::InstructionSet Set, Register ReturnRegister, SPIRVTypeInst ReturnType, const SmallVectorImpl< Register > &Arguments)
Looks up the demangled builtin call in the SPIRVBuiltins.td records using the provided DemangledCall ...
static bool generateCastToPtrInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
constexpr bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC)
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector<T, 0>).
static bool buildSelectInst(MachineIRBuilder &MIRBuilder, Register ReturnRegister, Register SourceRegister, SPIRVTypeInst ReturnType, SPIRVGlobalRegistry *GR)
Helper function for building either a vector or scalar select instruction depending on the expected R...
static Register buildMemSemanticsReg(Register SemanticsRegister, Register PtrRegister, unsigned &Semantics, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static unsigned getConstFromIntrinsic(Register Reg, MachineRegisterInfo *MRI)
static SPIRVTypeInst getOrCreateSPIRVDeviceEventPointer(MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateImageMiscQueryInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateSelectInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder)
static bool buildAtomicLoadInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic load instruction.
static bool generateIntelSubgroupsInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVTypeInst getCoopMatrType(const TargetExtType *ExtensionType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateExtendedBitOpsInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildPipeInst(const SPIRV::IncomingCall *Call, unsigned Opcode, unsigned Scope, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateSpecConstantInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Type * parseBasicTypeName(StringRef &TypeName, LLVMContext &Ctx)
static bool generateVectorLoadStoreInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool genWorkgroupQuery(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR, SPIRV::BuiltIn::BuiltIn BuiltinValue, uint64_t DefaultValue)
static bool generateCoopMatrInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SmallVector< Register > getBuiltinCallArguments(const SPIRV::IncomingCall *Call, uint32_t BuiltinNumber, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVTypeInst getNonParameterizedType(const TargetExtType *ExtensionType, const SPIRV::BuiltinType *TypeRecord, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildBindlessImageINTELInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building Intel's bindless image instructions.
static bool buildAtomicFloatingRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic floating-type instruction.
MachineInstr * getDefInstrMaybeConstant(Register &ConstReg, const MachineRegisterInfo *MRI)
constexpr unsigned BitWidth
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
static bool generate2DBlockIOINTELInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateReadImageInst(const StringRef DemangledCall, const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
bool hasBuiltinTypePrefix(StringRef Name)
static bool buildEnqueueKernel(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Type * getMDOperandAsType(const MDNode *N, unsigned I)
static bool generatePipeInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildTernaryBitwiseFunctionINTELInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building Intel's OpBitwiseFunctionINTEL instruction.
static bool generateAPFixedPointInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildAtomicRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building atomic instructions.
static SPIRV::MemorySemantics::MemorySemantics getSPIRVMemSemantics(std::memory_order MemOrder)
static bool generateRelationalInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVTypeInst getPipeType(const TargetExtType *ExtensionType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildAtomicInitInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder)
Helper function for translating atomic init to OpStore.
static bool generateWriteImageInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static Type * parseTypeString(const StringRef Name, LLVMContext &Context)
static bool generateArithmeticInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
bool isSpvIntrinsic(const MachineInstr &MI, Intrinsic::ID IntrinsicID)
static bool generatePredicatedLoadStoreInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateAtomicFloatingInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateAFPInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildNDRange(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static MachineInstr * getBlockStructInstr(Register ParamReg, MachineRegisterInfo *MRI)
static SPIRVTypeInst getSampledImageType(const TargetExtType *OpaqueType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildOpFromWrapper(MachineIRBuilder &MIRBuilder, unsigned Opcode, const SPIRV::IncomingCall *Call, Register TypeReg, ArrayRef< uint32_t > ImmArgs={})
static unsigned getSamplerParamFromBitmask(unsigned Bitmask)
static SPIRVTypeInst getVulkanBufferType(const TargetExtType *ExtensionType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildAtomicCompareExchangeInst(const SPIRV::IncomingCall *Call, const SPIRV::DemangledBuiltin *Builtin, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic compare-exchange instruction.
std::string getLinkStringForBuiltIn(SPIRV::BuiltIn::BuiltIn BuiltInValue)
static bool generateBuiltinVar(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Implement std::hash so that hash_code can be used in STL containers.
This struct is a compact representation of a valid (non-zero power of two) alignment.
This class contains a discriminated union of information about pointers in memory operands,...
FPRoundingMode::FPRoundingMode RoundingMode
InstructionSet::InstructionSet Set
InstructionSet::InstructionSet Set
InstructionSet::InstructionSet Set
InstructionSet::InstructionSet Set
const SmallVectorImpl< Register > & Arguments
const SPIRVTypeInst ReturnType
IncomingCall(const std::string BuiltinName, const DemangledBuiltin *Builtin, const Register ReturnRegister, SPIRVTypeInst ReturnType, const SmallVectorImpl< Register > &Arguments)
const std::string BuiltinName
const Register ReturnRegister
const DemangledBuiltin * Builtin
InstructionSet::InstructionSet Set
StringRef SpirvTypeLiteral
InstructionSet::InstructionSet Set
FPRoundingMode::FPRoundingMode RoundingMode