LLVM 22.0.0git
SPIRVBuiltins.cpp
//===- SPIRVBuiltins.cpp - SPIR-V Built-in Functions ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements lowering builtin function calls and types using their
// demangled names and TableGen records.
//
//===----------------------------------------------------------------------===//

#include "SPIRVBuiltins.h"
#include "SPIRV.h"
#include "SPIRVSubtarget.h"
#include "SPIRVUtils.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IntrinsicsSPIRV.h"
#include <regex>
#include <string>
#include <tuple>

#define DEBUG_TYPE "spirv-builtins"

namespace llvm {
namespace SPIRV {
#define GET_BuiltinGroup_DECL
#include "SPIRVGenTables.inc"

struct DemangledBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  BuiltinGroup Group;
  uint8_t MinNumArgs;
  uint8_t MaxNumArgs;
};

#define GET_DemangledBuiltins_DECL
#define GET_DemangledBuiltins_IMPL

struct IncomingCall {
  const std::string BuiltinName;
  const DemangledBuiltin *Builtin;
  const Register ReturnRegister;
  const SPIRVType *ReturnType;
  const SmallVectorImpl<Register> &Arguments;

  IncomingCall(const std::string BuiltinName, const DemangledBuiltin *Builtin,
               const Register ReturnRegister, const SPIRVType *ReturnType,
               const SmallVectorImpl<Register> &Arguments)
      : BuiltinName(BuiltinName), Builtin(Builtin),
        ReturnRegister(ReturnRegister), ReturnType(ReturnType),
        Arguments(Arguments) {}

  bool isSpirvOp() const { return BuiltinName.rfind("__spirv_", 0) == 0; }
};

struct NativeBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  uint32_t Opcode;
};

#define GET_NativeBuiltins_DECL
#define GET_NativeBuiltins_IMPL

struct GroupBuiltin {
  StringRef Name;
  uint32_t Opcode;
  uint32_t GroupOperation;
  bool IsElect;
  bool IsAllOrAny;
  bool IsAllEqual;
  bool IsBallot;
  bool IsInverseBallot;
  bool IsBallotBitExtract;
  bool IsBallotFindBit;
  bool IsLogical;
  bool NoGroupOperation;
  bool HasBoolArg;
};

#define GET_GroupBuiltins_DECL
#define GET_GroupBuiltins_IMPL

struct IntelSubgroupsBuiltin {
  StringRef Name;
  uint32_t Opcode;
  bool IsBlock;
  bool IsWrite;
  bool IsMedia;
};

#define GET_IntelSubgroupsBuiltins_DECL
#define GET_IntelSubgroupsBuiltins_IMPL

struct AtomicFloatingBuiltin {
  StringRef Name;
  uint32_t Opcode;
};

#define GET_AtomicFloatingBuiltins_DECL
#define GET_AtomicFloatingBuiltins_IMPL

struct GroupUniformBuiltin {
  StringRef Name;
  uint32_t Opcode;
  bool IsLogical;
};

#define GET_GroupUniformBuiltins_DECL
#define GET_GroupUniformBuiltins_IMPL

struct GetBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  BuiltIn::BuiltIn Value;
};

using namespace BuiltIn;
#define GET_GetBuiltins_DECL
#define GET_GetBuiltins_IMPL

struct ImageQueryBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  uint32_t Component;
};

#define GET_ImageQueryBuiltins_DECL
#define GET_ImageQueryBuiltins_IMPL

struct IntegerDotProductBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  uint32_t Opcode;
};

#define GET_IntegerDotProductBuiltins_DECL
#define GET_IntegerDotProductBuiltins_IMPL

struct ConvertBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  bool IsDestinationSigned;
  bool IsSaturated;
  bool IsRounded;
  bool IsBfloat16;
  bool IsTF32;
  FPRoundingMode::FPRoundingMode RoundingMode;
};

struct VectorLoadStoreBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  uint32_t Number;
  uint32_t ElementCount;
  bool IsRounded;
  FPRoundingMode::FPRoundingMode RoundingMode;
};

using namespace FPRoundingMode;
#define GET_ConvertBuiltins_DECL
#define GET_ConvertBuiltins_IMPL

using namespace InstructionSet;
#define GET_VectorLoadStoreBuiltins_DECL
#define GET_VectorLoadStoreBuiltins_IMPL

#define GET_CLMemoryScope_DECL
#define GET_CLSamplerAddressingMode_DECL
#define GET_CLMemoryFenceFlags_DECL
#define GET_ExtendedBuiltins_DECL
#include "SPIRVGenTables.inc"
} // namespace SPIRV

//===----------------------------------------------------------------------===//
// Misc functions for looking up builtins and verifying requirements using
// TableGen records
//===----------------------------------------------------------------------===//

namespace SPIRV {
/// Parses the name part of the demangled builtin call.
std::string lookupBuiltinNameHelper(StringRef DemangledCall,
                                    FPDecorationId *DecorationId) {
  StringRef PassPrefix = "(anonymous namespace)::";
  std::string BuiltinName;
  // Itanium Demangler result may have "(anonymous namespace)::" prefix
  if (DemangledCall.starts_with(PassPrefix))
    BuiltinName = DemangledCall.substr(PassPrefix.size());
  else
    BuiltinName = DemangledCall;
  // Extract the builtin function name and types of arguments from the call
  // skeleton.
  BuiltinName = BuiltinName.substr(0, BuiltinName.find('('));

  // Account for possible "__spirv_ocl_" prefix in SPIR-V friendly LLVM IR
  if (BuiltinName.rfind("__spirv_ocl_", 0) == 0)
    BuiltinName = BuiltinName.substr(12);

  // Check if the extracted name contains type information between angle
  // brackets. If so, the builtin is an instantiated template and the
  // template arguments and return type must be removed.
  std::size_t Pos1 = BuiltinName.rfind('<');
  if (Pos1 != std::string::npos && BuiltinName.back() == '>') {
    std::size_t Pos2 = BuiltinName.rfind(' ', Pos1);
    if (Pos2 == std::string::npos)
      Pos2 = 0;
    else
      ++Pos2;
    BuiltinName = BuiltinName.substr(Pos2, Pos1 - Pos2);
    BuiltinName = BuiltinName.substr(BuiltinName.find_last_of(' ') + 1);
  }

  // Check if the extracted name begins with:
  // - "__spirv_ImageSampleExplicitLod"
  // - "__spirv_ImageRead"
  // - "__spirv_ImageWrite"
  // - "__spirv_ImageQuerySizeLod"
  // - "__spirv_UDotKHR"
  // - "__spirv_SDotKHR"
  // - "__spirv_SUDotKHR"
  // - "__spirv_SDotAccSatKHR"
  // - "__spirv_UDotAccSatKHR"
  // - "__spirv_SUDotAccSatKHR"
  // - "__spirv_ReadClockKHR"
  // - "__spirv_SubgroupBlockReadINTEL"
  // - "__spirv_SubgroupImageBlockReadINTEL"
  // - "__spirv_SubgroupImageMediaBlockReadINTEL"
  // - "__spirv_SubgroupImageMediaBlockWriteINTEL"
  // - "__spirv_Convert"
  // - "__spirv_Round"
  // - "__spirv_UConvert"
  // - "__spirv_SConvert"
  // - "__spirv_FConvert"
  // - "__spirv_SatConvert"
  // and maybe contains return type information at the end "_R<type>".
  // If so, extract the plain builtin name without the type information.
  static const std::regex SpvWithR(
      "(__spirv_(ImageSampleExplicitLod|ImageRead|ImageWrite|ImageQuerySizeLod|"
      "UDotKHR|"
      "SDotKHR|SUDotKHR|SDotAccSatKHR|UDotAccSatKHR|SUDotAccSatKHR|"
      "ReadClockKHR|SubgroupBlockReadINTEL|SubgroupImageBlockReadINTEL|"
      "SubgroupImageMediaBlockReadINTEL|SubgroupImageMediaBlockWriteINTEL|"
      "Convert|Round|"
      "UConvert|SConvert|FConvert|SatConvert)[^_]*)(_R[^_]*_?(\\w+)?.*)?");
  std::smatch Match;
  if (std::regex_match(BuiltinName, Match, SpvWithR) && Match.size() > 1) {
    std::ssub_match SubMatch;
    if (DecorationId && Match.size() > 3) {
      SubMatch = Match[4];
      *DecorationId = demangledPostfixToDecorationId(SubMatch.str());
    }
    SubMatch = Match[1];
    BuiltinName = SubMatch.str();
  }

  return BuiltinName;
}
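
// Illustrative walk-through of the helper above (hypothetical input, not from
// the TableGen records): for the demangled call
//   "__spirv_ConvertFToF_Rhalf_rtp(float)"
// the name is cut at '(', the SpvWithR regex strips the "_Rhalf_rtp"
// return-type postfix, DecorationId (when requested) is set from the "rtp"
// postfix, and the helper returns "__spirv_ConvertFToF".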
} // namespace SPIRV

/// Looks up the demangled builtin call in the SPIRVBuiltins.td records using
/// the provided \p DemangledCall and specified \p Set.
///
/// The lookup follows this algorithm, returning the first successful match:
/// 1. Search with the plain demangled name (expecting a 1:1 match).
/// 2. Search with a prefix or suffix added to the demangled name, signifying
///    the type of the first argument.
///
/// \returns Wrapper around the demangled call and found builtin definition.
static std::unique_ptr<const SPIRV::IncomingCall>
lookupBuiltin(StringRef DemangledCall,
              SPIRV::InstructionSet::InstructionSet Set,
              Register ReturnRegister, const SPIRVType *ReturnType,
              const SmallVectorImpl<Register> &Arguments) {
  std::string BuiltinName = SPIRV::lookupBuiltinNameHelper(DemangledCall);

  SmallVector<StringRef, 10> BuiltinArgumentTypes;
  StringRef BuiltinArgs =
      DemangledCall.slice(DemangledCall.find('(') + 1, DemangledCall.find(')'));
  BuiltinArgs.split(BuiltinArgumentTypes, ',', -1, false);

  // Look up the builtin in the defined set. Start with the plain demangled
  // name, expecting a 1:1 match in the defined builtin set.
  const SPIRV::DemangledBuiltin *Builtin;
  if ((Builtin = SPIRV::lookupBuiltin(BuiltinName, Set)))
    return std::make_unique<SPIRV::IncomingCall>(
        BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);

  // If the initial look up was unsuccessful and the demangled call takes at
  // least 1 argument, add a prefix or suffix signifying the type of the first
  // argument and repeat the search.
  if (BuiltinArgumentTypes.size() >= 1) {
    char FirstArgumentType = BuiltinArgumentTypes[0][0];
    // Prefix to be added to the builtin's name for lookup.
    // For example, OpenCL "abs" taking an unsigned value has a prefix "u_".
    std::string Prefix;

    switch (FirstArgumentType) {
    // Unsigned:
    case 'u':
      if (Set == SPIRV::InstructionSet::OpenCL_std)
        Prefix = "u_";
      else if (Set == SPIRV::InstructionSet::GLSL_std_450)
        Prefix = "u";
      break;
    // Signed:
    case 'c':
    case 's':
    case 'i':
    case 'l':
      if (Set == SPIRV::InstructionSet::OpenCL_std)
        Prefix = "s_";
      else if (Set == SPIRV::InstructionSet::GLSL_std_450)
        Prefix = "s";
      break;
    // Floating-point:
    case 'f':
    case 'd':
    case 'h':
      if (Set == SPIRV::InstructionSet::OpenCL_std ||
          Set == SPIRV::InstructionSet::GLSL_std_450)
        Prefix = "f";
      break;
    }

    // If argument-type name prefix was added, look up the builtin again.
    if (!Prefix.empty() &&
        (Builtin = SPIRV::lookupBuiltin(Prefix + BuiltinName, Set)))
      return std::make_unique<SPIRV::IncomingCall>(
          BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);

    // If lookup with a prefix failed, find a suffix to be added to the
    // builtin's name for lookup. For example, OpenCL "group_reduce_max" taking
    // an unsigned value has a suffix "u".
    std::string Suffix;

    switch (FirstArgumentType) {
    // Unsigned:
    case 'u':
      Suffix = "u";
      break;
    // Signed:
    case 'c':
    case 's':
    case 'i':
    case 'l':
      Suffix = "s";
      break;
    // Floating-point:
    case 'f':
    case 'd':
    case 'h':
      Suffix = "f";
      break;
    }

    // If argument-type name suffix was added, look up the builtin again.
    if (!Suffix.empty() &&
        (Builtin = SPIRV::lookupBuiltin(BuiltinName + Suffix, Set)))
      return std::make_unique<SPIRV::IncomingCall>(
          BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);
  }

  // No builtin with such name was found in the set.
  return nullptr;
}
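
// Illustrative example of the lookup above: an OpenCL C call "abs(x)" with an
// unsigned x demangles to "abs(unsigned int)". The plain lookup for "abs"
// fails, the first argument's type character 'u' selects the "u_" prefix, and
// the second lookup is assumed to hit the "u_abs" record in SPIRVBuiltins.td.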
371
374 // We expect the following sequence of instructions:
375 // %0:_(pN) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.spv.alloca)
376 // or = G_GLOBAL_VALUE @block_literal_global
377 // %1:_(pN) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.spv.bitcast), %0
378 // %2:_(p4) = G_ADDRSPACE_CAST %1:_(pN)
379 MachineInstr *MI = MRI->getUniqueVRegDef(ParamReg);
380 assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST &&
381 MI->getOperand(1).isReg());
382 Register BitcastReg = MI->getOperand(1).getReg();
383 MachineInstr *BitcastMI = MRI->getUniqueVRegDef(BitcastReg);
384 assert(isSpvIntrinsic(*BitcastMI, Intrinsic::spv_bitcast) &&
385 BitcastMI->getOperand(2).isReg());
386 Register ValueReg = BitcastMI->getOperand(2).getReg();
387 MachineInstr *ValueMI = MRI->getUniqueVRegDef(ValueReg);
388 return ValueMI;
389}

// Return an integer constant corresponding to the given register and
// defined in spv_track_constant.
// TODO: maybe unify with prelegalizer pass.
static unsigned getConstFromIntrinsic(Register Reg, MachineRegisterInfo *MRI) {
  MachineInstr *DefMI = MRI->getUniqueVRegDef(Reg);
  assert(DefMI->getOpcode() == TargetOpcode::G_CONSTANT &&
         DefMI->getOperand(1).isCImm());
  return DefMI->getOperand(1).getCImm()->getValue().getZExtValue();
}

// Return type of the instruction result from spv_assign_type intrinsic.
// TODO: maybe unify with prelegalizer pass.
static Type *getMachineInstrType(MachineInstr *MI) {
  MachineInstr *NextMI = MI->getNextNode();
  if (!NextMI)
    return nullptr;
  if (isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_name))
    if ((NextMI = NextMI->getNextNode()) == nullptr)
      return nullptr;
  Register ValueReg = MI->getOperand(0).getReg();
  if ((!isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_type) &&
       !isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_ptr_type)) ||
      NextMI->getOperand(1).getReg() != ValueReg)
    return nullptr;
  Type *Ty = getMDOperandAsType(NextMI->getOperand(2).getMetadata(), 0);
  assert(Ty && "Type is expected");
  return Ty;
}

static const Type *getBlockStructType(Register ParamReg,
                                      MachineRegisterInfo *MRI) {
  // In principle, this information should be passed to us from Clang via
  // an elementtype attribute. However, said attribute requires that
  // the function call be an intrinsic, which it is not. Instead, we rely on
  // being able to trace this to the declaration of a variable: OpenCL C
  // specification section 6.12.5 should guarantee that we can do this.
  MachineInstr *MI = getBlockStructInstr(ParamReg, MRI);
  if (MI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE)
    return MI->getOperand(1).getGlobal()->getType();
  assert(isSpvIntrinsic(*MI, Intrinsic::spv_alloca) &&
         "Blocks in OpenCL C must be traceable to allocation site");
  return getMachineInstrType(MI);
}

//===----------------------------------------------------------------------===//
// Helper functions for building misc instructions
//===----------------------------------------------------------------------===//

/// Helper function building either a resulting scalar or vector bool register
/// depending on the expected \p ResultType.
///
/// \returns Tuple of the resulting register and its type.
static std::tuple<Register, SPIRVType *>
buildBoolRegister(MachineIRBuilder &MIRBuilder, const SPIRVType *ResultType,
                  SPIRVGlobalRegistry *GR) {
  LLT Type;
  SPIRVType *BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder, true);

  if (ResultType->getOpcode() == SPIRV::OpTypeVector) {
    unsigned VectorElements = ResultType->getOperand(2).getImm();
    BoolType = GR->getOrCreateSPIRVVectorType(BoolType, VectorElements,
                                              MIRBuilder, true);
    const FixedVectorType *LLVMVectorType =
        cast<FixedVectorType>(GR->getTypeForSPIRVType(BoolType));
    Type = LLT::vector(LLVMVectorType->getElementCount(), 1);
  } else {
    Type = LLT::scalar(1);
  }

  Register ResultRegister =
      MIRBuilder.getMRI()->createGenericVirtualRegister(Type);
  MIRBuilder.getMRI()->setRegClass(ResultRegister, GR->getRegClass(ResultType));
  GR->assignSPIRVTypeToVReg(BoolType, ResultRegister, MIRBuilder.getMF());
  return std::make_tuple(ResultRegister, BoolType);
}

/// Helper function for building either a vector or scalar select instruction
/// depending on the expected \p ResultType.
static bool buildSelectInst(MachineIRBuilder &MIRBuilder,
                            Register ReturnRegister, Register SourceRegister,
                            const SPIRVType *ReturnType,
                            SPIRVGlobalRegistry *GR) {
  Register TrueConst, FalseConst;

  if (ReturnType->getOpcode() == SPIRV::OpTypeVector) {
    unsigned Bits = GR->getScalarOrVectorBitWidth(ReturnType);
    uint64_t AllOnes = APInt::getAllOnes(Bits).getZExtValue();
    TrueConst =
        GR->getOrCreateConsIntVector(AllOnes, MIRBuilder, ReturnType, true);
    FalseConst = GR->getOrCreateConsIntVector(0, MIRBuilder, ReturnType, true);
  } else {
    TrueConst = GR->buildConstantInt(1, MIRBuilder, ReturnType, true);
    FalseConst = GR->buildConstantInt(0, MIRBuilder, ReturnType, true);
  }

  return MIRBuilder.buildSelect(ReturnRegister, SourceRegister, TrueConst,
                                FalseConst);
}

/// Helper function for building a load instruction loading into the
/// \p DestinationReg.
static Register buildLoadInst(SPIRVType *BaseType, Register PtrRegister,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR, LLT LowLevelType,
                              Register DestinationReg = Register(0)) {
  if (!DestinationReg.isValid())
    DestinationReg = createVirtualRegister(BaseType, GR, MIRBuilder);
  // TODO: consider using correct address space and alignment (p0 is canonical
  // type for selection though).
  MachinePointerInfo PtrInfo = MachinePointerInfo();
  MIRBuilder.buildLoad(DestinationReg, PtrRegister, PtrInfo, Align());
  return DestinationReg;
}

/// Helper function for building a load instruction for loading a builtin
/// global variable of \p BuiltinValue value.
static Register buildBuiltinVariableLoad(
    MachineIRBuilder &MIRBuilder, SPIRVType *VariableType,
    SPIRVGlobalRegistry *GR, SPIRV::BuiltIn::BuiltIn BuiltinValue, LLT LLType,
    Register Reg = Register(0), bool isConst = true,
    const std::optional<SPIRV::LinkageType::LinkageType> &LinkageTy = {
        SPIRV::LinkageType::Import}) {
  Register NewRegister =
      MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::pIDRegClass);
  MIRBuilder.getMRI()->setType(
      NewRegister,
      LLT::pointer(storageClassToAddressSpace(SPIRV::StorageClass::Function),
                   GR->getPointerSize()));
  SPIRVType *PtrType = GR->getOrCreateSPIRVPointerType(
      VariableType, MIRBuilder, SPIRV::StorageClass::Input);
  GR->assignSPIRVTypeToVReg(PtrType, NewRegister, MIRBuilder.getMF());

  // Set up the global OpVariable with the necessary builtin decorations.
  Register Variable = GR->buildGlobalVariable(
      NewRegister, PtrType, getLinkStringForBuiltIn(BuiltinValue), nullptr,
      SPIRV::StorageClass::Input, nullptr, /* isConst= */ isConst, LinkageTy,
      MIRBuilder, false);

  // Load the value from the global variable.
  Register LoadedRegister =
      buildLoadInst(VariableType, Variable, MIRBuilder, GR, LLType, Reg);
  MIRBuilder.getMRI()->setType(LoadedRegister, LLType);
  return LoadedRegister;
}

/// Helper external function for assigning a SPIRVType to a register, ensuring
/// the register class and type are set in MRI. Defined in
/// SPIRVPreLegalizer.cpp.
extern void updateRegType(Register Reg, Type *Ty, SPIRVType *SpirvTy,
                          SPIRVGlobalRegistry *GR, MachineIRBuilder &MIB,
                          MachineRegisterInfo &MRI);

// TODO: Move to TableGen.
static SPIRV::MemorySemantics::MemorySemantics
getSPIRVMemSemantics(std::memory_order MemOrder) {
  switch (MemOrder) {
  case std::memory_order_relaxed:
    return SPIRV::MemorySemantics::None;
  case std::memory_order_acquire:
    return SPIRV::MemorySemantics::Acquire;
  case std::memory_order_release:
    return SPIRV::MemorySemantics::Release;
  case std::memory_order_acq_rel:
    return SPIRV::MemorySemantics::AcquireRelease;
  case std::memory_order_seq_cst:
    return SPIRV::MemorySemantics::SequentiallyConsistent;
  default:
    report_fatal_error("Unknown CL memory order");
  }
}

static SPIRV::Scope::Scope getSPIRVScope(SPIRV::CLMemoryScope ClScope) {
  switch (ClScope) {
  case SPIRV::CLMemoryScope::memory_scope_work_item:
    return SPIRV::Scope::Invocation;
  case SPIRV::CLMemoryScope::memory_scope_work_group:
    return SPIRV::Scope::Workgroup;
  case SPIRV::CLMemoryScope::memory_scope_device:
    return SPIRV::Scope::Device;
  case SPIRV::CLMemoryScope::memory_scope_all_svm_devices:
    return SPIRV::Scope::CrossDevice;
  case SPIRV::CLMemoryScope::memory_scope_sub_group:
    return SPIRV::Scope::Subgroup;
  }
  report_fatal_error("Unknown CL memory scope");
}

static Register buildConstantIntReg32(uint64_t Val,
                                      MachineIRBuilder &MIRBuilder,
                                      SPIRVGlobalRegistry *GR) {
  return GR->buildConstantInt(
      Val, MIRBuilder, GR->getOrCreateSPIRVIntegerType(32, MIRBuilder), true);
}

static Register buildScopeReg(Register CLScopeRegister,
                              SPIRV::Scope::Scope Scope,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR,
                              MachineRegisterInfo *MRI) {
  if (CLScopeRegister.isValid()) {
    auto CLScope =
        static_cast<SPIRV::CLMemoryScope>(getIConstVal(CLScopeRegister, MRI));
    Scope = getSPIRVScope(CLScope);

    if (CLScope == static_cast<unsigned>(Scope)) {
      MRI->setRegClass(CLScopeRegister, &SPIRV::iIDRegClass);
      return CLScopeRegister;
    }
  }
  return buildConstantIntReg32(Scope, MIRBuilder, GR);
}

static void setRegClassIfNull(Register Reg, MachineRegisterInfo *MRI,
                              SPIRVGlobalRegistry *GR) {
  if (MRI->getRegClassOrNull(Reg))
    return;
  SPIRVType *SpvType = GR->getSPIRVTypeForVReg(Reg);
  MRI->setRegClass(Reg,
                   SpvType ? GR->getRegClass(SpvType) : &SPIRV::iIDRegClass);
}

static Register buildMemSemanticsReg(Register SemanticsRegister,
                                     Register PtrRegister, unsigned &Semantics,
                                     MachineIRBuilder &MIRBuilder,
                                     SPIRVGlobalRegistry *GR) {
  if (SemanticsRegister.isValid()) {
    MachineRegisterInfo *MRI = MIRBuilder.getMRI();
    std::memory_order Order =
        static_cast<std::memory_order>(getIConstVal(SemanticsRegister, MRI));
    Semantics =
        getSPIRVMemSemantics(Order) |
        getMemSemanticsForStorageClass(GR->getPointerStorageClass(PtrRegister));
    if (static_cast<unsigned>(Order) == Semantics) {
      MRI->setRegClass(SemanticsRegister, &SPIRV::iIDRegClass);
      return SemanticsRegister;
    }
  }
  return buildConstantIntReg32(Semantics, MIRBuilder, GR);
}

static bool buildOpFromWrapper(MachineIRBuilder &MIRBuilder, unsigned Opcode,
                               const SPIRV::IncomingCall *Call,
                               Register TypeReg,
                               ArrayRef<uint32_t> ImmArgs = {}) {
  auto MIB = MIRBuilder.buildInstr(Opcode);
  if (TypeReg.isValid())
    MIB.addDef(Call->ReturnRegister).addUse(TypeReg);
  unsigned Sz = Call->Arguments.size() - ImmArgs.size();
  for (unsigned i = 0; i < Sz; ++i)
    MIB.addUse(Call->Arguments[i]);
  for (uint32_t ImmArg : ImmArgs)
    MIB.addImm(ImmArg);
  return true;
}

/// Helper function for translating atomic init to OpStore.
static bool buildAtomicInitInst(const SPIRV::IncomingCall *Call,
                                MachineIRBuilder &MIRBuilder) {
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, SPIRV::OpStore, Call, Register(0));

  assert(Call->Arguments.size() == 2 &&
         "Need 2 arguments for atomic init translation");
  MIRBuilder.buildInstr(SPIRV::OpStore)
      .addUse(Call->Arguments[0])
      .addUse(Call->Arguments[1]);
  return true;
}
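
// For illustration: a two-argument "atomic_init(object, value)" call lowers to
// a plain "OpStore %object %value", since OpenCL C atomic_init carries no
// memory-order or scope semantics.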

/// Helper function for building an atomic load instruction.
static bool buildAtomicLoadInst(const SPIRV::IncomingCall *Call,
                                MachineIRBuilder &MIRBuilder,
                                SPIRVGlobalRegistry *GR) {
  Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, SPIRV::OpAtomicLoad, Call, TypeReg);

  Register PtrRegister = Call->Arguments[0];
  // TODO: if true insert call to __translate_ocl_memory_scope before
  // OpAtomicLoad and the function implementation. We can use Translator's
  // output for transcoding/atomic_explicit_arguments.cl as an example.
  Register ScopeRegister =
      Call->Arguments.size() > 1
          ? Call->Arguments[1]
          : buildConstantIntReg32(SPIRV::Scope::Device, MIRBuilder, GR);
  Register MemSemanticsReg;
  if (Call->Arguments.size() > 2) {
    // TODO: Insert call to __translate_ocl_memory_order before OpAtomicLoad.
    MemSemanticsReg = Call->Arguments[2];
  } else {
    int Semantics =
        SPIRV::MemorySemantics::SequentiallyConsistent |
        getMemSemanticsForStorageClass(GR->getPointerStorageClass(PtrRegister));
    MemSemanticsReg = buildConstantIntReg32(Semantics, MIRBuilder, GR);
  }

  MIRBuilder.buildInstr(SPIRV::OpAtomicLoad)
      .addDef(Call->ReturnRegister)
      .addUse(TypeReg)
      .addUse(PtrRegister)
      .addUse(ScopeRegister)
      .addUse(MemSemanticsReg);
  return true;
}
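
// For illustration (assuming the defaults above): a one-argument
// "atomic_load(object)" call lowers to
//   %res = OpAtomicLoad %Type %object %DeviceScope %Semantics
// where %DeviceScope and %Semantics (SequentiallyConsistent combined with the
// semantics of the pointer's storage class) are synthesized i32 constants.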

/// Helper function for building an atomic store instruction.
static bool buildAtomicStoreInst(const SPIRV::IncomingCall *Call,
                                 MachineIRBuilder &MIRBuilder,
                                 SPIRVGlobalRegistry *GR) {
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, SPIRV::OpAtomicStore, Call,
                              Register(0));

  Register ScopeRegister =
      buildConstantIntReg32(SPIRV::Scope::Device, MIRBuilder, GR);
  Register PtrRegister = Call->Arguments[0];
  int Semantics =
      SPIRV::MemorySemantics::SequentiallyConsistent |
      getMemSemanticsForStorageClass(GR->getPointerStorageClass(PtrRegister));
  Register MemSemanticsReg = buildConstantIntReg32(Semantics, MIRBuilder, GR);
  MIRBuilder.buildInstr(SPIRV::OpAtomicStore)
      .addUse(PtrRegister)
      .addUse(ScopeRegister)
      .addUse(MemSemanticsReg)
      .addUse(Call->Arguments[1]);
  return true;
}

/// Helper function for building an atomic compare-exchange instruction.
static bool buildAtomicCompareExchangeInst(
    const SPIRV::IncomingCall *Call, const SPIRV::DemangledBuiltin *Builtin,
    unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR) {
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  bool IsCmpxchg = Call->Builtin->Name.contains("cmpxchg");
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  Register ObjectPtr = Call->Arguments[0];   // Pointer (volatile A *object.)
  Register ExpectedArg = Call->Arguments[1]; // Comparator (C* expected).
  Register Desired = Call->Arguments[2];     // Value (C Desired).
  SPIRVType *SpvDesiredTy = GR->getSPIRVTypeForVReg(Desired);
  LLT DesiredLLT = MRI->getType(Desired);

  assert(GR->getSPIRVTypeForVReg(ObjectPtr)->getOpcode() ==
         SPIRV::OpTypePointer);
  unsigned ExpectedType = GR->getSPIRVTypeForVReg(ExpectedArg)->getOpcode();
  (void)ExpectedType;
  assert(IsCmpxchg ? ExpectedType == SPIRV::OpTypeInt
                   : ExpectedType == SPIRV::OpTypePointer);
  assert(GR->isScalarOfType(Desired, SPIRV::OpTypeInt));

  SPIRVType *SpvObjectPtrTy = GR->getSPIRVTypeForVReg(ObjectPtr);
  assert(SpvObjectPtrTy->getOperand(2).isReg() && "SPIRV type is expected");
  auto StorageClass = static_cast<SPIRV::StorageClass::StorageClass>(
      SpvObjectPtrTy->getOperand(1).getImm());
  auto MemSemStorage = getMemSemanticsForStorageClass(StorageClass);

  Register MemSemEqualReg;
  Register MemSemUnequalReg;
  uint64_t MemSemEqual =
      IsCmpxchg
          ? SPIRV::MemorySemantics::None
          : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
  uint64_t MemSemUnequal =
      IsCmpxchg
          ? SPIRV::MemorySemantics::None
          : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
  if (Call->Arguments.size() >= 4) {
    assert(Call->Arguments.size() >= 5 &&
           "Need 5+ args for explicit atomic cmpxchg");
    auto MemOrdEq =
        static_cast<std::memory_order>(getIConstVal(Call->Arguments[3], MRI));
    auto MemOrdNeq =
        static_cast<std::memory_order>(getIConstVal(Call->Arguments[4], MRI));
    MemSemEqual = getSPIRVMemSemantics(MemOrdEq) | MemSemStorage;
    MemSemUnequal = getSPIRVMemSemantics(MemOrdNeq) | MemSemStorage;
    if (static_cast<unsigned>(MemOrdEq) == MemSemEqual)
      MemSemEqualReg = Call->Arguments[3];
    if (static_cast<unsigned>(MemOrdNeq) == MemSemUnequal)
      MemSemUnequalReg = Call->Arguments[4];
  }
  if (!MemSemEqualReg.isValid())
    MemSemEqualReg = buildConstantIntReg32(MemSemEqual, MIRBuilder, GR);
  if (!MemSemUnequalReg.isValid())
    MemSemUnequalReg = buildConstantIntReg32(MemSemUnequal, MIRBuilder, GR);

  Register ScopeReg;
  auto Scope = IsCmpxchg ? SPIRV::Scope::Workgroup : SPIRV::Scope::Device;
  if (Call->Arguments.size() >= 6) {
    assert(Call->Arguments.size() == 6 &&
           "Extra args for explicit atomic cmpxchg");
    auto ClScope = static_cast<SPIRV::CLMemoryScope>(
        getIConstVal(Call->Arguments[5], MRI));
    Scope = getSPIRVScope(ClScope);
    if (ClScope == static_cast<unsigned>(Scope))
      ScopeReg = Call->Arguments[5];
  }
  if (!ScopeReg.isValid())
    ScopeReg = buildConstantIntReg32(Scope, MIRBuilder, GR);

  Register Expected = IsCmpxchg
                          ? ExpectedArg
                          : buildLoadInst(SpvDesiredTy, ExpectedArg, MIRBuilder,
                                          GR, LLT::scalar(64));
  MRI->setType(Expected, DesiredLLT);
  Register Tmp = !IsCmpxchg ? MRI->createGenericVirtualRegister(DesiredLLT)
                            : Call->ReturnRegister;
  if (!MRI->getRegClassOrNull(Tmp))
    MRI->setRegClass(Tmp, GR->getRegClass(SpvDesiredTy));
  GR->assignSPIRVTypeToVReg(SpvDesiredTy, Tmp, MIRBuilder.getMF());

  MIRBuilder.buildInstr(Opcode)
      .addDef(Tmp)
      .addUse(GR->getSPIRVTypeID(SpvDesiredTy))
      .addUse(ObjectPtr)
      .addUse(ScopeReg)
      .addUse(MemSemEqualReg)
      .addUse(MemSemUnequalReg)
      .addUse(Desired)
      .addUse(Expected);
  if (!IsCmpxchg) {
    MIRBuilder.buildInstr(SPIRV::OpStore).addUse(ExpectedArg).addUse(Tmp);
    MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Call->ReturnRegister, Tmp, Expected);
  }
  return true;
}

/// Helper function for building atomic instructions.
static bool buildAtomicRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
                               MachineIRBuilder &MIRBuilder,
                               SPIRVGlobalRegistry *GR) {
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  Register ScopeRegister =
      Call->Arguments.size() >= 4 ? Call->Arguments[3] : Register();

  assert(Call->Arguments.size() <= 4 &&
         "Too many args for explicit atomic RMW");
  ScopeRegister = buildScopeReg(ScopeRegister, SPIRV::Scope::Workgroup,
                                MIRBuilder, GR, MRI);

  Register PtrRegister = Call->Arguments[0];
  unsigned Semantics = SPIRV::MemorySemantics::None;
  Register MemSemanticsReg =
      Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register();
  MemSemanticsReg = buildMemSemanticsReg(MemSemanticsReg, PtrRegister,
                                         Semantics, MIRBuilder, GR);
  Register ValueReg = Call->Arguments[1];
  Register ValueTypeReg = GR->getSPIRVTypeID(Call->ReturnType);
  // support cl_ext_float_atomics
  if (Call->ReturnType->getOpcode() == SPIRV::OpTypeFloat) {
    if (Opcode == SPIRV::OpAtomicIAdd) {
      Opcode = SPIRV::OpAtomicFAddEXT;
    } else if (Opcode == SPIRV::OpAtomicISub) {
      // Translate OpAtomicISub applied to a floating type argument to
      // OpAtomicFAddEXT with the negated value operand
      Opcode = SPIRV::OpAtomicFAddEXT;
      Register NegValueReg =
          MRI->createGenericVirtualRegister(MRI->getType(ValueReg));
      MRI->setRegClass(NegValueReg, GR->getRegClass(Call->ReturnType));
      GR->assignSPIRVTypeToVReg(Call->ReturnType, NegValueReg,
                                MIRBuilder.getMF());
      MIRBuilder.buildInstr(TargetOpcode::G_FNEG)
          .addDef(NegValueReg)
          .addUse(ValueReg);
      updateRegType(NegValueReg, nullptr, Call->ReturnType, GR, MIRBuilder,
                    MIRBuilder.getMF().getRegInfo());
      ValueReg = NegValueReg;
    }
  }
  MIRBuilder.buildInstr(Opcode)
      .addDef(Call->ReturnRegister)
      .addUse(ValueTypeReg)
      .addUse(PtrRegister)
      .addUse(ScopeRegister)
      .addUse(MemSemanticsReg)
      .addUse(ValueReg);
  return true;
}

/// Helper function for building an atomic floating-type instruction.
static bool buildAtomicFloatingRMWInst(const SPIRV::IncomingCall *Call,
                                       unsigned Opcode,
                                       MachineIRBuilder &MIRBuilder,
                                       SPIRVGlobalRegistry *GR) {
  assert(Call->Arguments.size() == 4 &&
         "Wrong number of arguments for an atomic floating-type builtin");
  Register PtrReg = Call->Arguments[0];
  Register ScopeReg = Call->Arguments[1];
  Register MemSemanticsReg = Call->Arguments[2];
  Register ValueReg = Call->Arguments[3];
  MIRBuilder.buildInstr(Opcode)
      .addDef(Call->ReturnRegister)
      .addUse(GR->getSPIRVTypeID(Call->ReturnType))
      .addUse(PtrReg)
      .addUse(ScopeReg)
      .addUse(MemSemanticsReg)
      .addUse(ValueReg);
  return true;
}

/// Helper function for building atomic flag instructions (e.g.
/// OpAtomicFlagTestAndSet).
static bool buildAtomicFlagInst(const SPIRV::IncomingCall *Call,
                                unsigned Opcode, MachineIRBuilder &MIRBuilder,
                                SPIRVGlobalRegistry *GR) {
  bool IsSet = Opcode == SPIRV::OpAtomicFlagTestAndSet;
  Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              IsSet ? TypeReg : Register(0));

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  Register PtrRegister = Call->Arguments[0];
  unsigned Semantics = SPIRV::MemorySemantics::SequentiallyConsistent;
  Register MemSemanticsReg =
      Call->Arguments.size() >= 2 ? Call->Arguments[1] : Register();
  MemSemanticsReg = buildMemSemanticsReg(MemSemanticsReg, PtrRegister,
                                         Semantics, MIRBuilder, GR);

  assert((Opcode != SPIRV::OpAtomicFlagClear ||
          (Semantics != SPIRV::MemorySemantics::Acquire &&
           Semantics != SPIRV::MemorySemantics::AcquireRelease)) &&
         "Invalid memory order argument!");

  Register ScopeRegister =
      Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register();
  ScopeRegister =
      buildScopeReg(ScopeRegister, SPIRV::Scope::Device, MIRBuilder, GR, MRI);

  auto MIB = MIRBuilder.buildInstr(Opcode);
  if (IsSet)
    MIB.addDef(Call->ReturnRegister).addUse(TypeReg);

  MIB.addUse(PtrRegister).addUse(ScopeRegister).addUse(MemSemanticsReg);
  return true;
}

/// Helper function for building barriers, i.e., memory/control ordering
/// operations.
static bool buildBarrierInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
                             MachineIRBuilder &MIRBuilder,
                             SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  const auto *ST =
      static_cast<const SPIRVSubtarget *>(&MIRBuilder.getMF().getSubtarget());
  if ((Opcode == SPIRV::OpControlBarrierArriveINTEL ||
       Opcode == SPIRV::OpControlBarrierWaitINTEL) &&
      !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_split_barrier)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_INTEL_split_barrier";
    report_fatal_error(DiagMsg.c_str(), false);
  }

  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0));

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  unsigned MemFlags = getIConstVal(Call->Arguments[0], MRI);
  unsigned MemSemantics = SPIRV::MemorySemantics::None;

  if (MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE)
    MemSemantics |= SPIRV::MemorySemantics::WorkgroupMemory;

  if (MemFlags & SPIRV::CLK_GLOBAL_MEM_FENCE)
    MemSemantics |= SPIRV::MemorySemantics::CrossWorkgroupMemory;

  if (MemFlags & SPIRV::CLK_IMAGE_MEM_FENCE)
    MemSemantics |= SPIRV::MemorySemantics::ImageMemory;

  if (Opcode == SPIRV::OpMemoryBarrier)
    MemSemantics = getSPIRVMemSemantics(static_cast<std::memory_order>(
                       getIConstVal(Call->Arguments[1], MRI))) |
                   MemSemantics;
  else if (Opcode == SPIRV::OpControlBarrierArriveINTEL)
    MemSemantics |= SPIRV::MemorySemantics::Release;
  else if (Opcode == SPIRV::OpControlBarrierWaitINTEL)
    MemSemantics |= SPIRV::MemorySemantics::Acquire;
  else
    MemSemantics |= SPIRV::MemorySemantics::SequentiallyConsistent;

  Register MemSemanticsReg =
      MemFlags == MemSemantics
          ? Call->Arguments[0]
          : buildConstantIntReg32(MemSemantics, MIRBuilder, GR);
  Register ScopeReg;
  SPIRV::Scope::Scope Scope = SPIRV::Scope::Workgroup;
  SPIRV::Scope::Scope MemScope = Scope;
  if (Call->Arguments.size() >= 2) {
    assert(
        ((Opcode != SPIRV::OpMemoryBarrier && Call->Arguments.size() == 2) ||
         (Opcode == SPIRV::OpMemoryBarrier && Call->Arguments.size() == 3)) &&
        "Extra args for explicitly scoped barrier");
    Register ScopeArg = (Opcode == SPIRV::OpMemoryBarrier) ? Call->Arguments[2]
                                                           : Call->Arguments[1];
    SPIRV::CLMemoryScope CLScope =
        static_cast<SPIRV::CLMemoryScope>(getIConstVal(ScopeArg, MRI));
    MemScope = getSPIRVScope(CLScope);
    if (!(MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE) ||
        (Opcode == SPIRV::OpMemoryBarrier))
      Scope = MemScope;
    if (CLScope == static_cast<unsigned>(Scope))
      ScopeReg = Call->Arguments[1];
  }

  if (!ScopeReg.isValid())
    ScopeReg = buildConstantIntReg32(Scope, MIRBuilder, GR);

  auto MIB = MIRBuilder.buildInstr(Opcode).addUse(ScopeReg);
  if (Opcode != SPIRV::OpMemoryBarrier)
    MIB.addUse(buildConstantIntReg32(MemScope, MIRBuilder, GR));
  MIB.addUse(MemSemanticsReg);
  return true;
}
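
// For illustration (assuming the mapping above): "barrier(CLK_LOCAL_MEM_FENCE)"
// yields WorkgroupMemory | SequentiallyConsistent semantics and Workgroup
// scope, i.e.
//   OpControlBarrier %WorkgroupScope %WorkgroupScope %Semantics
// with all three operands materialized as i32 constants.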

/// Helper function for building extended bit operations.
static bool buildExtendedBitOpsInst(const SPIRV::IncomingCall *Call,
                                    unsigned Opcode,
                                    MachineIRBuilder &MIRBuilder,
                                    SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  const auto *ST =
      static_cast<const SPIRVSubtarget *>(&MIRBuilder.getMF().getSubtarget());
  if ((Opcode == SPIRV::OpBitFieldInsert ||
       Opcode == SPIRV::OpBitFieldSExtract ||
       Opcode == SPIRV::OpBitFieldUExtract || Opcode == SPIRV::OpBitReverse) &&
      !ST->canUseExtension(SPIRV::Extension::SPV_KHR_bit_instructions)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_KHR_bit_instructions";
    report_fatal_error(DiagMsg.c_str(), false);
  }

  // Generate SPIRV instruction accordingly.
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  auto MIB = MIRBuilder.buildInstr(Opcode)
                 .addDef(Call->ReturnRegister)
                 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
  for (unsigned i = 0; i < Call->Arguments.size(); ++i)
    MIB.addUse(Call->Arguments[i]);

  return true;
}

/// Helper function for building Intel's bindless image instructions.
static bool buildBindlessImageINTELInst(const SPIRV::IncomingCall *Call,
                                        unsigned Opcode,
                                        MachineIRBuilder &MIRBuilder,
                                        SPIRVGlobalRegistry *GR) {
  // Generate SPIRV instruction accordingly.
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  MIRBuilder.buildInstr(Opcode)
      .addDef(Call->ReturnRegister)
      .addUse(GR->getSPIRVTypeID(Call->ReturnType))
      .addUse(Call->Arguments[0]);

  return true;
}

/// Helper function for building Intel's OpBitwiseFunctionINTEL instruction.
static bool buildTernaryBitwiseFunctionINTELInst(
    const SPIRV::IncomingCall *Call, unsigned Opcode,
    MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR) {
  // Generate SPIRV instruction accordingly.
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  auto MIB = MIRBuilder.buildInstr(Opcode)
                 .addDef(Call->ReturnRegister)
                 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
  for (unsigned i = 0; i < Call->Arguments.size(); ++i)
    MIB.addUse(Call->Arguments[i]);

  return true;
}

                              unsigned Opcode,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR) {
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  auto MIB = MIRBuilder.buildInstr(Opcode)
                 .addDef(Call->ReturnRegister)
                 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
  for (unsigned i = 0; i < Call->Arguments.size(); ++i)
    MIB.addUse(Call->Arguments[i]);

  return true;
}

/// Helper function for building Intel's 2d block io instructions.
static bool build2DBlockIOINTELInst(const SPIRV::IncomingCall *Call,
                                    unsigned Opcode,
                                    MachineIRBuilder &MIRBuilder,
                                    SPIRVGlobalRegistry *GR) {
  // Generate SPIRV instruction accordingly.
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0));

  auto MIB = MIRBuilder.buildInstr(Opcode)
                 .addDef(Call->ReturnRegister)
                 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
  for (unsigned i = 0; i < Call->Arguments.size(); ++i)
    MIB.addUse(Call->Arguments[i]);

  return true;
}

static bool buildPipeInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
                          unsigned Scope, MachineIRBuilder &MIRBuilder,
                          SPIRVGlobalRegistry *GR) {
  switch (Opcode) {
  case SPIRV::OpCommitReadPipe:
  case SPIRV::OpCommitWritePipe:
    return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0));
  case SPIRV::OpGroupCommitReadPipe:
  case SPIRV::OpGroupCommitWritePipe:
  case SPIRV::OpGroupReserveReadPipePackets:
  case SPIRV::OpGroupReserveWritePipePackets: {
    Register ScopeConstReg =
        MIRBuilder.buildConstant(LLT::scalar(32), Scope).getReg(0);
    MachineRegisterInfo *MRI = MIRBuilder.getMRI();
    MRI->setRegClass(ScopeConstReg, &SPIRV::iIDRegClass);
    MachineInstrBuilder MIB;
    MIB = MIRBuilder.buildInstr(Opcode);
    // Add Return register and type.
    if (Opcode == SPIRV::OpGroupReserveReadPipePackets ||
        Opcode == SPIRV::OpGroupReserveWritePipePackets)
      MIB.addDef(Call->ReturnRegister)
          .addUse(GR->getSPIRVTypeID(Call->ReturnType));

    MIB.addUse(ScopeConstReg);
    for (unsigned int i = 0; i < Call->Arguments.size(); ++i)
      MIB.addUse(Call->Arguments[i]);

    return true;
  }
  default:
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));
  }
}

static unsigned getNumComponentsForDim(SPIRV::Dim::Dim dim) {
  switch (dim) {
  case SPIRV::Dim::DIM_1D:
  case SPIRV::Dim::DIM_Buffer:
    return 1;
  case SPIRV::Dim::DIM_2D:
  case SPIRV::Dim::DIM_Cube:
  case SPIRV::Dim::DIM_Rect:
    return 2;
  case SPIRV::Dim::DIM_3D:
    return 3;
  default:
    report_fatal_error("Cannot get num components for given Dim");
  }
}

/// Helper function for obtaining the number of size components.
static unsigned getNumSizeComponents(SPIRVType *imgType) {
  assert(imgType->getOpcode() == SPIRV::OpTypeImage);
  auto dim = static_cast<SPIRV::Dim::Dim>(imgType->getOperand(2).getImm());
  unsigned numComps = getNumComponentsForDim(dim);
  bool arrayed = imgType->getOperand(4).getImm() == 1;
  return arrayed ? numComps + 1 : numComps;
}
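
// For example, a 2D arrayed image (DIM_2D with the 'Arrayed' operand set) has
// 2 + 1 = 3 size components, while a non-arrayed 3D image also has 3.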

static bool builtinMayNeedPromotionToVec(uint32_t BuiltinNumber) {
  switch (BuiltinNumber) {
  case SPIRV::OpenCLExtInst::s_min:
  case SPIRV::OpenCLExtInst::u_min:
  case SPIRV::OpenCLExtInst::s_max:
  case SPIRV::OpenCLExtInst::u_max:
  case SPIRV::OpenCLExtInst::fmax:
  case SPIRV::OpenCLExtInst::fmin:
  case SPIRV::OpenCLExtInst::fmax_common:
  case SPIRV::OpenCLExtInst::fmin_common:
  case SPIRV::OpenCLExtInst::s_clamp:
  case SPIRV::OpenCLExtInst::fclamp:
  case SPIRV::OpenCLExtInst::u_clamp:
  case SPIRV::OpenCLExtInst::mix:
  case SPIRV::OpenCLExtInst::step:
  case SPIRV::OpenCLExtInst::smoothstep:
    return true;
  default:
    break;
  }
  return false;
}

//===----------------------------------------------------------------------===//
// Implementation functions for each builtin group
//===----------------------------------------------------------------------===//

static SmallVector<Register>
getBuiltinCallArguments(const SPIRV::IncomingCall *Call, uint32_t BuiltinNumber,
                        MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR) {

  Register ReturnTypeId = GR->getSPIRVTypeID(Call->ReturnType);
  unsigned ResultElementCount =
      GR->getScalarOrVectorComponentCount(ReturnTypeId);
  bool MayNeedPromotionToVec =
      builtinMayNeedPromotionToVec(BuiltinNumber) && ResultElementCount > 1;

  if (!MayNeedPromotionToVec)
    return {Call->Arguments.begin(), Call->Arguments.end()};

  SmallVector<Register> Arguments;
  for (Register Argument : Call->Arguments) {
    Register VecArg = Argument;
    SPIRVType *ArgumentType = GR->getSPIRVTypeForVReg(Argument);
    if (ArgumentType != Call->ReturnType) {
      VecArg = createVirtualRegister(Call->ReturnType, GR, MIRBuilder);
      auto VecSplat = MIRBuilder.buildInstr(SPIRV::OpCompositeConstruct)
                          .addDef(VecArg)
                          .addUse(ReturnTypeId);
      for (unsigned I = 0; I != ResultElementCount; ++I)
        VecSplat.addUse(Argument);
    }
    Arguments.push_back(VecArg);
  }
  return Arguments;
}
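
// Illustrative example of the promotion above: for an OpenCL call like
// "fmax(float4 x, float y)", the scalar y is splatted to the float4 result
// type via
//   %y4 = OpCompositeConstruct %v4float %y %y %y %y
// so that both operands of the extended instruction match the vector result.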

static bool generateExtInst(const SPIRV::IncomingCall *Call,
                            MachineIRBuilder &MIRBuilder,
                            SPIRVGlobalRegistry *GR, const CallBase &CB) {
  // Lookup the extended instruction number in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  uint32_t Number =
      SPIRV::lookupExtendedBuiltin(Builtin->Name, Builtin->Set)->Number;
  // fmin_common and fmax_common are now deprecated, and we should use fmin and
  // fmax with the NotInf and NotNaN flags instead. Keep the original number so
  // the NoNans and NoInfs flags can be added later.
  uint32_t OrigNumber = Number;
  const SPIRVSubtarget &ST =
      cast<SPIRVSubtarget>(MIRBuilder.getMF().getSubtarget());
  if (ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2) &&
      (Number == SPIRV::OpenCLExtInst::fmin_common ||
       Number == SPIRV::OpenCLExtInst::fmax_common)) {
    Number = (Number == SPIRV::OpenCLExtInst::fmin_common)
                 ? SPIRV::OpenCLExtInst::fmin
                 : SPIRV::OpenCLExtInst::fmax;
  }

  Register ReturnTypeId = GR->getSPIRVTypeID(Call->ReturnType);
  SmallVector<Register> Arguments =
      getBuiltinCallArguments(Call, Number, MIRBuilder, GR);

  // Build extended instruction.
  auto MIB =
      MIRBuilder.buildInstr(SPIRV::OpExtInst)
          .addDef(Call->ReturnRegister)
          .addUse(ReturnTypeId)
          .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
          .addImm(Number);

  for (Register Argument : Arguments)
    MIB.addUse(Argument);

  MIB.getInstr()->copyIRFlags(CB);
  if (OrigNumber == SPIRV::OpenCLExtInst::fmin_common ||
      OrigNumber == SPIRV::OpenCLExtInst::fmax_common) {
    // Add NoNans and NoInfs flags to the fmin/fmax instruction.
    MIB.getInstr()->setFlag(MachineInstr::MIFlag::FmNoNans);
    MIB.getInstr()->setFlag(MachineInstr::MIFlag::FmNoInfs);
  }
  return true;
}

static bool generateRelationalInst(const SPIRV::IncomingCall *Call,
                                   MachineIRBuilder &MIRBuilder,
                                   SPIRVGlobalRegistry *GR) {
  // Lookup the instruction opcode in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;

  Register CompareRegister;
  SPIRVType *RelationType;
  std::tie(CompareRegister, RelationType) =
      buildBoolRegister(MIRBuilder, Call->ReturnType, GR);

  // Build relational instruction.
  auto MIB = MIRBuilder.buildInstr(Opcode)
                 .addDef(CompareRegister)
                 .addUse(GR->getSPIRVTypeID(RelationType));

  for (auto Argument : Call->Arguments)
    MIB.addUse(Argument);

  // Build select instruction.
  return buildSelectInst(MIRBuilder, Call->ReturnRegister, CompareRegister,
                         Call->ReturnType, GR);
}

static bool generateGroupInst(const SPIRV::IncomingCall *Call,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  const SPIRV::GroupBuiltin *GroupBuiltin =
      SPIRV::lookupGroupBuiltin(Builtin->Name);

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  if (Call->isSpirvOp()) {
    if (GroupBuiltin->NoGroupOperation) {
      SmallVector<uint32_t, 1> ImmArgs;
      if (GroupBuiltin->Opcode ==
              SPIRV::OpSubgroupMatrixMultiplyAccumulateINTEL &&
          Call->Arguments.size() > 4)
        ImmArgs.push_back(getConstFromIntrinsic(Call->Arguments[4], MRI));
      return buildOpFromWrapper(MIRBuilder, GroupBuiltin->Opcode, Call,
                                GR->getSPIRVTypeID(Call->ReturnType), ImmArgs);
    }

    // Group Operation is a literal
    Register GroupOpReg = Call->Arguments[1];
    const MachineInstr *MI = getDefInstrMaybeConstant(GroupOpReg, MRI);
    if (!MI || MI->getOpcode() != TargetOpcode::G_CONSTANT)
      report_fatal_error(
          "Group Operation parameter must be an integer constant");
    uint64_t GrpOp = MI->getOperand(1).getCImm()->getValue().getZExtValue();
    Register ScopeReg = Call->Arguments[0];
    auto MIB = MIRBuilder.buildInstr(GroupBuiltin->Opcode)
                   .addDef(Call->ReturnRegister)
                   .addUse(GR->getSPIRVTypeID(Call->ReturnType))
                   .addUse(ScopeReg)
                   .addImm(GrpOp);
    for (unsigned i = 2; i < Call->Arguments.size(); ++i)
      MIB.addUse(Call->Arguments[i]);
    return true;
  }

  Register Arg0;
  if (GroupBuiltin->HasBoolArg) {
    SPIRVType *BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder, true);
    Register BoolReg = Call->Arguments[0];
    SPIRVType *BoolRegType = GR->getSPIRVTypeForVReg(BoolReg);
    if (!BoolRegType)
      report_fatal_error("Can't find a register's type definition");
    MachineInstr *ArgInstruction = getDefInstrMaybeConstant(BoolReg, MRI);
    if (ArgInstruction->getOpcode() == TargetOpcode::G_CONSTANT) {
      if (BoolRegType->getOpcode() != SPIRV::OpTypeBool)
        Arg0 = GR->buildConstantInt(getIConstVal(BoolReg, MRI), MIRBuilder,
                                    BoolType, true);
    } else {
      if (BoolRegType->getOpcode() == SPIRV::OpTypeInt) {
        Arg0 = MRI->createGenericVirtualRegister(LLT::scalar(1));
        MRI->setRegClass(Arg0, &SPIRV::iIDRegClass);
        GR->assignSPIRVTypeToVReg(BoolType, Arg0, MIRBuilder.getMF());
        MIRBuilder.buildICmp(
            CmpInst::ICMP_NE, Arg0, BoolReg,
            GR->buildConstantInt(0, MIRBuilder, BoolRegType, true));
        updateRegType(Arg0, nullptr, BoolType, GR, MIRBuilder,
                      MIRBuilder.getMF().getRegInfo());
      } else if (BoolRegType->getOpcode() != SPIRV::OpTypeBool) {
        report_fatal_error("Expect a boolean argument");
      }
      // if BoolReg is a boolean register, we don't need to do anything
    }
  }

  Register GroupResultRegister = Call->ReturnRegister;
  SPIRVType *GroupResultType = Call->ReturnType;

  // TODO: maybe we need to check whether the result type is already boolean
  // and in this case do not insert select instruction.
  const bool HasBoolReturnTy =
      GroupBuiltin->IsElect || GroupBuiltin->IsAllOrAny ||
      GroupBuiltin->IsAllEqual || GroupBuiltin->IsLogical ||
      GroupBuiltin->IsInverseBallot || GroupBuiltin->IsBallotBitExtract;

  if (HasBoolReturnTy)
    std::tie(GroupResultRegister, GroupResultType) =
        buildBoolRegister(MIRBuilder, Call->ReturnType, GR);

  auto Scope = Builtin->Name.starts_with("sub_group") ? SPIRV::Scope::Subgroup
                                                      : SPIRV::Scope::Workgroup;
  Register ScopeRegister = buildConstantIntReg32(Scope, MIRBuilder, GR);

  Register VecReg;
  if (GroupBuiltin->Opcode == SPIRV::OpGroupBroadcast &&
      Call->Arguments.size() > 2) {
    // For OpGroupBroadcast "LocalId must be an integer datatype. It must be a
    // scalar, a vector with 2 components, or a vector with 3 components.",
    // meaning that we must create a vector from the function arguments if
    // it's a work_group_broadcast(val, local_id_x, local_id_y) or
    // work_group_broadcast(val, local_id_x, local_id_y, local_id_z) call.
    Register ElemReg = Call->Arguments[1];
    SPIRVType *ElemType = GR->getSPIRVTypeForVReg(ElemReg);
    if (!ElemType || ElemType->getOpcode() != SPIRV::OpTypeInt)
      report_fatal_error("Expect an integer <LocalId> argument");
    unsigned VecLen = Call->Arguments.size() - 1;
    VecReg = MRI->createGenericVirtualRegister(
        LLT::fixed_vector(VecLen, MRI->getType(ElemReg)));
    MRI->setRegClass(VecReg, &SPIRV::vIDRegClass);
    SPIRVType *VecType =
        GR->getOrCreateSPIRVVectorType(ElemType, VecLen, MIRBuilder, true);
    GR->assignSPIRVTypeToVReg(VecType, VecReg, MIRBuilder.getMF());
    auto MIB =
        MIRBuilder.buildInstr(TargetOpcode::G_BUILD_VECTOR).addDef(VecReg);
    for (unsigned i = 1; i < Call->Arguments.size(); i++) {
      MIB.addUse(Call->Arguments[i]);
      setRegClassIfNull(Call->Arguments[i], MRI, GR);
    }
    updateRegType(VecReg, nullptr, VecType, GR, MIRBuilder,
                  MIRBuilder.getMF().getRegInfo());
  }

  // Build work/sub group instruction.
  auto MIB = MIRBuilder.buildInstr(GroupBuiltin->Opcode)
                 .addDef(GroupResultRegister)
                 .addUse(GR->getSPIRVTypeID(GroupResultType))
                 .addUse(ScopeRegister);

  if (!GroupBuiltin->NoGroupOperation)
    MIB.addImm(GroupBuiltin->GroupOperation);
  if (Call->Arguments.size() > 0) {
    MIB.addUse(Arg0.isValid() ? Arg0 : Call->Arguments[0]);
    setRegClassIfNull(Call->Arguments[0], MRI, GR);
    if (VecReg.isValid())
      MIB.addUse(VecReg);
    else
      for (unsigned i = 1; i < Call->Arguments.size(); i++)
        MIB.addUse(Call->Arguments[i]);
  }

  // Build select instruction.
  if (HasBoolReturnTy)
    buildSelectInst(MIRBuilder, Call->ReturnRegister, GroupResultRegister,
                    Call->ReturnType, GR);
  return true;
}

static bool generateIntelSubgroupsInst(const SPIRV::IncomingCall *Call,
                                       MachineIRBuilder &MIRBuilder,
                                       SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  MachineFunction &MF = MIRBuilder.getMF();
  const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
  const SPIRV::IntelSubgroupsBuiltin *IntelSubgroups =
      SPIRV::lookupIntelSubgroupsBuiltin(Builtin->Name);

  if (IntelSubgroups->IsMedia &&
      !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_media_block_io)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_INTEL_media_block_io";
    report_fatal_error(DiagMsg.c_str(), false);
  } else if (!IntelSubgroups->IsMedia &&
             !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_subgroups)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_INTEL_subgroups";
    report_fatal_error(DiagMsg.c_str(), false);
  }

  uint32_t OpCode = IntelSubgroups->Opcode;
  if (Call->isSpirvOp()) {
    bool IsSet = OpCode != SPIRV::OpSubgroupBlockWriteINTEL &&
                 OpCode != SPIRV::OpSubgroupImageBlockWriteINTEL &&
                 OpCode != SPIRV::OpSubgroupImageMediaBlockWriteINTEL;
    return buildOpFromWrapper(MIRBuilder, OpCode, Call,
                              IsSet ? GR->getSPIRVTypeID(Call->ReturnType)
                                    : Register(0));
  }

  if (IntelSubgroups->IsBlock) {
    // The minimal number of arguments set in TableGen records is 1.
    if (SPIRVType *Arg0Type = GR->getSPIRVTypeForVReg(Call->Arguments[0])) {
      if (Arg0Type->getOpcode() == SPIRV::OpTypeImage) {
        // TODO: add required validation from the specification:
        // "'Image' must be an object whose type is OpTypeImage with a 'Sampled'
        // operand of 0 or 2. If the 'Sampled' operand is 2, then some
        // dimensions require a capability."
        switch (OpCode) {
        case SPIRV::OpSubgroupBlockReadINTEL:
          OpCode = SPIRV::OpSubgroupImageBlockReadINTEL;
          break;
        case SPIRV::OpSubgroupBlockWriteINTEL:
          OpCode = SPIRV::OpSubgroupImageBlockWriteINTEL;
          break;
        }
      }
    }
  }

  // TODO: opaque pointer types should be eventually resolved in such a way
  // that validation of block read is enabled with respect to the following
  // specification requirement:
  // "'Result Type' may be a scalar or vector type, and its component type must
  // be equal to the type pointed to by 'Ptr'."
  // For example, function parameter type should not be default i8 pointer, but
  // depend on the result type of the instruction where it is used as a pointer
  // argument of OpSubgroupBlockReadINTEL

  // Build Intel subgroups instruction
  MachineInstrBuilder MIB =
      IntelSubgroups->IsWrite
          ? MIRBuilder.buildInstr(OpCode)
          : MIRBuilder.buildInstr(OpCode)
                .addDef(Call->ReturnRegister)
                .addUse(GR->getSPIRVTypeID(Call->ReturnType));
  for (size_t i = 0; i < Call->Arguments.size(); ++i)
    MIB.addUse(Call->Arguments[i]);
  return true;
}

static bool generateGroupUniformInst(const SPIRV::IncomingCall *Call,
                                     MachineIRBuilder &MIRBuilder,
                                     SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  MachineFunction &MF = MIRBuilder.getMF();
  const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
  if (!ST->canUseExtension(
          SPIRV::Extension::SPV_KHR_uniform_group_instructions)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_KHR_uniform_group_instructions";
    report_fatal_error(DiagMsg.c_str(), false);
  }
  const SPIRV::GroupUniformBuiltin *GroupUniform =
      SPIRV::lookupGroupUniformBuiltin(Builtin->Name);
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  Register GroupResultReg = Call->ReturnRegister;
  Register ScopeReg = Call->Arguments[0];
  Register ValueReg = Call->Arguments[2];

  // Group Operation
  Register ConstGroupOpReg = Call->Arguments[1];
  const MachineInstr *Const = getDefInstrMaybeConstant(ConstGroupOpReg, MRI);
  if (!Const || Const->getOpcode() != TargetOpcode::G_CONSTANT)
    report_fatal_error(
        "expect a constant group operation for a uniform group instruction",
        false);
  const MachineOperand &ConstOperand = Const->getOperand(1);
  if (!ConstOperand.isCImm())
    report_fatal_error("uniform group instructions: group operation must be an "
                       "integer constant",
                       false);

  auto MIB = MIRBuilder.buildInstr(GroupUniform->Opcode)
                 .addDef(GroupResultReg)
                 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
                 .addUse(ScopeReg);
  addNumImm(ConstOperand.getCImm()->getValue(), MIB);
  MIB.addUse(ValueReg);

  return true;
}

static bool generateKernelClockInst(const SPIRV::IncomingCall *Call,
                                    MachineIRBuilder &MIRBuilder,
                                    SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  MachineFunction &MF = MIRBuilder.getMF();
  const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
  if (!ST->canUseExtension(SPIRV::Extension::SPV_KHR_shader_clock)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_KHR_shader_clock";
    report_fatal_error(DiagMsg.c_str(), false);
  }

  Register ResultReg = Call->ReturnRegister;

  if (Builtin->Name == "__spirv_ReadClockKHR") {
    MIRBuilder.buildInstr(SPIRV::OpReadClockKHR)
        .addDef(ResultReg)
        .addUse(GR->getSPIRVTypeID(Call->ReturnType))
        .addUse(Call->Arguments[0]);
  } else {
    // Deduce the `Scope` operand from the builtin function name.
    SPIRV::Scope::Scope ScopeArg =
        StringSwitch<SPIRV::Scope::Scope>(Builtin->Name)
            .EndsWith("device", SPIRV::Scope::Scope::Device)
            .EndsWith("work_group", SPIRV::Scope::Scope::Workgroup)
            .EndsWith("sub_group", SPIRV::Scope::Scope::Subgroup);
    Register ScopeReg = buildConstantIntReg32(ScopeArg, MIRBuilder, GR);

    MIRBuilder.buildInstr(SPIRV::OpReadClockKHR)
        .addDef(ResultReg)
        .addUse(GR->getSPIRVTypeID(Call->ReturnType))
        .addUse(ScopeReg);
  }

  return true;
}
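
// For illustration: "clock_read_device()" carries no explicit operand, so the
// scope is deduced from the name suffix and the call lowers to
//   %res = OpReadClockKHR %Type %DeviceScope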
1596
1597// These queries ask for a single size_t result for a given dimension index,
1598// e.g. size_t get_global_id(uint dimindex). In SPIR-V, the builtins
1599// corresponding to these values are all vec3 types, so we need to extract the
1600// correct index or return DefaultValue (0 or 1 depending on the query). We also
1601// handle extending or truncating in case size_t does not match the expected
1602// result type's bitwidth.
1603//
1604// For a constant index >= 3 we generate:
1605// %res = OpConstant %SizeT DefaultValue
1606//
1607// For other indices we generate:
1608// %g = OpVariable %ptr_V3_SizeT Input
1609// OpDecorate %g BuiltIn XXX
1610// OpDecorate %g LinkageAttributes "__spirv_BuiltInXXX"
1611// OpDecorate %g Constant
1612// %loadedVec = OpLoad %V3_SizeT %g
1613//
1614// Then, if the index is constant < 3, we generate:
1615// %res = OpCompositeExtract %SizeT %loadedVec idx
1616// If the index is dynamic, we generate:
1617// %tmp = OpVectorExtractDynamic %SizeT %loadedVec %idx
1618// %cmp = OpULessThan %bool %idx %const_3
1619// %res = OpSelect %SizeT %cmp %tmp %const_<DefaultValue>
1620//
1621// If the bitwidth of %res does not match the expected return type, we add an
1622// extend or truncate.
1623 static bool genWorkgroupQuery(const SPIRV::IncomingCall *Call,
1624 MachineIRBuilder &MIRBuilder,
1625 SPIRVGlobalRegistry *GR,
1626 SPIRV::BuiltIn::BuiltIn BuiltinValue,
1627 uint64_t DefaultValue) {
1628 Register IndexRegister = Call->Arguments[0];
1629 const unsigned ResultWidth = Call->ReturnType->getOperand(1).getImm();
1630 const unsigned PointerSize = GR->getPointerSize();
1631 const SPIRVType *PointerSizeType =
1632 GR->getOrCreateSPIRVIntegerType(PointerSize, MIRBuilder);
1633 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1634 auto IndexInstruction = getDefInstrMaybeConstant(IndexRegister, MRI);
1635
1636 // Set up the final register to truncate or extend at the end.
1637 Register ToTruncate = Call->ReturnRegister;
1638
1639 // If the index is constant, we can statically determine if it is in range.
1640 bool IsConstantIndex =
1641 IndexInstruction->getOpcode() == TargetOpcode::G_CONSTANT;
1642
1643 // If it's out of range (max dimension is 3), we can just return the constant
1644 // default value (0 or 1 depending on which query function).
1645 if (IsConstantIndex && getIConstVal(IndexRegister, MRI) >= 3) {
1646 Register DefaultReg = Call->ReturnRegister;
1647 if (PointerSize != ResultWidth) {
1648 DefaultReg = MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
1649 MRI->setRegClass(DefaultReg, &SPIRV::iIDRegClass);
1650 GR->assignSPIRVTypeToVReg(PointerSizeType, DefaultReg,
1651 MIRBuilder.getMF());
1652 ToTruncate = DefaultReg;
1653 }
1654 auto NewRegister =
1655 GR->buildConstantInt(DefaultValue, MIRBuilder, PointerSizeType, true);
1656 MIRBuilder.buildCopy(DefaultReg, NewRegister);
1657 } else { // If it could be in range, we need to load from the given builtin.
1658 auto Vec3Ty =
1659 GR->getOrCreateSPIRVVectorType(PointerSizeType, 3, MIRBuilder, true);
1660 Register LoadedVector =
1661 buildBuiltinVariableLoad(MIRBuilder, Vec3Ty, GR, BuiltinValue,
1662 LLT::fixed_vector(3, PointerSize));
1663 // Set up the vreg to extract the result to (possibly a new temporary one).
1664 Register Extracted = Call->ReturnRegister;
1665 if (!IsConstantIndex || PointerSize != ResultWidth) {
1666 Extracted = MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
1667 MRI->setRegClass(Extracted, &SPIRV::iIDRegClass);
1668 GR->assignSPIRVTypeToVReg(PointerSizeType, Extracted, MIRBuilder.getMF());
1669 }
1670 // Use Intrinsic::spv_extractelt so dynamic vs static extraction is
1671 // handled later: extr = spv_extractelt LoadedVector, IndexRegister.
1672 MachineInstrBuilder ExtractInst = MIRBuilder.buildIntrinsic(
1673 Intrinsic::spv_extractelt, ArrayRef<Register>{Extracted}, true, false);
1674 ExtractInst.addUse(LoadedVector).addUse(IndexRegister);
1675
1676 // If the index is dynamic, we need to check whether it is < 3 and then use a select.
1677 if (!IsConstantIndex) {
1678 updateRegType(Extracted, nullptr, PointerSizeType, GR, MIRBuilder, *MRI);
1679
1680 auto IndexType = GR->getSPIRVTypeForVReg(IndexRegister);
1681 auto BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder, true);
1682
1683 Register CompareRegister =
1684 MRI->createGenericVirtualRegister(LLT::scalar(1));
1685 MRI->setRegClass(CompareRegister, &SPIRV::iIDRegClass);
1686 GR->assignSPIRVTypeToVReg(BoolType, CompareRegister, MIRBuilder.getMF());
1687
1688 // Use G_ICMP to check if idxVReg < 3.
1689 MIRBuilder.buildICmp(
1690 CmpInst::ICMP_ULT, CompareRegister, IndexRegister,
1691 GR->buildConstantInt(3, MIRBuilder, IndexType, true));
1692
1693 // Get constant for the default value (0 or 1 depending on which
1694 // function).
1695 Register DefaultRegister =
1696 GR->buildConstantInt(DefaultValue, MIRBuilder, PointerSizeType, true);
1697
1698 // Get a register for the selection result (possibly a new temporary one).
1699 Register SelectionResult = Call->ReturnRegister;
1700 if (PointerSize != ResultWidth) {
1701 SelectionResult =
1702 MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
1703 MRI->setRegClass(SelectionResult, &SPIRV::iIDRegClass);
1704 GR->assignSPIRVTypeToVReg(PointerSizeType, SelectionResult,
1705 MIRBuilder.getMF());
1706 }
1707 // Create the final G_SELECT to return the extracted value or the default.
1708 MIRBuilder.buildSelect(SelectionResult, CompareRegister, Extracted,
1709 DefaultRegister);
1710 ToTruncate = SelectionResult;
1711 } else {
1712 ToTruncate = Extracted;
1713 }
1714 }
1715 // Alter the result's bitwidth if it does not match the SizeT value extracted.
1716 if (PointerSize != ResultWidth)
1717 MIRBuilder.buildZExtOrTrunc(Call->ReturnRegister, ToTruncate);
1718 return true;
1719}
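// A minimal sketch, not part of this file: a scalar C++ model of the
// dynamic-index path emitted above, assuming a 64-bit size_t; the helper
// name is hypothetical.
static uint64_t workgroupQueryModel(const uint64_t Vec3[3], uint64_t Idx,
                                    uint64_t DefaultValue) {
  // Mirrors OpVectorExtractDynamic + OpULessThan + OpSelect: an in-range
  // index selects the loaded vec3 component, anything else the default.
  return Idx < 3 ? Vec3[Idx] : DefaultValue;
}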
1720
1721 static bool generateBuiltinVar(const SPIRV::IncomingCall *Call,
1722 MachineIRBuilder &MIRBuilder,
1723 SPIRVGlobalRegistry *GR) {
1724 // Lookup the builtin variable record.
1725 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1726 SPIRV::BuiltIn::BuiltIn Value =
1727 SPIRV::lookupGetBuiltin(Builtin->Name, Builtin->Set)->Value;
1728
1729 if (Value == SPIRV::BuiltIn::GlobalInvocationId)
1730 return genWorkgroupQuery(Call, MIRBuilder, GR, Value, 0);
1731
1732 // Build a load instruction for the builtin variable.
1733 unsigned BitWidth = GR->getScalarOrVectorBitWidth(Call->ReturnType);
1734 LLT LLType;
1735 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeVector)
1736 LLType =
1737 LLT::fixed_vector(Call->ReturnType->getOperand(2).getImm(), BitWidth);
1738 else
1739 LLType = LLT::scalar(BitWidth);
1740
1741 return buildBuiltinVariableLoad(MIRBuilder, Call->ReturnType, GR, Value,
1742 LLType, Call->ReturnRegister);
1743}
1744
1745 static bool generateAtomicInst(const SPIRV::IncomingCall *Call,
1746 MachineIRBuilder &MIRBuilder,
1747 SPIRVGlobalRegistry *GR) {
1748 // Lookup the instruction opcode in the TableGen records.
1749 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1750 unsigned Opcode =
1751 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1752
1753 switch (Opcode) {
1754 case SPIRV::OpStore:
1755 return buildAtomicInitInst(Call, MIRBuilder);
1756 case SPIRV::OpAtomicLoad:
1757 return buildAtomicLoadInst(Call, MIRBuilder, GR);
1758 case SPIRV::OpAtomicStore:
1759 return buildAtomicStoreInst(Call, MIRBuilder, GR);
1760 case SPIRV::OpAtomicCompareExchange:
1761 case SPIRV::OpAtomicCompareExchangeWeak:
1762 return buildAtomicCompareExchangeInst(Call, Builtin, Opcode, MIRBuilder,
1763 GR);
1764 case SPIRV::OpAtomicIAdd:
1765 case SPIRV::OpAtomicISub:
1766 case SPIRV::OpAtomicOr:
1767 case SPIRV::OpAtomicXor:
1768 case SPIRV::OpAtomicAnd:
1769 case SPIRV::OpAtomicExchange:
1770 return buildAtomicRMWInst(Call, Opcode, MIRBuilder, GR);
1771 case SPIRV::OpMemoryBarrier:
1772 return buildBarrierInst(Call, SPIRV::OpMemoryBarrier, MIRBuilder, GR);
1773 case SPIRV::OpAtomicFlagTestAndSet:
1774 case SPIRV::OpAtomicFlagClear:
1775 return buildAtomicFlagInst(Call, Opcode, MIRBuilder, GR);
1776 default:
1777 if (Call->isSpirvOp())
1778 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
1779 GR->getSPIRVTypeID(Call->ReturnType));
1780 return false;
1781 }
1782}
1783
1784 static bool generateAtomicFloatingInst(const SPIRV::IncomingCall *Call,
1785 MachineIRBuilder &MIRBuilder,
1786 SPIRVGlobalRegistry *GR) {
1787 // Lookup the instruction opcode in the TableGen records.
1788 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1789 unsigned Opcode = SPIRV::lookupAtomicFloatingBuiltin(Builtin->Name)->Opcode;
1790
1791 switch (Opcode) {
1792 case SPIRV::OpAtomicFAddEXT:
1793 case SPIRV::OpAtomicFMinEXT:
1794 case SPIRV::OpAtomicFMaxEXT:
1795 return buildAtomicFloatingRMWInst(Call, Opcode, MIRBuilder, GR);
1796 default:
1797 return false;
1798 }
1799}
1800
1801 static bool generateBarrierInst(const SPIRV::IncomingCall *Call,
1802 MachineIRBuilder &MIRBuilder,
1803 SPIRVGlobalRegistry *GR) {
1804 // Lookup the instruction opcode in the TableGen records.
1805 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1806 unsigned Opcode =
1807 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1808
1809 return buildBarrierInst(Call, Opcode, MIRBuilder, GR);
1810}
1811
1812 static bool generateCastToPtrInst(const SPIRV::IncomingCall *Call,
1813 MachineIRBuilder &MIRBuilder,
1814 SPIRVGlobalRegistry *GR) {
1815 // Lookup the instruction opcode in the TableGen records.
1816 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1817 unsigned Opcode =
1818 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1819
1820 if (Opcode == SPIRV::OpGenericCastToPtrExplicit) {
1821 SPIRV::StorageClass::StorageClass ResSC =
1822 GR->getPointerStorageClass(Call->ReturnRegister);
1823 if (!isGenericCastablePtr(ResSC))
1824 return false;
1825
1826 MIRBuilder.buildInstr(Opcode)
1827 .addDef(Call->ReturnRegister)
1828 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1829 .addUse(Call->Arguments[0])
1830 .addImm(ResSC);
1831 } else {
1832 MIRBuilder.buildInstr(TargetOpcode::G_ADDRSPACE_CAST)
1833 .addDef(Call->ReturnRegister)
1834 .addUse(Call->Arguments[0]);
1835 }
1836 return true;
1837}
1838
1839static bool generateDotOrFMulInst(const StringRef DemangledCall,
1840 const SPIRV::IncomingCall *Call,
1841 MachineIRBuilder &MIRBuilder,
1842 SPIRVGlobalRegistry *GR) {
1843 if (Call->isSpirvOp())
1844 return buildOpFromWrapper(MIRBuilder, SPIRV::OpDot, Call,
1845 GR->getSPIRVTypeID(Call->ReturnType));
1846
1847 bool IsVec = GR->getSPIRVTypeForVReg(Call->Arguments[0])->getOpcode() ==
1848 SPIRV::OpTypeVector;
1849 // Use OpDot only in case of vector args and OpFMul in case of scalar args.
1850 uint32_t OC = IsVec ? SPIRV::OpDot : SPIRV::OpFMulS;
1851 bool IsSwapReq = false;
1852
1853 const auto *ST =
1854 static_cast<const SPIRVSubtarget *>(&MIRBuilder.getMF().getSubtarget());
1855 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt) &&
1856 (ST->canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
1857 ST->isAtLeastSPIRVVer(VersionTuple(1, 6)))) {
1858 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1859 const SPIRV::IntegerDotProductBuiltin *IntDot =
1860 SPIRV::lookupIntegerDotProductBuiltin(Builtin->Name);
1861 if (IntDot) {
1862 OC = IntDot->Opcode;
1863 IsSwapReq = IntDot->IsSwapReq;
1864 } else if (IsVec) {
1865 // Handling "dot" and "dot_acc_sat" builtins which use vectors of
1866 // integers.
1867 LLVMContext &Ctx = MIRBuilder.getContext();
1868 SmallVector<StringRef, 10> TypeStrs;
1869 SPIRV::parseBuiltinTypeStr(TypeStrs, DemangledCall, Ctx);
1870 bool IsFirstSigned = TypeStrs[0].trim()[0] != 'u';
1871 bool IsSecondSigned = TypeStrs[1].trim()[0] != 'u';
1872
1873 if (Call->BuiltinName == "dot") {
1874 if (IsFirstSigned && IsSecondSigned)
1875 OC = SPIRV::OpSDot;
1876 else if (!IsFirstSigned && !IsSecondSigned)
1877 OC = SPIRV::OpUDot;
1878 else {
1879 OC = SPIRV::OpSUDot;
1880 if (!IsFirstSigned)
1881 IsSwapReq = true;
1882 }
1883 } else if (Call->BuiltinName == "dot_acc_sat") {
1884 if (IsFirstSigned && IsSecondSigned)
1885 OC = SPIRV::OpSDotAccSat;
1886 else if (!IsFirstSigned && !IsSecondSigned)
1887 OC = SPIRV::OpUDotAccSat;
1888 else {
1889 OC = SPIRV::OpSUDotAccSat;
1890 if (!IsFirstSigned)
1891 IsSwapReq = true;
1892 }
1893 }
1894 }
1895 }
1896
1897 MachineInstrBuilder MIB = MIRBuilder.buildInstr(OC)
1898 .addDef(Call->ReturnRegister)
1899 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
1900
1901 if (IsSwapReq) {
1902 MIB.addUse(Call->Arguments[1]);
1903 MIB.addUse(Call->Arguments[0]);
1904 // Needed for the dot_acc_sat* builtins.
1905 for (size_t i = 2; i < Call->Arguments.size(); ++i)
1906 MIB.addUse(Call->Arguments[i]);
1907 } else {
1908 for (size_t i = 0; i < Call->Arguments.size(); ++i)
1909 MIB.addUse(Call->Arguments[i]);
1910 }
1911
1912 // Add the Packed Vector Format literal for integer dot product builtins
1913 // when the arguments are scalar.
1914 if (!IsVec && OC != SPIRV::OpFMulS)
1915 MIB.addImm(SPIRV::PackedVectorFormat4x8Bit);
1916
1917 return true;
1918}
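// A minimal sketch, not part of this file: with scalar 32-bit integer
// arguments, the PackedVectorFormat4x8Bit literal added above makes OpSDot
// treat each operand as four packed 8-bit lanes. A hypothetical C++ model
// of that semantics (lane order does not affect the sum):
static int32_t packedSDot4x8Model(uint32_t A, uint32_t B) {
  int32_t Acc = 0;
  for (int Lane = 0; Lane < 4; ++Lane) {
    int8_t LaneA = static_cast<int8_t>(A >> (8 * Lane));
    int8_t LaneB = static_cast<int8_t>(B >> (8 * Lane));
    Acc += static_cast<int32_t>(LaneA) * static_cast<int32_t>(LaneB);
  }
  return Acc;
}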
1919
1920 static bool generateWaveInst(const SPIRV::IncomingCall *Call,
1921 MachineIRBuilder &MIRBuilder,
1922 SPIRVGlobalRegistry *GR) {
1923 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1924 SPIRV::BuiltIn::BuiltIn Value =
1925 SPIRV::lookupGetBuiltin(Builtin->Name, Builtin->Set)->Value;
1926
1927 // For now, we only support a single Wave intrinsic with a single return type.
1928 assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt);
1929 LLT LLType = LLT::scalar(GR->getScalarOrVectorBitWidth(Call->ReturnType));
1930
1931 return buildBuiltinVariableLoad(
1932 MIRBuilder, Call->ReturnType, GR, Value, LLType, Call->ReturnRegister,
1933 /* isConst= */ false, /* LinkageType= */ std::nullopt);
1934}
1935
1936// We expect a builtin
1937 // Name(ptr sret([RetType]) %result, Type %operand1, Type %operand2)
1938// where %result is a pointer to where the result of the builtin execution
1939// is to be stored, and generate the following instructions:
1940 // Res = Opcode RetType Operand1 Operand2
1941// OpStore RetVariable Res
1942 static bool generateICarryBorrowInst(const SPIRV::IncomingCall *Call,
1943 MachineIRBuilder &MIRBuilder,
1944 SPIRVGlobalRegistry *GR) {
1945 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1946 unsigned Opcode =
1947 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1948
1949 Register SRetReg = Call->Arguments[0];
1950 SPIRVType *PtrRetType = GR->getSPIRVTypeForVReg(SRetReg);
1951 SPIRVType *RetType = GR->getPointeeType(PtrRetType);
1952 if (!RetType)
1953 report_fatal_error("The first parameter must be a pointer");
1954 if (RetType->getOpcode() != SPIRV::OpTypeStruct)
1955 report_fatal_error("Expected struct type result for the arithmetic with "
1956 "overflow builtins");
1957
1958 SPIRVType *OpType1 = GR->getSPIRVTypeForVReg(Call->Arguments[1]);
1959 SPIRVType *OpType2 = GR->getSPIRVTypeForVReg(Call->Arguments[2]);
1960 if (!OpType1 || !OpType2 || OpType1 != OpType2)
1961 report_fatal_error("Operands must have the same type");
1962 if (OpType1->getOpcode() == SPIRV::OpTypeVector)
1963 switch (Opcode) {
1964 case SPIRV::OpIAddCarryS:
1965 Opcode = SPIRV::OpIAddCarryV;
1966 break;
1967 case SPIRV::OpISubBorrowS:
1968 Opcode = SPIRV::OpISubBorrowV;
1969 break;
1970 }
1971
1972 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1973 Register ResReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1974 if (const TargetRegisterClass *DstRC =
1975 MRI->getRegClassOrNull(Call->Arguments[1])) {
1976 MRI->setRegClass(ResReg, DstRC);
1977 MRI->setType(ResReg, MRI->getType(Call->Arguments[1]));
1978 } else {
1979 MRI->setType(ResReg, LLT::scalar(64));
1980 }
1981 GR->assignSPIRVTypeToVReg(RetType, ResReg, MIRBuilder.getMF());
1982 MIRBuilder.buildInstr(Opcode)
1983 .addDef(ResReg)
1984 .addUse(GR->getSPIRVTypeID(RetType))
1985 .addUse(Call->Arguments[1])
1986 .addUse(Call->Arguments[2]);
1987 MIRBuilder.buildInstr(SPIRV::OpStore).addUse(SRetReg).addUse(ResReg);
1988 return true;
1989}
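// For illustration (names assumed, not from this file), a call shaped like
// the comment above,
//   __spirv_IAddCarry({i32, i32}* sret %res, i32 %a, i32 %b)
// lowers roughly to:
//   %tmp = OpIAddCarry %struct_i32_i32 %a %b
//   OpStore %res %tmp
// with the scalar opcode swapped for its vector form when the operands are
// vectors.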
1990
1991 static bool generateGetQueryInst(const SPIRV::IncomingCall *Call,
1992 MachineIRBuilder &MIRBuilder,
1993 SPIRVGlobalRegistry *GR) {
1994 // Lookup the builtin record.
1995 SPIRV::BuiltIn::BuiltIn Value =
1996 SPIRV::lookupGetBuiltin(Call->Builtin->Name, Call->Builtin->Set)->Value;
1997 const bool IsDefaultOne = (Value == SPIRV::BuiltIn::GlobalSize ||
1998 Value == SPIRV::BuiltIn::NumWorkgroups ||
1999 Value == SPIRV::BuiltIn::WorkgroupSize ||
2000 Value == SPIRV::BuiltIn::EnqueuedWorkgroupSize);
2001 return genWorkgroupQuery(Call, MIRBuilder, GR, Value, IsDefaultOne ? 1 : 0);
2002}
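// A minimal sketch, not part of this file: the IsDefaultOne choice above
// models the OpenCL rule that size-style queries return 1 for out-of-range
// dimension indices while id/offset-style queries return 0.
static uint64_t globalSizeQueryModel(const uint64_t GlobalSize[3],
                                     uint64_t Dim) {
  return Dim < 3 ? GlobalSize[Dim] : 1; // IsDefaultOne => DefaultValue == 1
}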
2003
2004 static bool generateImageSizeQueryInst(const SPIRV::IncomingCall *Call,
2005 MachineIRBuilder &MIRBuilder,
2006 SPIRVGlobalRegistry *GR) {
2007 // Lookup the image size query component number in the TableGen records.
2008 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2009 uint32_t Component =
2010 SPIRV::lookupImageQueryBuiltin(Builtin->Name, Builtin->Set)->Component;
2011 // Query result may either be a vector or a scalar. If return type is not a
2012 // vector, expect only a single size component. Otherwise get the number of
2013 // expected components.
2014 unsigned NumExpectedRetComponents =
2015 Call->ReturnType->getOpcode() == SPIRV::OpTypeVector
2016 ? Call->ReturnType->getOperand(2).getImm()
2017 : 1;
2018 // Get the actual number of query result/size components.
2019 SPIRVType *ImgType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
2020 unsigned NumActualRetComponents = getNumSizeComponents(ImgType);
2021 Register QueryResult = Call->ReturnRegister;
2022 SPIRVType *QueryResultType = Call->ReturnType;
2023 if (NumExpectedRetComponents != NumActualRetComponents) {
2024 unsigned Bitwidth = Call->ReturnType->getOpcode() == SPIRV::OpTypeInt
2025 ? Call->ReturnType->getOperand(1).getImm()
2026 : 32;
2027 QueryResult = MIRBuilder.getMRI()->createGenericVirtualRegister(
2028 LLT::fixed_vector(NumActualRetComponents, Bitwidth));
2029 MIRBuilder.getMRI()->setRegClass(QueryResult, &SPIRV::vIDRegClass);
2030 SPIRVType *IntTy = GR->getOrCreateSPIRVIntegerType(Bitwidth, MIRBuilder);
2031 QueryResultType = GR->getOrCreateSPIRVVectorType(
2032 IntTy, NumActualRetComponents, MIRBuilder, true);
2033 GR->assignSPIRVTypeToVReg(QueryResultType, QueryResult, MIRBuilder.getMF());
2034 }
2035 bool IsDimBuf = ImgType->getOperand(2).getImm() == SPIRV::Dim::DIM_Buffer;
2036 unsigned Opcode =
2037 IsDimBuf ? SPIRV::OpImageQuerySize : SPIRV::OpImageQuerySizeLod;
2038 auto MIB = MIRBuilder.buildInstr(Opcode)
2039 .addDef(QueryResult)
2040 .addUse(GR->getSPIRVTypeID(QueryResultType))
2041 .addUse(Call->Arguments[0]);
2042 if (!IsDimBuf)
2043 MIB.addUse(buildConstantIntReg32(0, MIRBuilder, GR)); // Lod id.
2044 if (NumExpectedRetComponents == NumActualRetComponents)
2045 return true;
2046 if (NumExpectedRetComponents == 1) {
2047 // Only 1 component is expected, build OpCompositeExtract instruction.
2048 unsigned ExtractedComposite =
2049 Component == 3 ? NumActualRetComponents - 1 : Component;
2050 assert(ExtractedComposite < NumActualRetComponents &&
2051 "Invalid composite index!");
2052 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
2053 SPIRVType *NewType = nullptr;
2054 if (QueryResultType->getOpcode() == SPIRV::OpTypeVector) {
2055 Register NewTypeReg = QueryResultType->getOperand(1).getReg();
2056 if (TypeReg != NewTypeReg &&
2057 (NewType = GR->getSPIRVTypeForVReg(NewTypeReg)) != nullptr)
2058 TypeReg = NewTypeReg;
2059 }
2060 MIRBuilder.buildInstr(SPIRV::OpCompositeExtract)
2061 .addDef(Call->ReturnRegister)
2062 .addUse(TypeReg)
2063 .addUse(QueryResult)
2064 .addImm(ExtractedComposite);
2065 if (NewType != nullptr)
2066 updateRegType(Call->ReturnRegister, nullptr, NewType, GR, MIRBuilder,
2067 MIRBuilder.getMF().getRegInfo());
2068 } else {
2069 // More than 1 component is expected, fill a new vector.
2070 auto MIB = MIRBuilder.buildInstr(SPIRV::OpVectorShuffle)
2071 .addDef(Call->ReturnRegister)
2072 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2073 .addUse(QueryResult)
2074 .addUse(QueryResult);
2075 for (unsigned i = 0; i < NumExpectedRetComponents; ++i)
2076 MIB.addImm(i < NumActualRetComponents ? i : 0xffffffff);
2077 }
2078 return true;
2079}
2080
2081 static bool generateImageMiscQueryInst(const SPIRV::IncomingCall *Call,
2082 MachineIRBuilder &MIRBuilder,
2083 SPIRVGlobalRegistry *GR) {
2084 assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt &&
2085 "Image samples query result must be of int type!");
2086
2087 // Lookup the instruction opcode in the TableGen records.
2088 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2089 unsigned Opcode =
2090 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2091
2092 Register Image = Call->Arguments[0];
2093 SPIRV::Dim::Dim ImageDimensionality = static_cast<SPIRV::Dim::Dim>(
2094 GR->getSPIRVTypeForVReg(Image)->getOperand(2).getImm());
2095 (void)ImageDimensionality;
2096
2097 switch (Opcode) {
2098 case SPIRV::OpImageQuerySamples:
2099 assert(ImageDimensionality == SPIRV::Dim::DIM_2D &&
2100 "Image must be of 2D dimensionality");
2101 break;
2102 case SPIRV::OpImageQueryLevels:
2103 assert((ImageDimensionality == SPIRV::Dim::DIM_1D ||
2104 ImageDimensionality == SPIRV::Dim::DIM_2D ||
2105 ImageDimensionality == SPIRV::Dim::DIM_3D ||
2106 ImageDimensionality == SPIRV::Dim::DIM_Cube) &&
2107 "Image must be of 1D/2D/3D/Cube dimensionality");
2108 break;
2109 }
2110
2111 MIRBuilder.buildInstr(Opcode)
2112 .addDef(Call->ReturnRegister)
2113 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2114 .addUse(Image);
2115 return true;
2116}
2117
2118// TODO: Move to TableGen.
2119static SPIRV::SamplerAddressingMode::SamplerAddressingMode
2120 getSamplerAddressingModeFromBitmask(unsigned Bitmask) {
2121 switch (Bitmask & SPIRV::CLK_ADDRESS_MODE_MASK) {
2122 case SPIRV::CLK_ADDRESS_CLAMP:
2123 return SPIRV::SamplerAddressingMode::Clamp;
2124 case SPIRV::CLK_ADDRESS_CLAMP_TO_EDGE:
2125 return SPIRV::SamplerAddressingMode::ClampToEdge;
2126 case SPIRV::CLK_ADDRESS_REPEAT:
2127 return SPIRV::SamplerAddressingMode::Repeat;
2128 case SPIRV::CLK_ADDRESS_MIRRORED_REPEAT:
2129 return SPIRV::SamplerAddressingMode::RepeatMirrored;
2130 case SPIRV::CLK_ADDRESS_NONE:
2131 return SPIRV::SamplerAddressingMode::None;
2132 default:
2133 report_fatal_error("Unknown CL address mode");
2134 }
2135}
2136
2137static unsigned getSamplerParamFromBitmask(unsigned Bitmask) {
2138 return (Bitmask & SPIRV::CLK_NORMALIZED_COORDS_TRUE) ? 1 : 0;
2139}
2140
2141static SPIRV::SamplerFilterMode::SamplerFilterMode
2142 getSamplerFilterModeFromBitmask(unsigned Bitmask) {
2143 if (Bitmask & SPIRV::CLK_FILTER_LINEAR)
2144 return SPIRV::SamplerFilterMode::Linear;
2145 if (Bitmask & SPIRV::CLK_FILTER_NEAREST)
2146 return SPIRV::SamplerFilterMode::Nearest;
2147 return SPIRV::SamplerFilterMode::Nearest;
2148}
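// A minimal sketch, not part of this file: a worked example of the three
// helpers above on a typical OpenCL sampler bitmask.
static void samplerBitmaskExample() {
  unsigned Mask = SPIRV::CLK_NORMALIZED_COORDS_TRUE |
                  SPIRV::CLK_ADDRESS_CLAMP | SPIRV::CLK_FILTER_LINEAR;
  assert(getSamplerAddressingModeFromBitmask(Mask) ==
         SPIRV::SamplerAddressingMode::Clamp);
  assert(getSamplerParamFromBitmask(Mask) == 1);
  assert(getSamplerFilterModeFromBitmask(Mask) ==
         SPIRV::SamplerFilterMode::Linear);
}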
2149
2150static bool generateReadImageInst(const StringRef DemangledCall,
2151 const SPIRV::IncomingCall *Call,
2152 MachineIRBuilder &MIRBuilder,
2153 SPIRVGlobalRegistry *GR) {
2154 if (Call->isSpirvOp())
2155 return buildOpFromWrapper(MIRBuilder, SPIRV::OpImageRead, Call,
2156 GR->getSPIRVTypeID(Call->ReturnType));
2157 Register Image = Call->Arguments[0];
2158 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2159 bool HasOclSampler = DemangledCall.contains_insensitive("ocl_sampler");
2160 bool HasMsaa = DemangledCall.contains_insensitive("msaa");
2161 if (HasOclSampler) {
2162 Register Sampler = Call->Arguments[1];
2163
2164 if (!GR->isScalarOfType(Sampler, SPIRV::OpTypeSampler) &&
2165 getDefInstrMaybeConstant(Sampler, MRI)->getOperand(1).isCImm()) {
2166 uint64_t SamplerMask = getIConstVal(Sampler, MRI);
2167 Sampler = GR->buildConstantSampler(
2168 Register(), getSamplerAddressingModeFromBitmask(SamplerMask),
2169 getSamplerParamFromBitmask(SamplerMask),
2170 getSamplerFilterModeFromBitmask(SamplerMask), MIRBuilder);
2171 }
2172 SPIRVType *ImageType = GR->getSPIRVTypeForVReg(Image);
2173 SPIRVType *SampledImageType =
2174 GR->getOrCreateOpTypeSampledImage(ImageType, MIRBuilder);
2175 Register SampledImage = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2176
2177 MIRBuilder.buildInstr(SPIRV::OpSampledImage)
2178 .addDef(SampledImage)
2179 .addUse(GR->getSPIRVTypeID(SampledImageType))
2180 .addUse(Image)
2181 .addUse(Sampler);
2182
2183 Register Lod = GR->buildConstantFP(APFloat::getZero(APFloat::IEEEsingle()),
2184 MIRBuilder);
2185
2186 if (Call->ReturnType->getOpcode() != SPIRV::OpTypeVector) {
2187 SPIRVType *TempType =
2188 GR->getOrCreateSPIRVVectorType(Call->ReturnType, 4, MIRBuilder, true);
2189 Register TempRegister =
2190 MRI->createGenericVirtualRegister(GR->getRegType(TempType));
2191 MRI->setRegClass(TempRegister, GR->getRegClass(TempType));
2192 GR->assignSPIRVTypeToVReg(TempType, TempRegister, MIRBuilder.getMF());
2193 MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
2194 .addDef(TempRegister)
2195 .addUse(GR->getSPIRVTypeID(TempType))
2196 .addUse(SampledImage)
2197 .addUse(Call->Arguments[2]) // Coordinate.
2198 .addImm(SPIRV::ImageOperand::Lod)
2199 .addUse(Lod);
2200 MIRBuilder.buildInstr(SPIRV::OpCompositeExtract)
2201 .addDef(Call->ReturnRegister)
2202 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2203 .addUse(TempRegister)
2204 .addImm(0);
2205 } else {
2206 MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
2207 .addDef(Call->ReturnRegister)
2208 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2209 .addUse(SampledImage)
2210 .addUse(Call->Arguments[2]) // Coordinate.
2211 .addImm(SPIRV::ImageOperand::Lod)
2212 .addUse(Lod);
2213 }
2214 } else if (HasMsaa) {
2215 MIRBuilder.buildInstr(SPIRV::OpImageRead)
2216 .addDef(Call->ReturnRegister)
2217 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2218 .addUse(Image)
2219 .addUse(Call->Arguments[1]) // Coordinate.
2220 .addImm(SPIRV::ImageOperand::Sample)
2221 .addUse(Call->Arguments[2]);
2222 } else {
2223 MIRBuilder.buildInstr(SPIRV::OpImageRead)
2224 .addDef(Call->ReturnRegister)
2225 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2226 .addUse(Image)
2227 .addUse(Call->Arguments[1]); // Coordinate.
2228 }
2229 return true;
2230}
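// For illustration, the three branches above correspond to OpenCL reads
// such as (signatures assumed, not from this file):
//   read_imagef(img, sampler, coord)     -> OpSampledImage +
//                                           OpImageSampleExplicitLod, Lod 0
//   read_imagef(msaa_img, coord, sample) -> OpImageRead with a Sample operand
//   read_imagef(img, coord)              -> plain OpImageRead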
2231
2232 static bool generateWriteImageInst(const SPIRV::IncomingCall *Call,
2233 MachineIRBuilder &MIRBuilder,
2234 SPIRVGlobalRegistry *GR) {
2235 if (Call->isSpirvOp())
2236 return buildOpFromWrapper(MIRBuilder, SPIRV::OpImageWrite, Call,
2237 Register(0));
2238 MIRBuilder.buildInstr(SPIRV::OpImageWrite)
2239 .addUse(Call->Arguments[0]) // Image.
2240 .addUse(Call->Arguments[1]) // Coordinate.
2241 .addUse(Call->Arguments[2]); // Texel.
2242 return true;
2243}
2244
2245static bool generateSampleImageInst(const StringRef DemangledCall,
2246 const SPIRV::IncomingCall *Call,
2247 MachineIRBuilder &MIRBuilder,
2248 SPIRVGlobalRegistry *GR) {
2249 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2250 if (Call->Builtin->Name.contains_insensitive(
2251 "__translate_sampler_initializer")) {
2252 // Build sampler literal.
2253 uint64_t Bitmask = getIConstVal(Call->Arguments[0], MRI);
2254 Register Sampler = GR->buildConstantSampler(
2255 Call->ReturnRegister, getSamplerAddressingModeFromBitmask(Bitmask),
2256 getSamplerParamFromBitmask(Bitmask),
2257 getSamplerFilterModeFromBitmask(Bitmask), MIRBuilder);
2258 return Sampler.isValid();
2259 } else if (Call->Builtin->Name.contains_insensitive("__spirv_SampledImage")) {
2260 // Create OpSampledImage.
2261 Register Image = Call->Arguments[0];
2262 SPIRVType *ImageType = GR->getSPIRVTypeForVReg(Image);
2263 SPIRVType *SampledImageType =
2264 GR->getOrCreateOpTypeSampledImage(ImageType, MIRBuilder);
2265 Register SampledImage =
2266 Call->ReturnRegister.isValid()
2267 ? Call->ReturnRegister
2268 : MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2269 MIRBuilder.buildInstr(SPIRV::OpSampledImage)
2270 .addDef(SampledImage)
2271 .addUse(GR->getSPIRVTypeID(SampledImageType))
2272 .addUse(Image)
2273 .addUse(Call->Arguments[1]); // Sampler.
2274 return true;
2275 } else if (Call->Builtin->Name.contains_insensitive(
2276 "__spirv_ImageSampleExplicitLod")) {
2277 // Sample an image using an explicit level of detail.
2278 std::string ReturnType = DemangledCall.str();
2279 if (DemangledCall.contains("_R")) {
2280 ReturnType = ReturnType.substr(ReturnType.find("_R") + 2);
2281 ReturnType = ReturnType.substr(0, ReturnType.find('('));
2282 }
2283 SPIRVType *Type =
2284 Call->ReturnType
2285 ? Call->ReturnType
2286 : GR->getOrCreateSPIRVTypeByName(ReturnType, MIRBuilder, true);
2287 if (!Type) {
2288 std::string DiagMsg =
2289 "Unable to recognize SPIRV type name: " + ReturnType;
2290 report_fatal_error(DiagMsg.c_str());
2291 }
2292 MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
2293 .addDef(Call->ReturnRegister)
2294 .addUse(GR->getSPIRVTypeID(Type))
2295 .addUse(Call->Arguments[0]) // Image.
2296 .addUse(Call->Arguments[1]) // Coordinate.
2297 .addImm(SPIRV::ImageOperand::Lod)
2298 .addUse(Call->Arguments[3]);
2299 return true;
2300 }
2301 return false;
2302}
2303
2304 static bool generateSelectInst(const SPIRV::IncomingCall *Call,
2305 MachineIRBuilder &MIRBuilder) {
2306 MIRBuilder.buildSelect(Call->ReturnRegister, Call->Arguments[0],
2307 Call->Arguments[1], Call->Arguments[2]);
2308 return true;
2309}
2310
2311 static bool generateConstructInst(const SPIRV::IncomingCall *Call,
2312 MachineIRBuilder &MIRBuilder,
2313 SPIRVGlobalRegistry *GR) {
2314 createContinuedInstructions(MIRBuilder, SPIRV::OpCompositeConstruct, 3,
2315 SPIRV::OpCompositeConstructContinuedINTEL,
2316 Call->Arguments, Call->ReturnRegister,
2317 GR->getSPIRVTypeID(Call->ReturnType));
2318 return true;
2319}
2320
2321 static bool generateCoopMatrInst(const SPIRV::IncomingCall *Call,
2322 MachineIRBuilder &MIRBuilder,
2323 SPIRVGlobalRegistry *GR) {
2324 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2325 unsigned Opcode =
2326 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2327 bool IsSet = Opcode != SPIRV::OpCooperativeMatrixStoreKHR &&
2328 Opcode != SPIRV::OpCooperativeMatrixStoreCheckedINTEL &&
2329 Opcode != SPIRV::OpCooperativeMatrixPrefetchINTEL;
2330 unsigned ArgSz = Call->Arguments.size();
2331 unsigned LiteralIdx = 0;
2332 switch (Opcode) {
2333 // Memory operand is optional and is literal.
2334 case SPIRV::OpCooperativeMatrixLoadKHR:
2335 LiteralIdx = ArgSz > 3 ? 3 : 0;
2336 break;
2337 case SPIRV::OpCooperativeMatrixStoreKHR:
2338 LiteralIdx = ArgSz > 4 ? 4 : 0;
2339 break;
2340 case SPIRV::OpCooperativeMatrixLoadCheckedINTEL:
2341 LiteralIdx = ArgSz > 7 ? 7 : 0;
2342 break;
2343 case SPIRV::OpCooperativeMatrixStoreCheckedINTEL:
2344 LiteralIdx = ArgSz > 8 ? 8 : 0;
2345 break;
2346 // Cooperative Matrix Operands operand is optional and is literal.
2347 case SPIRV::OpCooperativeMatrixMulAddKHR:
2348 LiteralIdx = ArgSz > 3 ? 3 : 0;
2349 break;
2350 };
2351
2352 SmallVector<uint32_t, 1> ImmArgs;
2353 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2354 if (Opcode == SPIRV::OpCooperativeMatrixPrefetchINTEL) {
2355 const uint32_t CacheLevel = getConstFromIntrinsic(Call->Arguments[3], MRI);
2356 auto MIB = MIRBuilder.buildInstr(SPIRV::OpCooperativeMatrixPrefetchINTEL)
2357 .addUse(Call->Arguments[0]) // pointer
2358 .addUse(Call->Arguments[1]) // rows
2359 .addUse(Call->Arguments[2]) // columns
2360 .addImm(CacheLevel) // cache level
2361 .addUse(Call->Arguments[4]); // memory layout
2362 if (ArgSz > 5)
2363 MIB.addUse(Call->Arguments[5]); // stride
2364 if (ArgSz > 6) {
2365 const uint32_t MemOp = getConstFromIntrinsic(Call->Arguments[6], MRI);
2366 MIB.addImm(MemOp); // memory operand
2367 }
2368 return true;
2369 }
2370 if (LiteralIdx > 0)
2371 ImmArgs.push_back(getConstFromIntrinsic(Call->Arguments[LiteralIdx], MRI));
2372 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
2373 if (Opcode == SPIRV::OpCooperativeMatrixLengthKHR) {
2374 SPIRVType *CoopMatrType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
2375 if (!CoopMatrType)
2376 report_fatal_error("Can't find a register's type definition");
2377 MIRBuilder.buildInstr(Opcode)
2378 .addDef(Call->ReturnRegister)
2379 .addUse(TypeReg)
2380 .addUse(CoopMatrType->getOperand(0).getReg());
2381 return true;
2382 }
2383 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2384 IsSet ? TypeReg : Register(0), ImmArgs);
2385}
2386
2387 static bool generateSpecConstantInst(const SPIRV::IncomingCall *Call,
2388 MachineIRBuilder &MIRBuilder,
2389 SPIRVGlobalRegistry *GR) {
2390 // Lookup the instruction opcode in the TableGen records.
2391 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2392 unsigned Opcode =
2393 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2394 const MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2395
2396 switch (Opcode) {
2397 case SPIRV::OpSpecConstant: {
2398 // Build the SpecID decoration.
2399 unsigned SpecId =
2400 static_cast<unsigned>(getIConstVal(Call->Arguments[0], MRI));
2401 buildOpDecorate(Call->ReturnRegister, MIRBuilder, SPIRV::Decoration::SpecId,
2402 {SpecId});
2403 // Determine the constant MI.
2404 Register ConstRegister = Call->Arguments[1];
2405 const MachineInstr *Const = getDefInstrMaybeConstant(ConstRegister, MRI);
2406 assert(Const &&
2407 (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
2408 Const->getOpcode() == TargetOpcode::G_FCONSTANT) &&
2409 "Argument should be either an int or floating-point constant");
2410 // Determine the opcode and built the OpSpec MI.
2411 const MachineOperand &ConstOperand = Const->getOperand(1);
2412 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeBool) {
2413 assert(ConstOperand.isCImm() && "Int constant operand is expected");
2414 Opcode = ConstOperand.getCImm()->getValue().getZExtValue()
2415 ? SPIRV::OpSpecConstantTrue
2416 : SPIRV::OpSpecConstantFalse;
2417 }
2418 auto MIB = MIRBuilder.buildInstr(Opcode)
2419 .addDef(Call->ReturnRegister)
2420 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
2421
2422 if (Call->ReturnType->getOpcode() != SPIRV::OpTypeBool) {
2423 if (Const->getOpcode() == TargetOpcode::G_CONSTANT)
2424 addNumImm(ConstOperand.getCImm()->getValue(), MIB);
2425 else
2426 addNumImm(ConstOperand.getFPImm()->getValueAPF().bitcastToAPInt(), MIB);
2427 }
2428 return true;
2429 }
2430 case SPIRV::OpSpecConstantComposite: {
2431 createContinuedInstructions(MIRBuilder, Opcode, 3,
2432 SPIRV::OpSpecConstantCompositeContinuedINTEL,
2433 Call->Arguments, Call->ReturnRegister,
2434 GR->getSPIRVTypeID(Call->ReturnType));
2435 return true;
2436 }
2437 default:
2438 return false;
2439 }
2440}
2441
2442 static bool generateExtendedBitOpsInst(const SPIRV::IncomingCall *Call,
2443 MachineIRBuilder &MIRBuilder,
2444 SPIRVGlobalRegistry *GR) {
2445 // Lookup the instruction opcode in the TableGen records.
2446 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2447 unsigned Opcode =
2448 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2449
2450 return buildExtendedBitOpsInst(Call, Opcode, MIRBuilder, GR);
2451}
2452
2453 static bool generateBindlessImageINTELInst(const SPIRV::IncomingCall *Call,
2454 MachineIRBuilder &MIRBuilder,
2455 SPIRVGlobalRegistry *GR) {
2456 // Lookup the instruction opcode in the TableGen records.
2457 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2458 unsigned Opcode =
2459 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2460
2461 return buildBindlessImageINTELInst(Call, Opcode, MIRBuilder, GR);
2462}
2463
2464 static bool generateBlockingPipesInst(const SPIRV::IncomingCall *Call,
2465 MachineIRBuilder &MIRBuilder,
2466 SPIRVGlobalRegistry *GR) {
2467 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2468 unsigned Opcode =
2469 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2470 return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0));
2471}
2472
2473 static bool buildAPFixedPointInst(const SPIRV::IncomingCall *Call,
2474 unsigned Opcode, MachineIRBuilder &MIRBuilder,
2475 SPIRVGlobalRegistry *GR) {
2476 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2477 SmallVector<uint32_t> ImmArgs;
2478 Register InputReg = Call->Arguments[0];
2479 const Type *RetTy = GR->getTypeForSPIRVType(Call->ReturnType);
2480 bool IsSRet = RetTy->isVoidTy();
2481
2482 if (IsSRet) {
2483 const LLT ValTy = MRI->getType(InputReg);
2484 Register ActualRetValReg = MRI->createGenericVirtualRegister(ValTy);
2485 SPIRVType *InstructionType =
2486 GR->getPointeeType(GR->getSPIRVTypeForVReg(InputReg));
2487 InputReg = Call->Arguments[1];
2488 auto InputType = GR->getTypeForSPIRVType(GR->getSPIRVTypeForVReg(InputReg));
2489 Register PtrInputReg;
2490 if (InputType->getTypeID() == llvm::Type::TypeID::TypedPointerTyID) {
2491 LLT InputLLT = MRI->getType(InputReg);
2492 PtrInputReg = MRI->createGenericVirtualRegister(InputLLT);
2493 SPIRVType *PtrType =
2494 GR->getPointeeType(GR->getSPIRVTypeForVReg(InputReg));
2495 MachineMemOperand *MMO1 = MIRBuilder.getMF().getMachineMemOperand(
2496 MachinePointerInfo(), MachineMemOperand::MOLoad,
2497 InputLLT.getSizeInBytes(), Align(4));
2498 MIRBuilder.buildLoad(PtrInputReg, InputReg, *MMO1);
2499 MRI->setRegClass(PtrInputReg, &SPIRV::iIDRegClass);
2500 GR->assignSPIRVTypeToVReg(PtrType, PtrInputReg, MIRBuilder.getMF());
2501 }
2502
2503 for (unsigned index = 2; index < 7; index++) {
2504 ImmArgs.push_back(getConstFromIntrinsic(Call->Arguments[index], MRI));
2505 }
2506
2507 // Emit the instruction
2508 auto MIB = MIRBuilder.buildInstr(Opcode)
2509 .addDef(ActualRetValReg)
2510 .addUse(GR->getSPIRVTypeID(InstructionType));
2511 if (PtrInputReg)
2512 MIB.addUse(PtrInputReg);
2513 else
2514 MIB.addUse(InputReg);
2515
2516 for (uint32_t Imm : ImmArgs)
2517 MIB.addImm(Imm);
2518 unsigned Size = ValTy.getSizeInBytes();
2519 // Store result to the pointer passed in Arg[0]
2520 MachineMemOperand *MMO = MIRBuilder.getMF().getMachineMemOperand(
2521 MachinePointerInfo(), MachineMemOperand::MOStore, Size, Align(4));
2522 MRI->setRegClass(ActualRetValReg, &SPIRV::pIDRegClass);
2523 MIRBuilder.buildStore(ActualRetValReg, Call->Arguments[0], *MMO);
2524 return true;
2525 } else {
2526 for (unsigned index = 1; index < 6; index++)
2527 ImmArgs.push_back(getConstFromIntrinsic(Call->Arguments[index], MRI));
2528
2529 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2530 GR->getSPIRVTypeID(Call->ReturnType), ImmArgs);
2531 }
2532}
2533
2534 static bool generateAPFixedPointInst(const SPIRV::IncomingCall *Call,
2535 MachineIRBuilder &MIRBuilder,
2536 SPIRVGlobalRegistry *GR) {
2537 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2538 unsigned Opcode =
2539 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2540
2541 return buildAPFixedPointInst(Call, Opcode, MIRBuilder, GR);
2542}
2543
2544static bool
2545 generateTernaryBitwiseFunctionINTELInst(const SPIRV::IncomingCall *Call,
2546 MachineIRBuilder &MIRBuilder,
2547 SPIRVGlobalRegistry *GR) {
2548 // Lookup the instruction opcode in the TableGen records.
2549 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2550 unsigned Opcode =
2551 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2552
2553 return buildTernaryBitwiseFunctionINTELInst(Call, Opcode, MIRBuilder, GR);
2554}
2555
2556 static bool generateImageChannelDataTypeInst(const SPIRV::IncomingCall *Call,
2557 MachineIRBuilder &MIRBuilder,
2558 SPIRVGlobalRegistry *GR) {
2559 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2560 unsigned Opcode =
2561 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2562
2563 return buildImageChannelDataTypeInst(Call, Opcode, MIRBuilder, GR);
2564}
2565
2566 static bool generate2DBlockIOINTELInst(const SPIRV::IncomingCall *Call,
2567 MachineIRBuilder &MIRBuilder,
2568 SPIRVGlobalRegistry *GR) {
2569 // Lookup the instruction opcode in the TableGen records.
2570 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2571 unsigned Opcode =
2572 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2573
2574 return build2DBlockIOINTELInst(Call, Opcode, MIRBuilder, GR);
2575}
2576
2577 static bool generatePipeInst(const SPIRV::IncomingCall *Call,
2578 MachineIRBuilder &MIRBuilder,
2579 SPIRVGlobalRegistry *GR) {
2580 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2581 unsigned Opcode =
2582 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2583
2584 unsigned Scope = SPIRV::Scope::Workgroup;
2585 if (Builtin->Name.contains("sub_group"))
2586 Scope = SPIRV::Scope::Subgroup;
2587
2588 return buildPipeInst(Call, Opcode, Scope, MIRBuilder, GR);
2589}
2590
2591 static bool generatePredicatedLoadStoreInst(const SPIRV::IncomingCall *Call,
2592 MachineIRBuilder &MIRBuilder,
2593 SPIRVGlobalRegistry *GR) {
2594 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2595 unsigned Opcode =
2596 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2597
2598 bool IsSet = Opcode != SPIRV::OpPredicatedStoreINTEL;
2599 unsigned ArgSz = Call->Arguments.size();
2600 SmallVector<uint32_t, 1> ImmArgs;
2601 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2602 // Memory operand is optional and is literal.
2603 if (ArgSz > 3)
2604 ImmArgs.push_back(
2605 getConstFromIntrinsic(Call->Arguments[/*Literal index*/ 3], MRI));
2606
2607 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
2608 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2609 IsSet ? TypeReg : Register(0), ImmArgs);
2610}
2611
2612 static bool buildNDRange(const SPIRV::IncomingCall *Call,
2613 MachineIRBuilder &MIRBuilder,
2614 SPIRVGlobalRegistry *GR) {
2615 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2616 SPIRVType *PtrType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
2617 assert(PtrType->getOpcode() == SPIRV::OpTypePointer &&
2618 PtrType->getOperand(2).isReg());
2619 Register TypeReg = PtrType->getOperand(2).getReg();
2620 SPIRVType *StructType = GR->getSPIRVTypeForVReg(TypeReg);
2621 MachineFunction &MF = MIRBuilder.getMF();
2622 Register TmpReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2623 GR->assignSPIRVTypeToVReg(StructType, TmpReg, MF);
2624 // Skip the first arg, it's the destination pointer. OpBuildNDRange takes
2625 // three other arguments, so pass a zero constant for any that are absent.
2626 unsigned NumArgs = Call->Arguments.size();
2627 assert(NumArgs >= 2);
2628 Register GlobalWorkSize = Call->Arguments[NumArgs < 4 ? 1 : 2];
2629 Register LocalWorkSize =
2630 NumArgs == 2 ? Register(0) : Call->Arguments[NumArgs < 4 ? 2 : 3];
2631 Register GlobalWorkOffset = NumArgs <= 3 ? Register(0) : Call->Arguments[1];
2632 if (NumArgs < 4) {
2633 Register Const;
2634 SPIRVType *SpvTy = GR->getSPIRVTypeForVReg(GlobalWorkSize);
2635 if (SpvTy->getOpcode() == SPIRV::OpTypePointer) {
2636 MachineInstr *DefInstr = MRI->getUniqueVRegDef(GlobalWorkSize);
2637 assert(DefInstr && isSpvIntrinsic(*DefInstr, Intrinsic::spv_gep) &&
2638 DefInstr->getOperand(3).isReg());
2639 Register GWSPtr = DefInstr->getOperand(3).getReg();
2640 // TODO: Maybe simplify generation of the type of the fields.
2641 unsigned Size = Call->Builtin->Name == "ndrange_3D" ? 3 : 2;
2642 unsigned BitWidth = GR->getPointerSize() == 64 ? 64 : 32;
2643 Type *BaseTy = IntegerType::get(MF.getFunction().getContext(), BitWidth);
2644 Type *FieldTy = ArrayType::get(BaseTy, Size);
2645 SPIRVType *SpvFieldTy = GR->getOrCreateSPIRVType(
2646 FieldTy, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, true);
2647 GlobalWorkSize = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2648 GR->assignSPIRVTypeToVReg(SpvFieldTy, GlobalWorkSize, MF);
2649 MIRBuilder.buildInstr(SPIRV::OpLoad)
2650 .addDef(GlobalWorkSize)
2651 .addUse(GR->getSPIRVTypeID(SpvFieldTy))
2652 .addUse(GWSPtr);
2653 const SPIRVSubtarget &ST =
2654 cast<SPIRVSubtarget>(MIRBuilder.getMF().getSubtarget());
2655 Const = GR->getOrCreateConstIntArray(0, Size, *MIRBuilder.getInsertPt(),
2656 SpvFieldTy, *ST.getInstrInfo());
2657 } else {
2658 Const = GR->buildConstantInt(0, MIRBuilder, SpvTy, true);
2659 }
2660 if (!LocalWorkSize.isValid())
2661 LocalWorkSize = Const;
2662 if (!GlobalWorkOffset.isValid())
2663 GlobalWorkOffset = Const;
2664 }
2665 assert(LocalWorkSize.isValid() && GlobalWorkOffset.isValid());
2666 MIRBuilder.buildInstr(SPIRV::OpBuildNDRange)
2667 .addDef(TmpReg)
2668 .addUse(TypeReg)
2669 .addUse(GlobalWorkSize)
2670 .addUse(LocalWorkSize)
2671 .addUse(GlobalWorkOffset);
2672 return MIRBuilder.buildInstr(SPIRV::OpStore)
2673 .addUse(Call->Arguments[0])
2674 .addUse(TmpReg);
2675}
2676
2677 // TODO: maybe move to the global registry.
2678static SPIRVType *
2679 getOrCreateSPIRVDeviceEventPointer(MachineIRBuilder &MIRBuilder,
2680 SPIRVGlobalRegistry *GR) {
2681 LLVMContext &Context = MIRBuilder.getMF().getFunction().getContext();
2682 unsigned SC1 = storageClassToAddressSpace(SPIRV::StorageClass::Generic);
2683 Type *PtrType = PointerType::get(Context, SC1);
2684 return GR->getOrCreateSPIRVType(PtrType, MIRBuilder,
2685 SPIRV::AccessQualifier::ReadWrite, true);
2686}
2687
2688 static bool buildEnqueueKernel(const SPIRV::IncomingCall *Call,
2689 MachineIRBuilder &MIRBuilder,
2690 SPIRVGlobalRegistry *GR) {
2691 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2692 const DataLayout &DL = MIRBuilder.getDataLayout();
2693 bool IsSpirvOp = Call->isSpirvOp();
2694 bool HasEvents = Call->Builtin->Name.contains("events") || IsSpirvOp;
2695 const SPIRVType *Int32Ty = GR->getOrCreateSPIRVIntegerType(32, MIRBuilder);
2696
2697 // Make vararg instructions before OpEnqueueKernel.
2698 // Local sizes arguments: Sizes of block invoke arguments. Clang generates
2699 // local size operands as an array, so we need to unpack them.
2700 SmallVector<Register, 16> LocalSizes;
2701 if (Call->Builtin->Name.contains("_varargs") || IsSpirvOp) {
2702 const unsigned LocalSizeArrayIdx = HasEvents ? 9 : 6;
2703 Register GepReg = Call->Arguments[LocalSizeArrayIdx];
2704 MachineInstr *GepMI = MRI->getUniqueVRegDef(GepReg);
2705 assert(isSpvIntrinsic(*GepMI, Intrinsic::spv_gep) &&
2706 GepMI->getOperand(3).isReg());
2707 Register ArrayReg = GepMI->getOperand(3).getReg();
2708 MachineInstr *ArrayMI = MRI->getUniqueVRegDef(ArrayReg);
2709 const Type *LocalSizeTy = getMachineInstrType(ArrayMI);
2710 assert(LocalSizeTy && "Local size type is expected");
2711 const uint64_t LocalSizeNum =
2712 cast<ArrayType>(LocalSizeTy)->getNumElements();
2713 unsigned SC = storageClassToAddressSpace(SPIRV::StorageClass::Generic);
2714 const LLT LLType = LLT::pointer(SC, GR->getPointerSize());
2715 const SPIRVType *PointerSizeTy = GR->getOrCreateSPIRVPointerType(
2716 Int32Ty, MIRBuilder, SPIRV::StorageClass::Function);
2717 for (unsigned I = 0; I < LocalSizeNum; ++I) {
2718 Register Reg = MRI->createVirtualRegister(&SPIRV::pIDRegClass);
2719 MRI->setType(Reg, LLType);
2720 GR->assignSPIRVTypeToVReg(PointerSizeTy, Reg, MIRBuilder.getMF());
2721 auto GEPInst = MIRBuilder.buildIntrinsic(
2722 Intrinsic::spv_gep, ArrayRef<Register>{Reg}, true, false);
2723 GEPInst
2724 .addImm(GepMI->getOperand(2).getImm()) // In bound.
2725 .addUse(ArrayMI->getOperand(0).getReg()) // Alloca.
2726 .addUse(buildConstantIntReg32(0, MIRBuilder, GR)) // Indices.
2727 .addUse(buildConstantIntReg32(I, MIRBuilder, GR));
2728 LocalSizes.push_back(Reg);
2729 }
2730 }
2731
2732 // SPIRV OpEnqueueKernel instruction has 10+ arguments.
2733 auto MIB = MIRBuilder.buildInstr(SPIRV::OpEnqueueKernel)
2734 .addDef(Call->ReturnRegister)
2735 .addUse(GR->getSPIRVTypeID(Int32Ty));
2736
2737 // Copy all arguments before block invoke function pointer.
2738 const unsigned BlockFIdx = HasEvents ? 6 : 3;
2739 for (unsigned i = 0; i < BlockFIdx; i++)
2740 MIB.addUse(Call->Arguments[i]);
2741
2742 // If there are no event arguments in the original call, add dummy ones.
2743 if (!HasEvents) {
2744 MIB.addUse(buildConstantIntReg32(0, MIRBuilder, GR)); // Dummy num events.
2745 Register NullPtr = GR->getOrCreateConstNullPtr(
2746 MIRBuilder, getOrCreateSPIRVDeviceEventPointer(MIRBuilder, GR));
2747 MIB.addUse(NullPtr); // Dummy wait events.
2748 MIB.addUse(NullPtr); // Dummy ret event.
2749 }
2750
2751 MachineInstr *BlockMI = getBlockStructInstr(Call->Arguments[BlockFIdx], MRI);
2752 assert(BlockMI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
2753 // Invoke: Pointer to invoke function.
2754 MIB.addGlobalAddress(BlockMI->getOperand(1).getGlobal());
2755
2756 Register BlockLiteralReg = Call->Arguments[BlockFIdx + 1];
2757 // Param: Pointer to block literal.
2758 MIB.addUse(BlockLiteralReg);
2759
2760 Type *PType = const_cast<Type *>(getBlockStructType(BlockLiteralReg, MRI));
2761 // TODO: these numbers should be obtained from block literal structure.
2762 // Param Size: Size of block literal structure.
2763 MIB.addUse(buildConstantIntReg32(DL.getTypeStoreSize(PType), MIRBuilder, GR));
2764 // Param Alignment: Alignment of block literal structure.
2765 MIB.addUse(buildConstantIntReg32(DL.getPrefTypeAlign(PType).value(),
2766 MIRBuilder, GR));
2767
2768 for (unsigned i = 0; i < LocalSizes.size(); i++)
2769 MIB.addUse(LocalSizes[i]);
2770 return true;
2771}
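// For reference, the OpEnqueueKernel operand order assembled above is:
// queue, flags, ndrange, [num-events, wait-events, ret-event] (dummy
// operands when the source call carries no events), the invoke function,
// the block literal, its size and alignment, then any unpacked local-size
// pointers.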
2772
2773 static bool generateEnqueueInst(const SPIRV::IncomingCall *Call,
2774 MachineIRBuilder &MIRBuilder,
2775 SPIRVGlobalRegistry *GR) {
2776 // Lookup the instruction opcode in the TableGen records.
2777 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2778 unsigned Opcode =
2779 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2780
2781 switch (Opcode) {
2782 case SPIRV::OpRetainEvent:
2783 case SPIRV::OpReleaseEvent:
2784 return MIRBuilder.buildInstr(Opcode).addUse(Call->Arguments[0]);
2785 case SPIRV::OpCreateUserEvent:
2786 case SPIRV::OpGetDefaultQueue:
2787 return MIRBuilder.buildInstr(Opcode)
2788 .addDef(Call->ReturnRegister)
2789 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
2790 case SPIRV::OpIsValidEvent:
2791 return MIRBuilder.buildInstr(Opcode)
2792 .addDef(Call->ReturnRegister)
2793 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2794 .addUse(Call->Arguments[0]);
2795 case SPIRV::OpSetUserEventStatus:
2796 return MIRBuilder.buildInstr(Opcode)
2797 .addUse(Call->Arguments[0])
2798 .addUse(Call->Arguments[1]);
2799 case SPIRV::OpCaptureEventProfilingInfo:
2800 return MIRBuilder.buildInstr(Opcode)
2801 .addUse(Call->Arguments[0])
2802 .addUse(Call->Arguments[1])
2803 .addUse(Call->Arguments[2]);
2804 case SPIRV::OpBuildNDRange:
2805 return buildNDRange(Call, MIRBuilder, GR);
2806 case SPIRV::OpEnqueueKernel:
2807 return buildEnqueueKernel(Call, MIRBuilder, GR);
2808 default:
2809 return false;
2810 }
2811}
2812
2813 static bool generateAsyncCopyInst(const SPIRV::IncomingCall *Call,
2814 MachineIRBuilder &MIRBuilder,
2815 SPIRVGlobalRegistry *GR) {
2816 // Lookup the instruction opcode in the TableGen records.
2817 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2818 unsigned Opcode =
2819 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2820
2821 bool IsSet = Opcode == SPIRV::OpGroupAsyncCopy;
2822 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
2823 if (Call->isSpirvOp())
2824 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2825 IsSet ? TypeReg : Register(0));
2826
2827 auto Scope = buildConstantIntReg32(SPIRV::Scope::Workgroup, MIRBuilder, GR);
2828
2829 switch (Opcode) {
2830 case SPIRV::OpGroupAsyncCopy: {
2831 SPIRVType *NewType =
2832 Call->ReturnType->getOpcode() == SPIRV::OpTypeEvent
2833 ? nullptr
2834 : GR->getOrCreateSPIRVTypeByName("spirv.Event", MIRBuilder, true);
2835 Register TypeReg = GR->getSPIRVTypeID(NewType ? NewType : Call->ReturnType);
2836 unsigned NumArgs = Call->Arguments.size();
2837 Register EventReg = Call->Arguments[NumArgs - 1];
2838 bool Res = MIRBuilder.buildInstr(Opcode)
2839 .addDef(Call->ReturnRegister)
2840 .addUse(TypeReg)
2841 .addUse(Scope)
2842 .addUse(Call->Arguments[0])
2843 .addUse(Call->Arguments[1])
2844 .addUse(Call->Arguments[2])
2845 .addUse(Call->Arguments.size() > 4
2846 ? Call->Arguments[3]
2847 : buildConstantIntReg32(1, MIRBuilder, GR))
2848 .addUse(EventReg);
2849 if (NewType != nullptr)
2850 updateRegType(Call->ReturnRegister, nullptr, NewType, GR, MIRBuilder,
2851 MIRBuilder.getMF().getRegInfo());
2852 return Res;
2853 }
2854 case SPIRV::OpGroupWaitEvents:
2855 return MIRBuilder.buildInstr(Opcode)
2856 .addUse(Scope)
2857 .addUse(Call->Arguments[0])
2858 .addUse(Call->Arguments[1]);
2859 default:
2860 return false;
2861 }
2862}
2863
2864static bool generateConvertInst(const StringRef DemangledCall,
2865 const SPIRV::IncomingCall *Call,
2866 MachineIRBuilder &MIRBuilder,
2867 SPIRVGlobalRegistry *GR) {
2868 // Lookup the conversion builtin in the TableGen records.
2869 const SPIRV::ConvertBuiltin *Builtin =
2870 SPIRV::lookupConvertBuiltin(Call->Builtin->Name, Call->Builtin->Set);
2871
2872 if (!Builtin && Call->isSpirvOp()) {
2873 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2874 unsigned Opcode =
2875 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2876 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2877 GR->getSPIRVTypeID(Call->ReturnType));
2878 }
2879
2880 assert(Builtin && "Conversion builtin not found.");
2881 if (Builtin->IsSaturated)
2882 buildOpDecorate(Call->ReturnRegister, MIRBuilder,
2883 SPIRV::Decoration::SaturatedConversion, {});
2884 if (Builtin->IsRounded)
2885 buildOpDecorate(Call->ReturnRegister, MIRBuilder,
2886 SPIRV::Decoration::FPRoundingMode,
2887 {(unsigned)Builtin->RoundingMode});
2888
2889 std::string NeedExtMsg; // no errors if empty
2890 bool IsRightComponentsNumber = true; // input and output component counts must match
2891 unsigned Opcode = SPIRV::OpNop;
2892 if (GR->isScalarOrVectorOfType(Call->Arguments[0], SPIRV::OpTypeInt)) {
2893 // Int -> ...
2894 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) {
2895 // Int -> Int
2896 if (Builtin->IsSaturated)
2897 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpSatConvertUToS
2898 : SPIRV::OpSatConvertSToU;
2899 else
2900 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpUConvert
2901 : SPIRV::OpSConvert;
2902 } else if (GR->isScalarOrVectorOfType(Call->ReturnRegister,
2903 SPIRV::OpTypeFloat)) {
2904 // Int -> Float
2905 if (Builtin->IsBfloat16) {
2906 const auto *ST = static_cast<const SPIRVSubtarget *>(
2907 &MIRBuilder.getMF().getSubtarget());
2908 if (!ST->canUseExtension(
2909 SPIRV::Extension::SPV_INTEL_bfloat16_conversion))
2910 NeedExtMsg = "SPV_INTEL_bfloat16_conversion";
2911 IsRightComponentsNumber =
2912 GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
2913 GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
2914 Opcode = SPIRV::OpConvertBF16ToFINTEL;
2915 } else {
2916 bool IsSourceSigned =
2917 DemangledCall[DemangledCall.find_first_of('(') + 1] != 'u';
2918 Opcode = IsSourceSigned ? SPIRV::OpConvertSToF : SPIRV::OpConvertUToF;
2919 }
2920 }
2921 } else if (GR->isScalarOrVectorOfType(Call->Arguments[0],
2922 SPIRV::OpTypeFloat)) {
2923 // Float -> ...
2924 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) {
2925 // Float -> Int
2926 if (Builtin->IsBfloat16) {
2927 const auto *ST = static_cast<const SPIRVSubtarget *>(
2928 &MIRBuilder.getMF().getSubtarget());
2929 if (!ST->canUseExtension(
2930 SPIRV::Extension::SPV_INTEL_bfloat16_conversion))
2931 NeedExtMsg = "SPV_INTEL_bfloat16_conversion";
2932 IsRightComponentsNumber =
2933 GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
2934 GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
2935 Opcode = SPIRV::OpConvertFToBF16INTEL;
2936 } else {
2937 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpConvertFToS
2938 : SPIRV::OpConvertFToU;
2939 }
2940 } else if (GR->isScalarOrVectorOfType(Call->ReturnRegister,
2941 SPIRV::OpTypeFloat)) {
2942 if (Builtin->IsTF32) {
2943 const auto *ST = static_cast<const SPIRVSubtarget *>(
2944 &MIRBuilder.getMF().getSubtarget());
2945 if (!ST->canUseExtension(
2946 SPIRV::Extension::SPV_INTEL_tensor_float32_conversion))
2947 NeedExtMsg = "SPV_INTEL_tensor_float32_conversion";
2948 IsRightComponentsNumber =
2949 GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
2950 GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
2951 Opcode = SPIRV::OpRoundFToTF32INTEL;
2952 } else {
2953 // Float -> Float
2954 Opcode = SPIRV::OpFConvert;
2955 }
2956 }
2957 }
2958
2959 if (!NeedExtMsg.empty()) {
2960 std::string DiagMsg = std::string(Builtin->Name) +
2961 ": the builtin requires the following SPIR-V "
2962 "extension: " +
2963 NeedExtMsg;
2964 report_fatal_error(DiagMsg.c_str(), false);
2965 }
2966 if (!IsRightComponentsNumber) {
2967 std::string DiagMsg =
2968 std::string(Builtin->Name) +
2969 ": result and argument must have the same number of components";
2970 report_fatal_error(DiagMsg.c_str(), false);
2971 }
2972 assert(Opcode != SPIRV::OpNop &&
2973 "Conversion between the types not implemented!");
2974
2975 MIRBuilder.buildInstr(Opcode)
2976 .addDef(Call->ReturnRegister)
2977 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2978 .addUse(Call->Arguments[0]);
2979 return true;
2980}
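// For illustration, a few conversions and the opcode/decoration the logic
// above selects (OpenCL-side names assumed; no bfloat16/TF32 extensions
// involved):
//   convert_int4_sat(float4) -> OpConvertFToS + SaturatedConversion
//   convert_float4_rte(int4) -> OpConvertSToF + FPRoundingMode RTE
//   convert_uchar_sat(char)  -> OpSatConvertSToU
//   convert_half(float)      -> OpFConvert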
2981
2982 static bool generateVectorLoadStoreInst(const SPIRV::IncomingCall *Call,
2983 MachineIRBuilder &MIRBuilder,
2984 SPIRVGlobalRegistry *GR) {
2985 // Lookup the vector load/store builtin in the TableGen records.
2986 const SPIRV::VectorLoadStoreBuiltin *Builtin =
2987 SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name,
2988 Call->Builtin->Set);
2989 // Build extended instruction.
2990 auto MIB =
2991 MIRBuilder.buildInstr(SPIRV::OpExtInst)
2992 .addDef(Call->ReturnRegister)
2993 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2994 .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
2995 .addImm(Builtin->Number);
2996 for (auto Argument : Call->Arguments)
2997 MIB.addUse(Argument);
2998 if (Builtin->Name.contains("load") && Builtin->ElementCount > 1)
2999 MIB.addImm(Builtin->ElementCount);
3000
3001 // Rounding mode should be passed as the last argument in the MI for builtins
3002 // like "vstorea_halfn_r".
3003 if (Builtin->IsRounded)
3004 MIB.addImm(static_cast<uint32_t>(Builtin->RoundingMode));
3005 return true;
3006}
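// For illustration, matching the addImm calls above (OpenCL names assumed):
//   vloada_half4(0, p)         -> OpExtInst OpenCL_std vloada_halfn with a
//                                 trailing element-count literal of 4
//   vstorea_half4_rte(v, 0, p) -> OpExtInst OpenCL_std vstorea_halfn_r with
//                                 a trailing RTE rounding-mode literal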
3007
3008 static bool generateLoadStoreInst(const SPIRV::IncomingCall *Call,
3009 MachineIRBuilder &MIRBuilder,
3010 SPIRVGlobalRegistry *GR) {
3011 // Lookup the instruction opcode in the TableGen records.
3012 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
3013 unsigned Opcode =
3014 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
3015 bool IsLoad = Opcode == SPIRV::OpLoad;
3016 // Build the instruction.
3017 auto MIB = MIRBuilder.buildInstr(Opcode);
3018 if (IsLoad) {
3019 MIB.addDef(Call->ReturnRegister);
3020 MIB.addUse(GR->getSPIRVTypeID(Call->ReturnType));
3021 }
3022 // Add a pointer to the value to load/store.
3023 MIB.addUse(Call->Arguments[0]);
3024 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
3025 // Add a value to store.
3026 if (!IsLoad)
3027 MIB.addUse(Call->Arguments[1]);
3028 // Add optional memory attributes and an alignment.
3029 unsigned NumArgs = Call->Arguments.size();
3030 if ((IsLoad && NumArgs >= 2) || NumArgs >= 3)
3031 MIB.addImm(getConstFromIntrinsic(Call->Arguments[IsLoad ? 1 : 2], MRI));
3032 if ((IsLoad && NumArgs >= 3) || NumArgs >= 4)
3033 MIB.addImm(getConstFromIntrinsic(Call->Arguments[IsLoad ? 2 : 3], MRI));
3034 return true;
3035}
3036
3037namespace SPIRV {
3038 // Try to find builtin function attributes by a demangled function name and
3039 // return a tuple <builtin group, op code, ext instruction number>, or a special
3040 // tuple value <-1, 0, 0> if the builtin function is not found.
3041 // Not all builtin functions are supported, only those with a ready-to-use op
3042 // code or instruction number defined in TableGen.
3043 // TODO: consider a major rework of mapping demangled calls into builtin
3044 // functions to unify search and decrease the number of individual cases.
3045std::tuple<int, unsigned, unsigned>
3046mapBuiltinToOpcode(const StringRef DemangledCall,
3047 SPIRV::InstructionSet::InstructionSet Set) {
3048 Register Reg;
3049 SmallVector<Register> Args;
3050 std::unique_ptr<const IncomingCall> Call =
3051 lookupBuiltin(DemangledCall, Set, Reg, nullptr, Args);
3052 if (!Call)
3053 return std::make_tuple(-1, 0, 0);
3054
3055 switch (Call->Builtin->Group) {
3056 case SPIRV::Relational:
3057 case SPIRV::Atomic:
3058 case SPIRV::Barrier:
3059 case SPIRV::CastToPtr:
3060 case SPIRV::ImageMiscQuery:
3061 case SPIRV::SpecConstant:
3062 case SPIRV::Enqueue:
3063 case SPIRV::AsyncCopy:
3064 case SPIRV::LoadStore:
3065 case SPIRV::CoopMatr:
3066 if (const auto *R =
3067 SPIRV::lookupNativeBuiltin(Call->Builtin->Name, Call->Builtin->Set))
3068 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
3069 break;
3070 case SPIRV::Extended:
3071 if (const auto *R = SPIRV::lookupExtendedBuiltin(Call->Builtin->Name,
3072 Call->Builtin->Set))
3073 return std::make_tuple(Call->Builtin->Group, 0, R->Number);
3074 break;
3075 case SPIRV::VectorLoadStore:
3076 if (const auto *R = SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name,
3077 Call->Builtin->Set))
3078 return std::make_tuple(SPIRV::Extended, 0, R->Number);
3079 break;
3080 case SPIRV::Group:
3081 if (const auto *R = SPIRV::lookupGroupBuiltin(Call->Builtin->Name))
3082 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
3083 break;
3084 case SPIRV::AtomicFloating:
3085 if (const auto *R = SPIRV::lookupAtomicFloatingBuiltin(Call->Builtin->Name))
3086 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
3087 break;
3088 case SPIRV::IntelSubgroups:
3089 if (const auto *R = SPIRV::lookupIntelSubgroupsBuiltin(Call->Builtin->Name))
3090 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
3091 break;
3092 case SPIRV::GroupUniform:
3093 if (const auto *R = SPIRV::lookupGroupUniformBuiltin(Call->Builtin->Name))
3094 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
3095 break;
3096 case SPIRV::IntegerDot:
3097 if (const auto *R =
3098 SPIRV::lookupIntegerDotProductBuiltin(Call->Builtin->Name))
3099 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
3100 break;
3101 case SPIRV::WriteImage:
3102 return std::make_tuple(Call->Builtin->Group, SPIRV::OpImageWrite, 0);
3103 case SPIRV::Select:
3104 return std::make_tuple(Call->Builtin->Group, TargetOpcode::G_SELECT, 0);
3105 case SPIRV::Construct:
3106 return std::make_tuple(Call->Builtin->Group, SPIRV::OpCompositeConstruct,
3107 0);
3108 case SPIRV::KernelClock:
3109 return std::make_tuple(Call->Builtin->Group, SPIRV::OpReadClockKHR, 0);
3110 default:
3111 return std::make_tuple(-1, 0, 0);
3112 }
3113 return std::make_tuple(-1, 0, 0);
3114}
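// Minimal usage sketch (the demangled name here is only an example):
//   auto [Group, Opcode, ExtNo] = SPIRV::mapBuiltinToOpcode(
//       "barrier(unsigned int)", SPIRV::InstructionSet::OpenCL_std);
//   if (Group < 0)
//     ; // not a builtin with a ready-to-use opcode/instruction number
//   else if (Group == SPIRV::Extended)
//     ; // ExtNo holds the extended-set instruction number
//   else
//     ; // Opcode holds the native (or generic, e.g. G_SELECT) opcode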
3115
3116std::optional<bool> lowerBuiltin(const StringRef DemangledCall,
3117 SPIRV::InstructionSet::InstructionSet Set,
3118 MachineIRBuilder &MIRBuilder,
3119 const Register OrigRet, const Type *OrigRetTy,
3120 const SmallVectorImpl<Register> &Args,
3121 SPIRVGlobalRegistry *GR, const CallBase &CB) {
3122 LLVM_DEBUG(dbgs() << "Lowering builtin call: " << DemangledCall << "\n");
3123
3124 // Lookup the builtin in the TableGen records.
3125 SPIRVType *SpvType = GR->getSPIRVTypeForVReg(OrigRet);
3126 assert(SpvType && "Inconsistent return register: expected valid type info");
3127 std::unique_ptr<const IncomingCall> Call =
3128 lookupBuiltin(DemangledCall, Set, OrigRet, SpvType, Args);
3129
3130 if (!Call) {
3131 LLVM_DEBUG(dbgs() << "Builtin record was not found!\n");
3132 return std::nullopt;
3133 }
3134
3135 // TODO: check if the provided args meet the builtin requirements.
3136 assert(Args.size() >= Call->Builtin->MinNumArgs &&
3137 "Too few arguments to generate the builtin");
3138 if (Call->Builtin->MaxNumArgs && Args.size() > Call->Builtin->MaxNumArgs)
3139 LLVM_DEBUG(dbgs() << "More arguments provided than the builtin accepts!\n");
3140
3141 // Match the builtin with implementation based on the grouping.
3142 switch (Call->Builtin->Group) {
3143 case SPIRV::Extended:
3144 return generateExtInst(Call.get(), MIRBuilder, GR, CB);
3145 case SPIRV::Relational:
3146 return generateRelationalInst(Call.get(), MIRBuilder, GR);
3147 case SPIRV::Group:
3148 return generateGroupInst(Call.get(), MIRBuilder, GR);
3149 case SPIRV::Variable:
3150 return generateBuiltinVar(Call.get(), MIRBuilder, GR);
3151 case SPIRV::Atomic:
3152 return generateAtomicInst(Call.get(), MIRBuilder, GR);
3153 case SPIRV::AtomicFloating:
3154 return generateAtomicFloatingInst(Call.get(), MIRBuilder, GR);
3155 case SPIRV::Barrier:
3156 return generateBarrierInst(Call.get(), MIRBuilder, GR);
3157 case SPIRV::CastToPtr:
3158 return generateCastToPtrInst(Call.get(), MIRBuilder, GR);
3159 case SPIRV::Dot:
3160 case SPIRV::IntegerDot:
3161 return generateDotOrFMulInst(DemangledCall, Call.get(), MIRBuilder, GR);
3162 case SPIRV::Wave:
3163 return generateWaveInst(Call.get(), MIRBuilder, GR);
3164 case SPIRV::ICarryBorrow:
3165 return generateICarryBorrowInst(Call.get(), MIRBuilder, GR);
3166 case SPIRV::GetQuery:
3167 return generateGetQueryInst(Call.get(), MIRBuilder, GR);
3168 case SPIRV::ImageSizeQuery:
3169 return generateImageSizeQueryInst(Call.get(), MIRBuilder, GR);
3170 case SPIRV::ImageMiscQuery:
3171 return generateImageMiscQueryInst(Call.get(), MIRBuilder, GR);
3172 case SPIRV::ReadImage:
3173 return generateReadImageInst(DemangledCall, Call.get(), MIRBuilder, GR);
3174 case SPIRV::WriteImage:
3175 return generateWriteImageInst(Call.get(), MIRBuilder, GR);
3176 case SPIRV::SampleImage:
3177 return generateSampleImageInst(DemangledCall, Call.get(), MIRBuilder, GR);
3178 case SPIRV::Select:
3179 return generateSelectInst(Call.get(), MIRBuilder);
3180 case SPIRV::Construct:
3181 return generateConstructInst(Call.get(), MIRBuilder, GR);
3182 case SPIRV::SpecConstant:
3183 return generateSpecConstantInst(Call.get(), MIRBuilder, GR);
3184 case SPIRV::Enqueue:
3185 return generateEnqueueInst(Call.get(), MIRBuilder, GR);
3186 case SPIRV::AsyncCopy:
3187 return generateAsyncCopy(Call.get(), MIRBuilder, GR);
3188 case SPIRV::Convert:
3189 return generateConvertInst(DemangledCall, Call.get(), MIRBuilder, GR);
3190 case SPIRV::VectorLoadStore:
3191 return generateVectorLoadStoreInst(Call.get(), MIRBuilder, GR);
3192 case SPIRV::LoadStore:
3193 return generateLoadStoreInst(Call.get(), MIRBuilder, GR);
3194 case SPIRV::IntelSubgroups:
3195 return generateIntelSubgroupsInst(Call.get(), MIRBuilder, GR);
3196 case SPIRV::GroupUniform:
3197 return generateGroupUniformInst(Call.get(), MIRBuilder, GR);
3198 case SPIRV::KernelClock:
3199 return generateKernelClockInst(Call.get(), MIRBuilder, GR);
3200 case SPIRV::CoopMatr:
3201 return generateCoopMatrInst(Call.get(), MIRBuilder, GR);
3202 case SPIRV::ExtendedBitOps:
3203 return generateExtendedBitOpsInst(Call.get(), MIRBuilder, GR);
3204 case SPIRV::BindlessINTEL:
3205 return generateBindlessImageINTELInst(Call.get(), MIRBuilder, GR);
3206 case SPIRV::TernaryBitwiseINTEL:
3207 return generateTernaryBitwiseFunctionINTELInst(Call.get(), MIRBuilder, GR);
3208 case SPIRV::Block2DLoadStore:
3209 return generate2DBlockIOINTELInst(Call.get(), MIRBuilder, GR);
3210 case SPIRV::Pipe:
3211 return generatePipeInst(Call.get(), MIRBuilder, GR);
3212 case SPIRV::PredicatedLoadStore:
3213 return generatePredicatedLoadStoreInst(Call.get(), MIRBuilder, GR);
3214 case SPIRV::BlockingPipes:
3215 return generateBlockingPipesInst(Call.get(), MIRBuilder, GR);
3216 case SPIRV::ArbitraryPrecisionFixedPoint:
3217 return generateAPFixedPointInst(Call.get(), MIRBuilder, GR);
3218 case SPIRV::ImageChannelDataTypes:
3219 return generateImageChannelDataTypeInst(Call.get(), MIRBuilder, GR);
3220 }
3221 return false;
3222}
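// Return-value contract (a summary of the paths above):
//   std::nullopt -> no TableGen record matched; the caller should fall back
//                   to lowering the call as an ordinary function call.
//   true         -> the builtin was matched and instructions were emitted.
//   false        -> the builtin was matched but lowering failed.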
3223
3224Type *parseBuiltinCallArgumentType(StringRef TypeStr, LLVMContext &Ctx) {
3225 // Parse strings representing OpenCL builtin types.
3226 if (hasBuiltinTypePrefix(TypeStr)) {
3227 // OpenCL builtin types in demangled call strings have the following format:
3228 // e.g. ocl_image2d_ro
3229 [[maybe_unused]] bool IsOCLBuiltinType = TypeStr.consume_front("ocl_");
3230 assert(IsOCLBuiltinType && "Invalid OpenCL builtin prefix");
3231
3232 // Check if this is a pointer to a builtin type rather than just a pointer
3233 // representing a builtin type. A pointer to a builtin type requires
3234 // additional handling in the method calling
3235 // parseBuiltinCallArgumentBaseType(...), as this function only retrieves
3236 // the base types.
3237 if (TypeStr.ends_with("*"))
3238 TypeStr = TypeStr.slice(0, TypeStr.find_first_of(" *"));
3239
3240 return parseBuiltinTypeNameToTargetExtType("opencl." + TypeStr.str() + "_t",
3241 Ctx);
3242 }
3243
3244 // Parse type name in either "typeN" or "type vector[N]" format, where
3245 // N is the number of elements of the vector.
3246 Type *BaseType;
3247 unsigned VecElts = 0;
3248
3249 BaseType = parseBasicTypeName(TypeStr, Ctx);
3250 if (!BaseType)
3251 // Unable to recognize SPIRV type name.
3252 return nullptr;
3253
3254 // Handle "typeN*" or "type vector[N]*".
3255 TypeStr.consume_back("*");
3256
3257 if (TypeStr.consume_front(" vector["))
3258 TypeStr = TypeStr.substr(0, TypeStr.find(']'));
3259
3260 TypeStr.getAsInteger(10, VecElts);
3261 if (VecElts > 0)
3262 BaseType = VectorType::get(
3263 BaseType->isVoidTy() ? Type::getInt8Ty(Ctx) : BaseType, VecElts, false);
3264
3265 return BaseType;
3266}
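// Examples of accepted spellings (a sketch derived from the parsing above):
//   "ocl_image2d_ro"  -> TargetExtType via lookup of "opencl.image2d_ro_t"
//   "float4"          -> <4 x float>
//   "uint vector[8]"  -> <8 x i32>
//   "void2"           -> <2 x i8> (void element types are promoted to i8)
//   "half*"           -> half (the trailing '*' only marks a pointer here)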
3267
3268bool parseBuiltinTypeStr(SmallVector<StringRef, 10> &BuiltinArgsTypeStrs,
3269 const StringRef DemangledCall, LLVMContext &Ctx) {
3270 auto Pos1 = DemangledCall.find('(');
3271 if (Pos1 == StringRef::npos)
3272 return false;
3273 auto Pos2 = DemangledCall.find(')');
3274 if (Pos2 == StringRef::npos || Pos1 > Pos2)
3275 return false;
3276 DemangledCall.slice(Pos1 + 1, Pos2)
3277 .split(BuiltinArgsTypeStrs, ',', -1, false);
3278 return true;
3279}
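// Example (sketch): for DemangledCall == "vstore4(float4, ulong, float*)",
// BuiltinArgsTypeStrs becomes {"float4", " ulong", " float*"} - entries are
// left untrimmed, so callers trim them - and the function returns true.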
3280
3281Type *parseBuiltinCallArgumentBaseType(const StringRef DemangledCall,
3282 unsigned ArgIdx, LLVMContext &Ctx) {
3283 SmallVector<StringRef, 10> BuiltinArgsTypeStrs;
3284 parseBuiltinTypeStr(BuiltinArgsTypeStrs, DemangledCall, Ctx);
3285 if (ArgIdx >= BuiltinArgsTypeStrs.size())
3286 return nullptr;
3287 StringRef TypeStr = BuiltinArgsTypeStrs[ArgIdx].trim();
3288 return parseBuiltinCallArgumentType(TypeStr, Ctx);
3289}
3290
3291struct BuiltinType {
3292 StringRef Name;
3293 uint32_t Opcode;
3294};
3295
3296#define GET_BuiltinTypes_DECL
3297#define GET_BuiltinTypes_IMPL
3298
3299struct OpenCLType {
3300 StringRef Name;
3301 StringRef SpirvTypeLiteral;
3302};
3303
3304#define GET_OpenCLTypes_DECL
3305#define GET_OpenCLTypes_IMPL
3306
3307#include "SPIRVGenTables.inc"
3308} // namespace SPIRV
3309
3310//===----------------------------------------------------------------------===//
3311// Misc functions for parsing builtin types.
3312//===----------------------------------------------------------------------===//
3313
3314static Type *parseTypeString(const StringRef Name, LLVMContext &Context) {
3315 if (Name.starts_with("void"))
3316 return Type::getVoidTy(Context);
3317 else if (Name.starts_with("int") || Name.starts_with("uint"))
3318 return Type::getInt32Ty(Context);
3319 else if (Name.starts_with("float"))
3320 return Type::getFloatTy(Context);
3321 else if (Name.starts_with("half"))
3322 return Type::getHalfTy(Context);
3323 report_fatal_error("Unable to recognize type!");
3324}
3325
3326//===----------------------------------------------------------------------===//
3327// Implementation functions for builtin types.
3328//===----------------------------------------------------------------------===//
3329
3330static SPIRVType *getNonParameterizedType(const TargetExtType *ExtensionType,
3331 const SPIRV::BuiltinType *TypeRecord,
3332 MachineIRBuilder &MIRBuilder,
3333 SPIRVGlobalRegistry *GR) {
3334 unsigned Opcode = TypeRecord->Opcode;
3335 // Create or get an existing type from GlobalRegistry.
3336 return GR->getOrCreateOpTypeByOpcode(ExtensionType, MIRBuilder, Opcode);
3337}
3338
3339static SPIRVType *getSamplerType(MachineIRBuilder &MIRBuilder,
3340 SPIRVGlobalRegistry *GR) {
3341 // Create or get an existing type from GlobalRegistry.
3342 return GR->getOrCreateOpTypeSampler(MIRBuilder);
3343}
3344
3345static SPIRVType *getPipeType(const TargetExtType *ExtensionType,
3346 MachineIRBuilder &MIRBuilder,
3347 SPIRVGlobalRegistry *GR) {
3348 assert(ExtensionType->getNumIntParameters() == 1 &&
3349 "Invalid number of parameters for SPIR-V pipe builtin!");
3350 // Create or get an existing type from GlobalRegistry.
3351 return GR->getOrCreateOpTypePipe(MIRBuilder,
3352 SPIRV::AccessQualifier::AccessQualifier(
3353 ExtensionType->getIntParameter(0)));
3354}
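// E.g. (sketch): target("spirv.Pipe", 0) lowers to OpTypePipe with the
// ReadOnly access qualifier (0 = ReadOnly, 1 = WriteOnly, 2 = ReadWrite).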
3355
3356static SPIRVType *getCoopMatrType(const TargetExtType *ExtensionType,
3357 MachineIRBuilder &MIRBuilder,
3358 SPIRVGlobalRegistry *GR) {
3359 assert(ExtensionType->getNumIntParameters() == 4 &&
3360 "Invalid number of parameters for SPIR-V coop matrices builtin!");
3361 assert(ExtensionType->getNumTypeParameters() == 1 &&
3362 "SPIR-V coop matrices builtin type must have a type parameter!");
3363 const SPIRVType *ElemType =
3364 GR->getOrCreateSPIRVType(ExtensionType->getTypeParameter(0), MIRBuilder,
3365 SPIRV::AccessQualifier::ReadWrite, true);
3366 // Create or get an existing type from GlobalRegistry.
3367 return GR->getOrCreateOpTypeCoopMatr(
3368 MIRBuilder, ExtensionType, ElemType, ExtensionType->getIntParameter(0),
3369 ExtensionType->getIntParameter(1), ExtensionType->getIntParameter(2),
3370 ExtensionType->getIntParameter(3), true);
3371}
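// Parameter layout (sketch): the builtin type is spelled
//   target("spirv.CooperativeMatrixKHR", %elem, scope, rows, columns, use)
// e.g. target("spirv.CooperativeMatrixKHR", float, 3, 16, 16, 0) for a
// 16x16 float matrix with Subgroup scope (3) and MatrixAKHR use (0).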
3372
3373static SPIRVType *getSampledImageType(const TargetExtType *OpaqueType,
3374 MachineIRBuilder &MIRBuilder,
3375 SPIRVGlobalRegistry *GR) {
3376 SPIRVType *OpaqueImageType = GR->getImageType(
3377 OpaqueType, SPIRV::AccessQualifier::ReadOnly, MIRBuilder);
3378 // Create or get an existing type from GlobalRegistry.
3379 return GR->getOrCreateOpTypeSampledImage(OpaqueImageType, MIRBuilder);
3380}
3381
3382static SPIRVType *getInlineSpirvType(const TargetExtType *ExtensionType,
3383 MachineIRBuilder &MIRBuilder,
3384 SPIRVGlobalRegistry *GR) {
3385 assert(ExtensionType->getNumIntParameters() == 3 &&
3386 "Inline SPIR-V type builtin takes an opcode, size, and alignment "
3387 "parameter");
3388 auto Opcode = ExtensionType->getIntParameter(0);
3389
3390 SmallVector<MCOperand> Operands;
3391 for (Type *Param : ExtensionType->type_params()) {
3392 if (const TargetExtType *ParamEType = dyn_cast<TargetExtType>(Param)) {
3393 if (ParamEType->getName() == "spirv.IntegralConstant") {
3394 assert(ParamEType->getNumTypeParameters() == 1 &&
3395 "Inline SPIR-V integral constant builtin must have a type "
3396 "parameter");
3397 assert(ParamEType->getNumIntParameters() == 1 &&
3398 "Inline SPIR-V integral constant builtin must have a "
3399 "value parameter");
3400
3401 auto OperandValue = ParamEType->getIntParameter(0);
3402 auto *OperandType = ParamEType->getTypeParameter(0);
3403
3404 const SPIRVType *OperandSPIRVType = GR->getOrCreateSPIRVType(
3405 OperandType, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, true);
3406
3407 Operands.push_back(MCOperand::createReg(GR->buildConstantInt(
3408 OperandValue, MIRBuilder, OperandSPIRVType, true)));
3409 continue;
3410 } else if (ParamEType->getName() == "spirv.Literal") {
3411 assert(ParamEType->getNumTypeParameters() == 0 &&
3412 "Inline SPIR-V literal builtin does not take type "
3413 "parameters");
3414 assert(ParamEType->getNumIntParameters() == 1 &&
3415 "Inline SPIR-V literal builtin must have an integer "
3416 "parameter");
3417
3418 auto OperandValue = ParamEType->getIntParameter(0);
3419
3420 Operands.push_back(MCOperand::createImm(OperandValue));
3421 continue;
3422 }
3423 }
3424 const SPIRVType *TypeOperand = GR->getOrCreateSPIRVType(
3425 Param, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, true);
3426 Operands.push_back(MCOperand::createReg(GR->getSPIRVTypeID(TypeOperand)));
3427 }
3428
3429 return GR->getOrCreateUnknownType(ExtensionType, MIRBuilder, Opcode,
3430 Operands);
3431}
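// Shape of the inline type builtin handled above (operand values are
// illustrative):
//   target("spirv.Type", <params...>, Opcode, Size, Alignment)
// Plain type parameters become type-id operands, target("spirv.Literal", N)
// wrappers become immediate operands, and
// target("spirv.IntegralConstant", T, N) wrappers become constant-id operands
// of type T.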
3432
3433static SPIRVType *getVulkanBufferType(const TargetExtType *ExtensionType,
3434 MachineIRBuilder &MIRBuilder,
3435 SPIRVGlobalRegistry *GR) {
3436 assert(ExtensionType->getNumTypeParameters() == 1 &&
3437 "Vulkan buffers have exactly one type parameter: the element type.");
3438 assert(ExtensionType->getNumIntParameters() == 2 &&
3439 "Vulkan buffers have 2 integer parameters: storage class and is "
3440 "writable.");
3441
3442 auto *T = ExtensionType->getTypeParameter(0);
3443 auto SC = static_cast<SPIRV::StorageClass::StorageClass>(
3444 ExtensionType->getIntParameter(0));
3445 bool IsWritable = ExtensionType->getIntParameter(1);
3446 return GR->getOrCreateVulkanBufferType(MIRBuilder, T, SC, IsWritable);
3447}
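// Example (sketch): target("spirv.VulkanBuffer", [0 x i32], 12, 1) describes
// a writable buffer of i32 elements in storage class 12 (StorageBuffer).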
3448
3449static SPIRVType *getLayoutType(const TargetExtType *ExtensionType,
3450 MachineIRBuilder &MIRBuilder,
3451 SPIRVGlobalRegistry *GR) {
3452 return GR->getOrCreateLayoutType(MIRBuilder, ExtensionType);
3453}
3454
3455namespace SPIRV {
3456TargetExtType *parseBuiltinTypeNameToTargetExtType(std::string TypeName,
3457 LLVMContext &Context) {
3458 StringRef NameWithParameters = TypeName;
3459
3460 // Pointers-to-opaque-structs representing OpenCL types are first translated
3461 // to equivalent SPIR-V types. OpenCL builtin type names should have the
3462 // following format: e.g. %opencl.event_t
3463 if (NameWithParameters.starts_with("opencl.")) {
3464 const SPIRV::OpenCLType *OCLTypeRecord =
3465 SPIRV::lookupOpenCLType(NameWithParameters);
3466 if (!OCLTypeRecord)
3467 report_fatal_error("Missing TableGen record for OpenCL type: " +
3468 NameWithParameters);
3469 NameWithParameters = OCLTypeRecord->SpirvTypeLiteral;
3470 // Continue with the SPIR-V builtin type...
3471 }
3472
3473 // Names of the opaque structs representing SPIR-V builtins without
3474 // parameters should have the following format: e.g. %spirv.Event
3475 assert(NameWithParameters.starts_with("spirv.") &&
3476 "Unknown builtin opaque type!");
3477
3478 // Parameterized SPIR-V builtin names follow this format:
3479 // e.g. %spirv.Image._void_1_0_0_0_0_0_0, %spirv.Pipe._0
3480 if (!NameWithParameters.contains('_'))
3481 return TargetExtType::get(Context, NameWithParameters);
3482
3483 SmallVector<StringRef> Parameters;
3484 unsigned BaseNameLength = NameWithParameters.find('_') - 1;
3485 SplitString(NameWithParameters.substr(BaseNameLength + 1), Parameters, "_");
3486
3487 SmallVector<Type *, 1> TypeParameters;
3488 bool HasTypeParameter = !isDigit(Parameters[0][0]);
3489 if (HasTypeParameter)
3490 TypeParameters.push_back(parseTypeString(Parameters[0], Context));
3491 SmallVector<unsigned> IntParameters;
3492 for (unsigned i = HasTypeParameter ? 1 : 0; i < Parameters.size(); i++) {
3493 unsigned IntParameter = 0;
3494 bool ValidLiteral = !Parameters[i].getAsInteger(10, IntParameter);
3495 (void)ValidLiteral;
3496 assert(ValidLiteral &&
3497 "Invalid format of SPIR-V builtin parameter literal!");
3498 IntParameters.push_back(IntParameter);
3499 }
3500 return TargetExtType::get(Context,
3501 NameWithParameters.substr(0, BaseNameLength),
3502 TypeParameters, IntParameters);
3503}
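// Worked example of the parsing above: for "spirv.Image._void_1_0_0_0_0_0_0"
// the base name is "spirv.Image", the leading "void" becomes the single type
// parameter, and the literals {1, 0, 0, 0, 0, 0, 0} become the integer
// parameters of the resulting target extension type.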
3504
3505SPIRVType *lowerBuiltinType(const Type *OpaqueType,
3506 SPIRV::AccessQualifier::AccessQualifier AccessQual,
3507 MachineIRBuilder &MIRBuilder,
3508 SPIRVGlobalRegistry *GR) {
3509 // In LLVM IR, SPIR-V and OpenCL builtin types are represented as either
3510 // target(...) target extension types or pointers-to-opaque-structs. The
3511 // approach relying on structs is deprecated and works only in the non-opaque
3512 // pointer mode (-opaque-pointers=0).
3513 // In order to maintain compatibility with LLVM IR generated by older versions
3514 // of Clang and LLVM/SPIR-V Translator, the pointers-to-opaque-structs are
3515 // "translated" to target extension types. This translation is temporary and
3516 // will be removed in a future release of LLVM.
3517 const TargetExtType *BuiltinType = dyn_cast<TargetExtType>(OpaqueType);
3518 if (!BuiltinType)
3519 BuiltinType = parseBuiltinTypeNameToTargetExtType(
3520 OpaqueType->getStructName().str(), MIRBuilder.getContext());
3521
3522 unsigned NumStartingVRegs = MIRBuilder.getMRI()->getNumVirtRegs();
3523
3524 const StringRef Name = BuiltinType->getName();
3525 LLVM_DEBUG(dbgs() << "Lowering builtin type: " << Name << "\n");
3526
3527 SPIRVType *TargetType;
3528 if (Name == "spirv.Type") {
3529 TargetType = getInlineSpirvType(BuiltinType, MIRBuilder, GR);
3530 } else if (Name == "spirv.VulkanBuffer") {
3531 TargetType = getVulkanBufferType(BuiltinType, MIRBuilder, GR);
3532 } else if (Name == "spirv.Padding") {
3533 TargetType = GR->getOrCreatePaddingType(MIRBuilder);
3534 } else if (Name == "spirv.Layout") {
3535 TargetType = getLayoutType(BuiltinType, MIRBuilder, GR);
3536 } else {
3537 // Lookup the demangled builtin type in the TableGen records.
3538 const SPIRV::BuiltinType *TypeRecord = SPIRV::lookupBuiltinType(Name);
3539 if (!TypeRecord)
3540 report_fatal_error("Missing TableGen record for builtin type: " + Name);
3541
3542 // "Lower" the BuiltinType into TargetType. The following get<...>Type
3543 // methods use the implementation details from TableGen records or
3544 // TargetExtType parameters to either create a new OpType<...> machine
3545 // instruction or get an existing equivalent SPIRVType from
3546 // GlobalRegistry.
3547
3548 switch (TypeRecord->Opcode) {
3549 case SPIRV::OpTypeImage:
3550 TargetType = GR->getImageType(BuiltinType, AccessQual, MIRBuilder);
3551 break;
3552 case SPIRV::OpTypePipe:
3553 TargetType = getPipeType(BuiltinType, MIRBuilder, GR);
3554 break;
3555 case SPIRV::OpTypeDeviceEvent:
3556 TargetType = GR->getOrCreateOpTypeDeviceEvent(MIRBuilder);
3557 break;
3558 case SPIRV::OpTypeSampler:
3559 TargetType = getSamplerType(MIRBuilder, GR);
3560 break;
3561 case SPIRV::OpTypeSampledImage:
3562 TargetType = getSampledImageType(BuiltinType, MIRBuilder, GR);
3563 break;
3564 case SPIRV::OpTypeCooperativeMatrixKHR:
3565 TargetType = getCoopMatrType(BuiltinType, MIRBuilder, GR);
3566 break;
3567 default:
3568 TargetType =
3569 getNonParameterizedType(BuiltinType, TypeRecord, MIRBuilder, GR);
3570 break;
3571 }
3572 }
3573
3574 // Emit OpName instruction if a new OpType<...> instruction was added
3575 // (equivalent type was not found in GlobalRegistry).
3576 if (NumStartingVRegs < MIRBuilder.getMRI()->getNumVirtRegs())
3577 buildOpName(GR->getSPIRVTypeID(TargetType), Name, MIRBuilder);
3578
3579 return TargetType;
3580}
3581} // namespace SPIRV
3582} // namespace llvm