//===- SPIRVBuiltins.cpp - SPIR-V Built-in Functions ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements lowering builtin function calls and types using their
// demangled names and TableGen records.
//
//===----------------------------------------------------------------------===//

#include "SPIRVBuiltins.h"
#include "SPIRV.h"
#include "SPIRVSubtarget.h"
#include "SPIRVUtils.h"
#include "llvm/IR/IntrinsicsSPIRV.h"
#include <regex>
#include <string>
#include <tuple>

#define DEBUG_TYPE "spirv-builtins"

namespace llvm {
namespace SPIRV {
#define GET_BuiltinGroup_DECL
#include "SPIRVGenTables.inc"

struct DemangledBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  BuiltinGroup Group;
  uint8_t MinNumArgs;
  uint8_t MaxNumArgs;
};

#define GET_DemangledBuiltins_DECL
#define GET_DemangledBuiltins_IMPL

struct IncomingCall {
  const std::string BuiltinName;
  const DemangledBuiltin *Builtin;
  const Register ReturnRegister;
  const SPIRVType *ReturnType;
  const SmallVectorImpl<Register> &Arguments;

  IncomingCall(const std::string BuiltinName, const DemangledBuiltin *Builtin,
               const Register ReturnRegister, const SPIRVType *ReturnType,
               const SmallVectorImpl<Register> &Arguments)
      : BuiltinName(BuiltinName), Builtin(Builtin),
        ReturnRegister(ReturnRegister), ReturnType(ReturnType),
        Arguments(Arguments) {}

  bool isSpirvOp() const { return BuiltinName.rfind("__spirv_", 0) == 0; }
};

struct NativeBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  uint32_t Opcode;
};

#define GET_NativeBuiltins_DECL
#define GET_NativeBuiltins_IMPL

struct GroupBuiltin {
  StringRef Name;
  uint32_t Opcode;
  uint32_t GroupOperation;
  bool IsElect;
  bool IsAllOrAny;
  bool IsAllEqual;
  bool IsBallot;
  bool IsInverseBallot;
  bool IsBallotBitExtract;
  bool IsBallotFindBit;
  bool IsLogical;
  bool NoGroupOperation;
  bool HasBoolArg;
};

#define GET_GroupBuiltins_DECL
#define GET_GroupBuiltins_IMPL

struct IntelSubgroupsBuiltin {
  StringRef Name;
  uint32_t Opcode;
  bool IsBlock;
  bool IsWrite;
  bool IsMedia;
};

#define GET_IntelSubgroupsBuiltins_DECL
#define GET_IntelSubgroupsBuiltins_IMPL

struct AtomicFloatingBuiltin {
  StringRef Name;
  uint32_t Opcode;
};

#define GET_AtomicFloatingBuiltins_DECL
#define GET_AtomicFloatingBuiltins_IMPL

struct GroupUniformBuiltin {
  StringRef Name;
  uint32_t Opcode;
  bool IsLogical;
};

#define GET_GroupUniformBuiltins_DECL
#define GET_GroupUniformBuiltins_IMPL

struct GetBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  BuiltIn::BuiltIn Value;
};

using namespace BuiltIn;
#define GET_GetBuiltins_DECL
#define GET_GetBuiltins_IMPL

struct ImageQueryBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  uint32_t Component;
};

#define GET_ImageQueryBuiltins_DECL
#define GET_ImageQueryBuiltins_IMPL

struct IntegerDotProductBuiltin {
  StringRef Name;
  uint32_t Opcode;
};

#define GET_IntegerDotProductBuiltins_DECL
#define GET_IntegerDotProductBuiltins_IMPL

struct ConvertBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  bool IsDestinationSigned;
  bool IsSaturated;
  bool IsRounded;
  bool IsBfloat16;
  bool IsTF32;
  FPRoundingMode::FPRoundingMode RoundingMode;
};

struct VectorLoadStoreBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  uint32_t Number;
  uint32_t ElementCount;
  bool IsRounded;
  FPRoundingMode::FPRoundingMode RoundingMode;
};

using namespace FPRoundingMode;
#define GET_ConvertBuiltins_DECL
#define GET_ConvertBuiltins_IMPL

using namespace InstructionSet;
#define GET_VectorLoadStoreBuiltins_DECL
#define GET_VectorLoadStoreBuiltins_IMPL

#define GET_CLMemoryScope_DECL
#define GET_CLSamplerAddressingMode_DECL
#define GET_CLMemoryFenceFlags_DECL
#define GET_ExtendedBuiltins_DECL
#include "SPIRVGenTables.inc"
} // namespace SPIRV

//===----------------------------------------------------------------------===//
// Misc functions for looking up builtins and verifying requirements using
// TableGen records
//===----------------------------------------------------------------------===//

namespace SPIRV {
/// Parses the name part of the demangled builtin call.
std::string lookupBuiltinNameHelper(StringRef DemangledCall,
                                    FPDecorationId *DecorationId) {
  StringRef PassPrefix = "(anonymous namespace)::";
  StringRef SpvPrefix = "__spv::";
  std::string BuiltinName = DemangledCall.str();

  // Check if the extracted name contains type information between angle
  // brackets. If so, the builtin is an instantiated template and the
  // information after the angle brackets and the return type must be removed.
  std::size_t Pos = BuiltinName.find(">(");
  if (Pos != std::string::npos) {
    BuiltinName = BuiltinName.substr(0, BuiltinName.rfind('<', Pos));
  } else {
    Pos = BuiltinName.find('(');
    if (Pos != std::string::npos)
      BuiltinName = BuiltinName.substr(0, Pos);
  }
  BuiltinName = BuiltinName.substr(BuiltinName.find_last_of(' ') + 1);

  // The Itanium demangler result may have an "(anonymous namespace)::" or
  // "__spv::" prefix.
  if (BuiltinName.find(PassPrefix) == 0)
    BuiltinName = BuiltinName.substr(PassPrefix.size());
  else if (BuiltinName.find(SpvPrefix) == 0)
    BuiltinName = BuiltinName.substr(SpvPrefix.size());

  // Account for a possible "__spirv_ocl_" prefix in SPIR-V friendly LLVM IR.
  if (BuiltinName.rfind("__spirv_ocl_", 0) == 0)
    BuiltinName = BuiltinName.substr(12);

  // Check if the extracted name begins with:
  // - "__spirv_ImageSampleExplicitLod"
  // - "__spirv_ImageRead"
  // - "__spirv_ImageWrite"
  // - "__spirv_ImageQuerySizeLod"
  // - "__spirv_UDotKHR"
  // - "__spirv_SDotKHR"
  // - "__spirv_SUDotKHR"
  // - "__spirv_SDotAccSatKHR"
  // - "__spirv_UDotAccSatKHR"
  // - "__spirv_SUDotAccSatKHR"
  // - "__spirv_ReadClockKHR"
  // - "__spirv_SubgroupBlockReadINTEL"
  // - "__spirv_SubgroupImageBlockReadINTEL"
  // - "__spirv_SubgroupImageMediaBlockReadINTEL"
  // - "__spirv_SubgroupImageMediaBlockWriteINTEL"
  // - "__spirv_Convert"
  // - "__spirv_Round"
  // - "__spirv_UConvert"
  // - "__spirv_SConvert"
  // - "__spirv_FConvert"
  // - "__spirv_SatConvert"
  // and may contain return type information at the end "_R<type>".
  // If so, extract the plain builtin name without the type information.
  static const std::regex SpvWithR(
      "(__spirv_(ImageSampleExplicitLod|ImageRead|ImageWrite|ImageQuerySizeLod|"
      "UDotKHR|"
      "SDotKHR|SUDotKHR|SDotAccSatKHR|UDotAccSatKHR|SUDotAccSatKHR|"
      "ReadClockKHR|SubgroupBlockReadINTEL|SubgroupImageBlockReadINTEL|"
      "SubgroupImageMediaBlockReadINTEL|SubgroupImageMediaBlockWriteINTEL|"
      "Convert|Round|"
      "UConvert|SConvert|FConvert|SatConvert)[^_]*)(_R[^_]*_?(\\w+)?.*)?");
  std::smatch Match;
  if (std::regex_match(BuiltinName, Match, SpvWithR) && Match.size() > 1) {
    std::ssub_match SubMatch;
    if (DecorationId && Match.size() > 3) {
      SubMatch = Match[4];
      *DecorationId = demangledPostfixToDecorationId(SubMatch.str());
    }
    SubMatch = Match[1];
    BuiltinName = SubMatch.str();
  }

  return BuiltinName;
}
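
// Example (illustrative): for a demangled call such as
// "__spirv_FConvert_Rhalf_rtz(half)", the code above strips the argument
// list and the "_Rhalf_rtz" return-type suffix and returns "__spirv_FConvert";
// when DecorationId is non-null, the "rtz" postfix is decoded into the
// corresponding FP rounding decoration via demangledPostfixToDecorationId.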
} // namespace SPIRV

/// Looks up the demangled builtin call in the SPIRVBuiltins.td records using
/// the provided \p DemangledCall and specified \p Set.
///
/// The lookup proceeds as follows, returning the first successful match:
/// 1. Search with the plain demangled name (expecting a 1:1 match).
/// 2. Search with a prefix before or a suffix after the demangled name,
///    signifying the type of the first argument.
///
/// \returns Wrapper around the demangled call and found builtin definition.
static std::unique_ptr<const SPIRV::IncomingCall>
lookupBuiltin(StringRef DemangledCall,
              SPIRV::InstructionSet::InstructionSet Set,
              Register ReturnRegister, const SPIRVType *ReturnType,
              const SmallVectorImpl<Register> &Arguments) {
  std::string BuiltinName = SPIRV::lookupBuiltinNameHelper(DemangledCall);

  SmallVector<StringRef, 10> BuiltinArgumentTypes;
  StringRef BuiltinArgs =
      DemangledCall.slice(DemangledCall.find('(') + 1, DemangledCall.find(')'));
  BuiltinArgs.split(BuiltinArgumentTypes, ',', -1, false);

  // Look up the builtin in the defined set. Start with the plain demangled
  // name, expecting a 1:1 match in the defined builtin set.
  const SPIRV::DemangledBuiltin *Builtin;
  if ((Builtin = SPIRV::lookupBuiltin(BuiltinName, Set)))
    return std::make_unique<SPIRV::IncomingCall>(
        BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);

  // If the initial lookup was unsuccessful and the demangled call takes at
  // least 1 argument, add a prefix or suffix signifying the type of the first
  // argument and repeat the search.
  if (BuiltinArgumentTypes.size() >= 1) {
    char FirstArgumentType = BuiltinArgumentTypes[0][0];
    // Prefix to be added to the builtin's name for lookup.
    // For example, OpenCL "abs" taking an unsigned value has a prefix "u_".
    std::string Prefix;

    switch (FirstArgumentType) {
    // Unsigned:
    case 'u':
      if (Set == SPIRV::InstructionSet::OpenCL_std)
        Prefix = "u_";
      else if (Set == SPIRV::InstructionSet::GLSL_std_450)
        Prefix = "u";
      break;
    // Signed:
    case 'c':
    case 's':
    case 'i':
    case 'l':
      if (Set == SPIRV::InstructionSet::OpenCL_std)
        Prefix = "s_";
      else if (Set == SPIRV::InstructionSet::GLSL_std_450)
        Prefix = "s";
      break;
    // Floating-point:
    case 'f':
    case 'd':
    case 'h':
      if (Set == SPIRV::InstructionSet::OpenCL_std ||
          Set == SPIRV::InstructionSet::GLSL_std_450)
        Prefix = "f";
      break;
    }

    // If an argument-type name prefix was added, look up the builtin again.
    if (!Prefix.empty() &&
        (Builtin = SPIRV::lookupBuiltin(Prefix + BuiltinName, Set)))
      return std::make_unique<SPIRV::IncomingCall>(
          BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);

    // If the lookup with a prefix failed, find a suffix to be added to the
    // builtin's name for lookup. For example, OpenCL "group_reduce_max"
    // taking an unsigned value has the suffix "u".
    std::string Suffix;

    switch (FirstArgumentType) {
    // Unsigned:
    case 'u':
      Suffix = "u";
      break;
    // Signed:
    case 'c':
    case 's':
    case 'i':
    case 'l':
      Suffix = "s";
      break;
    // Floating-point:
    case 'f':
    case 'd':
    case 'h':
      Suffix = "f";
      break;
    }

    // If an argument-type name suffix was added, look up the builtin again.
    if (!Suffix.empty() &&
        (Builtin = SPIRV::lookupBuiltin(BuiltinName + Suffix, Set)))
      return std::make_unique<SPIRV::IncomingCall>(
          BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);
  }

  // No builtin with such a name was found in the set.
  return nullptr;
}
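
// Example (illustrative): for a demangled call "abs(unsigned int)" in the
// OpenCL_std set, the plain lookup of "abs" may fail, in which case the 'u'
// kind of the first argument adds the "u_" prefix and "u_abs" is looked up;
// a call like "group_reduce_max(unsigned int)" would instead match through
// the "u" suffix, i.e. "group_reduce_maxu".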

static MachineInstr *getBlockStructInstr(Register ParamReg,
                                         MachineRegisterInfo *MRI) {
  // We expect the following sequence of instructions:
  //   %0:_(pN) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.spv.alloca)
  //   or       = G_GLOBAL_VALUE @block_literal_global
  //   %1:_(pN) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.spv.bitcast), %0
  //   %2:_(p4) = G_ADDRSPACE_CAST %1:_(pN)
  MachineInstr *MI = MRI->getUniqueVRegDef(ParamReg);
  assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST &&
         MI->getOperand(1).isReg());
  Register BitcastReg = MI->getOperand(1).getReg();
  MachineInstr *BitcastMI = MRI->getUniqueVRegDef(BitcastReg);
  assert(isSpvIntrinsic(*BitcastMI, Intrinsic::spv_bitcast) &&
         BitcastMI->getOperand(2).isReg());
  Register ValueReg = BitcastMI->getOperand(2).getReg();
  MachineInstr *ValueMI = MRI->getUniqueVRegDef(ValueReg);
  return ValueMI;
}

// Return an integer constant corresponding to the given register and
// defined in spv_track_constant.
// TODO: maybe unify with the prelegalizer pass.
static uint64_t getIConstVal(Register Reg, const MachineRegisterInfo *MRI) {
  MachineInstr *DefMI = MRI->getUniqueVRegDef(Reg);
  assert(DefMI->getOpcode() == TargetOpcode::G_CONSTANT &&
         DefMI->getOperand(1).isCImm());
  return DefMI->getOperand(1).getCImm()->getValue().getZExtValue();
}

// Return the type of the instruction result from the spv_assign_type
// intrinsic.
// TODO: maybe unify with the prelegalizer pass.
static Type *getMachineInstrType(MachineInstr *MI) {
  MachineInstr *NextMI = MI->getNextNode();
  if (!NextMI)
    return nullptr;
  if (isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_name))
    if ((NextMI = NextMI->getNextNode()) == nullptr)
      return nullptr;
  Register ValueReg = MI->getOperand(0).getReg();
  if ((!isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_type) &&
       !isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_ptr_type)) ||
      NextMI->getOperand(1).getReg() != ValueReg)
    return nullptr;
  Type *Ty = getMDOperandAsType(NextMI->getOperand(2).getMetadata(), 0);
  assert(Ty && "Type is expected");
  return Ty;
}

static const Type *getBlockStructType(Register ParamReg,
                                      MachineRegisterInfo *MRI) {
  // In principle, this information should be passed to us from Clang via
  // an elementtype attribute. However, said attribute requires that
  // the function call be an intrinsic, which it is not. Instead, we rely on
  // being able to trace this to the declaration of a variable: OpenCL C
  // specification section 6.12.5 should guarantee that we can do this.
  MachineInstr *MI = getBlockStructInstr(ParamReg, MRI);
  if (MI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE)
    return MI->getOperand(1).getGlobal()->getType();
  assert(isSpvIntrinsic(*MI, Intrinsic::spv_alloca) &&
         "Blocks in OpenCL C must be traceable to allocation site");
  return getMachineInstrType(MI);
}

//===----------------------------------------------------------------------===//
// Helper functions for building misc instructions
//===----------------------------------------------------------------------===//

/// Helper function building either a resulting scalar or vector bool register
/// depending on the expected \p ResultType.
///
/// \returns Tuple of the resulting register and its type.
static std::tuple<Register, SPIRVType *>
buildBoolRegister(MachineIRBuilder &MIRBuilder, const SPIRVType *ResultType,
                  SPIRVGlobalRegistry *GR) {
  LLT Type;
  SPIRVType *BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder, true);

  if (ResultType->getOpcode() == SPIRV::OpTypeVector) {
    unsigned VectorElements = ResultType->getOperand(2).getImm();
    BoolType = GR->getOrCreateSPIRVVectorType(BoolType, VectorElements,
                                              MIRBuilder, true);
    const FixedVectorType *LLVMVectorType =
        cast<FixedVectorType>(GR->getTypeForSPIRVType(BoolType));
    Type = LLT::vector(LLVMVectorType->getElementCount(), 1);
  } else {
    Type = LLT::scalar(1);
  }

  Register ResultRegister =
      MIRBuilder.getMRI()->createGenericVirtualRegister(Type);
  MIRBuilder.getMRI()->setRegClass(ResultRegister,
                                   GR->getRegClass(ResultType));
  GR->assignSPIRVTypeToVReg(BoolType, ResultRegister, MIRBuilder.getMF());
  return std::make_tuple(ResultRegister, BoolType);
}

/// Helper function for building either a vector or scalar select instruction
/// depending on the expected \p ResultType.
static bool buildSelectInst(MachineIRBuilder &MIRBuilder,
                            Register ReturnRegister, Register SourceRegister,
                            const SPIRVType *ReturnType,
                            SPIRVGlobalRegistry *GR) {
  Register TrueConst, FalseConst;

  if (ReturnType->getOpcode() == SPIRV::OpTypeVector) {
    unsigned Bits = GR->getScalarOrVectorBitWidth(ReturnType);
    uint64_t AllOnes = APInt::getAllOnes(Bits).getZExtValue();
    TrueConst =
        GR->getOrCreateConsIntVector(AllOnes, MIRBuilder, ReturnType, true);
    FalseConst = GR->getOrCreateConsIntVector(0, MIRBuilder, ReturnType, true);
  } else {
    TrueConst = GR->buildConstantInt(1, MIRBuilder, ReturnType, true);
    FalseConst = GR->buildConstantInt(0, MIRBuilder, ReturnType, true);
  }

  return MIRBuilder.buildSelect(ReturnRegister, SourceRegister, TrueConst,
                                FalseConst);
}
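
// For a v4i32 result, the select built above corresponds to (illustrative):
//   %true  = OpConstantComposite %v4i32 %minus1 %minus1 %minus1 %minus1
//   %false = OpConstantComposite %v4i32 %zero %zero %zero %zero
//   %res   = OpSelect %v4i32 %cond %true %false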

/// Helper function for building a load instruction loading into the
/// \p DestinationReg.
static Register buildLoadInst(SPIRVType *BaseType, Register PtrRegister,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR, LLT LowLevelType,
                              Register DestinationReg = Register(0)) {
  if (!DestinationReg.isValid())
    DestinationReg = createVirtualRegister(BaseType, GR, MIRBuilder);
  // TODO: consider using the correct address space and alignment (p0 is the
  // canonical type for selection though).
  MachinePointerInfo PtrInfo = MachinePointerInfo();
  MIRBuilder.buildLoad(DestinationReg, PtrRegister, PtrInfo, Align());
  return DestinationReg;
}

/// Helper function for building a load instruction for loading a builtin
/// global variable of \p BuiltinValue value.
static Register buildBuiltinVariableLoad(
    MachineIRBuilder &MIRBuilder, SPIRVType *VariableType,
    SPIRVGlobalRegistry *GR, SPIRV::BuiltIn::BuiltIn BuiltinValue, LLT LLType,
    Register Reg = Register(0), bool isConst = true,
    const std::optional<SPIRV::LinkageType::LinkageType> &LinkageTy = {
        SPIRV::LinkageType::Import}) {
  Register NewRegister =
      MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::pIDRegClass);
  MIRBuilder.getMRI()->setType(
      NewRegister,
      LLT::pointer(storageClassToAddressSpace(SPIRV::StorageClass::Function),
                   GR->getPointerSize()));
  SPIRVType *PtrType = GR->getOrCreateSPIRVPointerType(
      VariableType, MIRBuilder, SPIRV::StorageClass::Input);
  GR->assignSPIRVTypeToVReg(PtrType, NewRegister, MIRBuilder.getMF());

  // Set up the global OpVariable with the necessary builtin decorations.
  Register Variable = GR->buildGlobalVariable(
      NewRegister, PtrType, getLinkStringForBuiltIn(BuiltinValue), nullptr,
      SPIRV::StorageClass::Input, nullptr, /* isConst= */ isConst, LinkageTy,
      MIRBuilder, false);

  // Load the value from the global variable.
  Register LoadedRegister =
      buildLoadInst(VariableType, Variable, MIRBuilder, GR, LLType, Reg);
  MIRBuilder.getMRI()->setType(LoadedRegister, LLType);
  return LoadedRegister;
}

/// Helper external function for assigning a SPIRVType to a register, ensuring
/// the register class and type are set in MRI. Defined in
/// SPIRVPreLegalizer.cpp.
extern void updateRegType(Register Reg, Type *Ty, SPIRVType *SpirvTy,
                          SPIRVGlobalRegistry *GR, MachineIRBuilder &MIB,
                          MachineRegisterInfo &MRI);

// TODO: Move to TableGen.
static SPIRV::MemorySemantics::MemorySemantics
getSPIRVMemSemantics(std::memory_order MemOrder) {
  switch (MemOrder) {
  case std::memory_order_relaxed:
    return SPIRV::MemorySemantics::None;
  case std::memory_order_acquire:
    return SPIRV::MemorySemantics::Acquire;
  case std::memory_order_release:
    return SPIRV::MemorySemantics::Release;
  case std::memory_order_acq_rel:
    return SPIRV::MemorySemantics::AcquireRelease;
  case std::memory_order_seq_cst:
    return SPIRV::MemorySemantics::SequentiallyConsistent;
  default:
    report_fatal_error("Unknown CL memory order");
  }
}

static SPIRV::Scope::Scope getSPIRVScope(SPIRV::CLMemoryScope ClScope) {
  switch (ClScope) {
  case SPIRV::CLMemoryScope::memory_scope_work_item:
    return SPIRV::Scope::Invocation;
  case SPIRV::CLMemoryScope::memory_scope_work_group:
    return SPIRV::Scope::Workgroup;
  case SPIRV::CLMemoryScope::memory_scope_device:
    return SPIRV::Scope::Device;
  case SPIRV::CLMemoryScope::memory_scope_all_svm_devices:
    return SPIRV::Scope::CrossDevice;
  case SPIRV::CLMemoryScope::memory_scope_sub_group:
    return SPIRV::Scope::Subgroup;
  }
  report_fatal_error("Unknown CL memory scope");
}

static Register buildConstantIntReg32(uint64_t Val,
                                      MachineIRBuilder &MIRBuilder,
                                      SPIRVGlobalRegistry *GR) {
  return GR->buildConstantInt(
      Val, MIRBuilder, GR->getOrCreateSPIRVIntegerType(32, MIRBuilder), true);
}

static Register buildScopeReg(Register CLScopeRegister,
                              SPIRV::Scope::Scope Scope,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR,
                              MachineRegisterInfo *MRI) {
  if (CLScopeRegister.isValid()) {
    auto CLScope =
        static_cast<SPIRV::CLMemoryScope>(getIConstVal(CLScopeRegister, MRI));
    Scope = getSPIRVScope(CLScope);

    if (CLScope == static_cast<unsigned>(Scope)) {
      MRI->setRegClass(CLScopeRegister, &SPIRV::iIDRegClass);
      return CLScopeRegister;
    }
  }
  return buildConstantIntReg32(Scope, MIRBuilder, GR);
}

static void setRegClassIfNull(Register Reg, MachineRegisterInfo *MRI,
                              SPIRVGlobalRegistry *GR) {
  if (MRI->getRegClassOrNull(Reg))
    return;
  SPIRVType *SpvType = GR->getSPIRVTypeForVReg(Reg);
  MRI->setRegClass(Reg,
                   SpvType ? GR->getRegClass(SpvType) : &SPIRV::iIDRegClass);
}

static Register buildMemSemanticsReg(Register SemanticsRegister,
                                     Register PtrRegister, unsigned &Semantics,
                                     MachineIRBuilder &MIRBuilder,
                                     SPIRVGlobalRegistry *GR) {
  if (SemanticsRegister.isValid()) {
    MachineRegisterInfo *MRI = MIRBuilder.getMRI();
    std::memory_order Order =
        static_cast<std::memory_order>(getIConstVal(SemanticsRegister, MRI));
    Semantics =
        getSPIRVMemSemantics(Order) |
        getMemSemanticsForStorageClass(GR->getPointerStorageClass(PtrRegister));
    if (static_cast<unsigned>(Order) == Semantics) {
      MRI->setRegClass(SemanticsRegister, &SPIRV::iIDRegClass);
      return SemanticsRegister;
    }
  }
  return buildConstantIntReg32(Semantics, MIRBuilder, GR);
}

static bool buildOpFromWrapper(MachineIRBuilder &MIRBuilder, unsigned Opcode,
                               const SPIRV::IncomingCall *Call,
                               Register TypeReg,
                               ArrayRef<uint32_t> ImmArgs = {}) {
  auto MIB = MIRBuilder.buildInstr(Opcode);
  if (TypeReg.isValid())
    MIB.addDef(Call->ReturnRegister).addUse(TypeReg);
  unsigned Sz = Call->Arguments.size() - ImmArgs.size();
  for (unsigned i = 0; i < Sz; ++i)
    MIB.addUse(Call->Arguments[i]);
  for (uint32_t ImmArg : ImmArgs)
    MIB.addImm(ImmArg);
  return true;
}

/// Helper function for translating atomic init to OpStore.
static bool buildAtomicInitInst(const SPIRV::IncomingCall *Call,
                                MachineIRBuilder &MIRBuilder) {
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, SPIRV::OpStore, Call, Register(0));

  assert(Call->Arguments.size() == 2 &&
         "Need 2 arguments for atomic init translation");
  MIRBuilder.buildInstr(SPIRV::OpStore)
      .addUse(Call->Arguments[0])
      .addUse(Call->Arguments[1]);
  return true;
}

/// Helper function for building an atomic load instruction.
static bool buildAtomicLoadInst(const SPIRV::IncomingCall *Call,
                                MachineIRBuilder &MIRBuilder,
                                SPIRVGlobalRegistry *GR) {
  Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, SPIRV::OpAtomicLoad, Call, TypeReg);

  Register PtrRegister = Call->Arguments[0];
  // TODO: if true, insert a call to __translate_ocl_memory_scope before
  // OpAtomicLoad and the function implementation. We can use the Translator's
  // output for transcoding/atomic_explicit_arguments.cl as an example.
  Register ScopeRegister =
      Call->Arguments.size() > 1
          ? Call->Arguments[1]
          : buildConstantIntReg32(SPIRV::Scope::Device, MIRBuilder, GR);
  Register MemSemanticsReg;
  if (Call->Arguments.size() > 2) {
    // TODO: Insert a call to __translate_ocl_memory_order before OpAtomicLoad.
    MemSemanticsReg = Call->Arguments[2];
  } else {
    int Semantics =
        SPIRV::MemorySemantics::SequentiallyConsistent |
        getMemSemanticsForStorageClass(GR->getPointerStorageClass(PtrRegister));
    MemSemanticsReg = buildConstantIntReg32(Semantics, MIRBuilder, GR);
  }

  MIRBuilder.buildInstr(SPIRV::OpAtomicLoad)
      .addDef(Call->ReturnRegister)
      .addUse(TypeReg)
      .addUse(PtrRegister)
      .addUse(ScopeRegister)
      .addUse(MemSemanticsReg);
  return true;
}
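
// Example (illustrative): atomic_load(p) with no explicit order/scope operands
// lowers roughly to:
//   %scope = OpConstant %uint 1   ; Device
//   %sem   = OpConstant %uint ... ; SequentiallyConsistent | storage-class bits
//   %res   = OpAtomicLoad %T %p %scope %sem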

/// Helper function for building an atomic store instruction.
static bool buildAtomicStoreInst(const SPIRV::IncomingCall *Call,
                                 MachineIRBuilder &MIRBuilder,
                                 SPIRVGlobalRegistry *GR) {
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, SPIRV::OpAtomicStore, Call,
                              Register(0));

  Register ScopeRegister =
      buildConstantIntReg32(SPIRV::Scope::Device, MIRBuilder, GR);
  Register PtrRegister = Call->Arguments[0];
  int Semantics =
      SPIRV::MemorySemantics::SequentiallyConsistent |
      getMemSemanticsForStorageClass(GR->getPointerStorageClass(PtrRegister));
  Register MemSemanticsReg = buildConstantIntReg32(Semantics, MIRBuilder, GR);
  MIRBuilder.buildInstr(SPIRV::OpAtomicStore)
      .addUse(PtrRegister)
      .addUse(ScopeRegister)
      .addUse(MemSemanticsReg)
      .addUse(Call->Arguments[1]);
  return true;
}

/// Helper function for building an atomic compare-exchange instruction.
static bool buildAtomicCompareExchangeInst(
    const SPIRV::IncomingCall *Call, const SPIRV::DemangledBuiltin *Builtin,
    unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR) {
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  bool IsCmpxchg = Call->Builtin->Name.contains("cmpxchg");
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  Register ObjectPtr = Call->Arguments[0];   // Pointer (volatile A *object).
  Register ExpectedArg = Call->Arguments[1]; // Comparator (C *expected).
  Register Desired = Call->Arguments[2];     // Value (C desired).
  SPIRVType *SpvDesiredTy = GR->getSPIRVTypeForVReg(Desired);
  LLT DesiredLLT = MRI->getType(Desired);

  assert(GR->getSPIRVTypeForVReg(ObjectPtr)->getOpcode() ==
         SPIRV::OpTypePointer);
  unsigned ExpectedType = GR->getSPIRVTypeForVReg(ExpectedArg)->getOpcode();
  (void)ExpectedType;
  assert(IsCmpxchg ? ExpectedType == SPIRV::OpTypeInt
                   : ExpectedType == SPIRV::OpTypePointer);
  assert(GR->isScalarOfType(Desired, SPIRV::OpTypeInt));

  SPIRVType *SpvObjectPtrTy = GR->getSPIRVTypeForVReg(ObjectPtr);
  assert(SpvObjectPtrTy->getOperand(2).isReg() && "SPIRV type is expected");
  auto StorageClass = static_cast<SPIRV::StorageClass::StorageClass>(
      SpvObjectPtrTy->getOperand(1).getImm());
  auto MemSemStorage = getMemSemanticsForStorageClass(StorageClass);

  Register MemSemEqualReg;
  Register MemSemUnequalReg;
  uint64_t MemSemEqual =
      IsCmpxchg
          ? SPIRV::MemorySemantics::None
          : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
  uint64_t MemSemUnequal =
      IsCmpxchg
          ? SPIRV::MemorySemantics::None
          : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
  if (Call->Arguments.size() >= 4) {
    assert(Call->Arguments.size() >= 5 &&
           "Need 5+ args for explicit atomic cmpxchg");
    auto MemOrdEq =
        static_cast<std::memory_order>(getIConstVal(Call->Arguments[3], MRI));
    auto MemOrdNeq =
        static_cast<std::memory_order>(getIConstVal(Call->Arguments[4], MRI));
    MemSemEqual = getSPIRVMemSemantics(MemOrdEq) | MemSemStorage;
    MemSemUnequal = getSPIRVMemSemantics(MemOrdNeq) | MemSemStorage;
    if (static_cast<unsigned>(MemOrdEq) == MemSemEqual)
      MemSemEqualReg = Call->Arguments[3];
    if (static_cast<unsigned>(MemOrdNeq) == MemSemUnequal)
      MemSemUnequalReg = Call->Arguments[4];
  }
  if (!MemSemEqualReg.isValid())
    MemSemEqualReg = buildConstantIntReg32(MemSemEqual, MIRBuilder, GR);
  if (!MemSemUnequalReg.isValid())
    MemSemUnequalReg = buildConstantIntReg32(MemSemUnequal, MIRBuilder, GR);

  Register ScopeReg;
  auto Scope = IsCmpxchg ? SPIRV::Scope::Workgroup : SPIRV::Scope::Device;
  if (Call->Arguments.size() >= 6) {
    assert(Call->Arguments.size() == 6 &&
           "Extra args for explicit atomic cmpxchg");
    auto ClScope = static_cast<SPIRV::CLMemoryScope>(
        getIConstVal(Call->Arguments[5], MRI));
    Scope = getSPIRVScope(ClScope);
    if (ClScope == static_cast<unsigned>(Scope))
      ScopeReg = Call->Arguments[5];
  }
  if (!ScopeReg.isValid())
    ScopeReg = buildConstantIntReg32(Scope, MIRBuilder, GR);

  Register Expected = IsCmpxchg
                          ? ExpectedArg
                          : buildLoadInst(SpvDesiredTy, ExpectedArg, MIRBuilder,
                                          GR, LLT::scalar(64));
  MRI->setType(Expected, DesiredLLT);
  Register Tmp = !IsCmpxchg ? MRI->createGenericVirtualRegister(DesiredLLT)
                            : Call->ReturnRegister;
  if (!MRI->getRegClassOrNull(Tmp))
    MRI->setRegClass(Tmp, GR->getRegClass(SpvDesiredTy));
  GR->assignSPIRVTypeToVReg(SpvDesiredTy, Tmp, MIRBuilder.getMF());

  MIRBuilder.buildInstr(Opcode)
      .addDef(Tmp)
      .addUse(GR->getSPIRVTypeID(SpvDesiredTy))
      .addUse(ObjectPtr)
      .addUse(ScopeReg)
      .addUse(MemSemEqualReg)
      .addUse(MemSemUnequalReg)
      .addUse(Desired)
      .addUse(Expected);
  if (!IsCmpxchg) {
    MIRBuilder.buildInstr(SPIRV::OpStore).addUse(ExpectedArg).addUse(Tmp);
    MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Call->ReturnRegister, Tmp, Expected);
  }
  return true;
}
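
// Example (illustrative): atomic_compare_exchange_strong(p, expected, desired)
// expands roughly to:
//   %exp = OpLoad %T %expected
//   %old = OpAtomicCompareExchange %T %p %scope %semEq %semNeq %desired %exp
//   OpStore %expected %old
//   %res = OpIEqual %bool %old %exp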

/// Helper function for building atomic read-modify-write instructions.
static bool buildAtomicRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
                               MachineIRBuilder &MIRBuilder,
                               SPIRVGlobalRegistry *GR) {
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  Register ScopeRegister =
      Call->Arguments.size() >= 4 ? Call->Arguments[3] : Register();

  assert(Call->Arguments.size() <= 4 &&
         "Too many args for explicit atomic RMW");
  ScopeRegister = buildScopeReg(ScopeRegister, SPIRV::Scope::Workgroup,
                                MIRBuilder, GR, MRI);

  Register PtrRegister = Call->Arguments[0];
  unsigned Semantics = SPIRV::MemorySemantics::None;
  Register MemSemanticsReg =
      Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register();
  MemSemanticsReg = buildMemSemanticsReg(MemSemanticsReg, PtrRegister,
                                         Semantics, MIRBuilder, GR);
  Register ValueReg = Call->Arguments[1];
  Register ValueTypeReg = GR->getSPIRVTypeID(Call->ReturnType);
  // Support cl_ext_float_atomics.
  if (Call->ReturnType->getOpcode() == SPIRV::OpTypeFloat) {
    if (Opcode == SPIRV::OpAtomicIAdd) {
      Opcode = SPIRV::OpAtomicFAddEXT;
    } else if (Opcode == SPIRV::OpAtomicISub) {
      // Translate OpAtomicISub applied to a floating-type argument to
      // OpAtomicFAddEXT with the negated value operand.
      Opcode = SPIRV::OpAtomicFAddEXT;
      Register NegValueReg =
          MRI->createGenericVirtualRegister(MRI->getType(ValueReg));
      MRI->setRegClass(NegValueReg, GR->getRegClass(Call->ReturnType));
      GR->assignSPIRVTypeToVReg(Call->ReturnType, NegValueReg,
                                MIRBuilder.getMF());
      MIRBuilder.buildInstr(TargetOpcode::G_FNEG)
          .addDef(NegValueReg)
          .addUse(ValueReg);
      updateRegType(NegValueReg, nullptr, Call->ReturnType, GR, MIRBuilder,
                    MIRBuilder.getMF().getRegInfo());
      ValueReg = NegValueReg;
    }
  }
  MIRBuilder.buildInstr(Opcode)
      .addDef(Call->ReturnRegister)
      .addUse(ValueTypeReg)
      .addUse(PtrRegister)
      .addUse(ScopeRegister)
      .addUse(MemSemanticsReg)
      .addUse(ValueReg);
  return true;
}
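
// Example (illustrative): atomic_fetch_sub on a float becomes an FAdd of the
// negated operand, roughly:
//   %neg = G_FNEG %val
//   %old = OpAtomicFAddEXT %float %p %scope %sem %neg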

/// Helper function for building an atomic floating-type instruction.
static bool buildAtomicFloatingRMWInst(const SPIRV::IncomingCall *Call,
                                       unsigned Opcode,
                                       MachineIRBuilder &MIRBuilder,
                                       SPIRVGlobalRegistry *GR) {
  assert(Call->Arguments.size() == 4 &&
         "Wrong number of arguments for an atomic floating-type builtin");
  Register PtrReg = Call->Arguments[0];
  Register ScopeReg = Call->Arguments[1];
  Register MemSemanticsReg = Call->Arguments[2];
  Register ValueReg = Call->Arguments[3];
  MIRBuilder.buildInstr(Opcode)
      .addDef(Call->ReturnRegister)
      .addUse(GR->getSPIRVTypeID(Call->ReturnType))
      .addUse(PtrReg)
      .addUse(ScopeReg)
      .addUse(MemSemanticsReg)
      .addUse(ValueReg);
  return true;
}

/// Helper function for building atomic flag instructions (e.g.
/// OpAtomicFlagTestAndSet).
static bool buildAtomicFlagInst(const SPIRV::IncomingCall *Call,
                                unsigned Opcode, MachineIRBuilder &MIRBuilder,
                                SPIRVGlobalRegistry *GR) {
  bool IsSet = Opcode == SPIRV::OpAtomicFlagTestAndSet;
  Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              IsSet ? TypeReg : Register(0));

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  Register PtrRegister = Call->Arguments[0];
  unsigned Semantics = SPIRV::MemorySemantics::SequentiallyConsistent;
  Register MemSemanticsReg =
      Call->Arguments.size() >= 2 ? Call->Arguments[1] : Register();
  MemSemanticsReg = buildMemSemanticsReg(MemSemanticsReg, PtrRegister,
                                         Semantics, MIRBuilder, GR);

  assert((Opcode != SPIRV::OpAtomicFlagClear ||
          (Semantics != SPIRV::MemorySemantics::Acquire &&
           Semantics != SPIRV::MemorySemantics::AcquireRelease)) &&
         "Invalid memory order argument!");

  Register ScopeRegister =
      Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register();
  ScopeRegister =
      buildScopeReg(ScopeRegister, SPIRV::Scope::Device, MIRBuilder, GR, MRI);

  auto MIB = MIRBuilder.buildInstr(Opcode);
  if (IsSet)
    MIB.addDef(Call->ReturnRegister).addUse(TypeReg);

  MIB.addUse(PtrRegister).addUse(ScopeRegister).addUse(MemSemanticsReg);
  return true;
}

/// Helper function for building barriers, i.e., memory/control ordering
/// operations.
static bool buildBarrierInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
                             MachineIRBuilder &MIRBuilder,
                             SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  const auto *ST =
      static_cast<const SPIRVSubtarget *>(&MIRBuilder.getMF().getSubtarget());
  if ((Opcode == SPIRV::OpControlBarrierArriveINTEL ||
       Opcode == SPIRV::OpControlBarrierWaitINTEL) &&
      !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_split_barrier)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_INTEL_split_barrier";
    report_fatal_error(DiagMsg.c_str(), false);
  }

  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0));

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  unsigned MemFlags = getIConstVal(Call->Arguments[0], MRI);
  unsigned MemSemantics = SPIRV::MemorySemantics::None;

  if (MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE)
    MemSemantics |= SPIRV::MemorySemantics::WorkgroupMemory;

  if (MemFlags & SPIRV::CLK_GLOBAL_MEM_FENCE)
    MemSemantics |= SPIRV::MemorySemantics::CrossWorkgroupMemory;

  if (MemFlags & SPIRV::CLK_IMAGE_MEM_FENCE)
    MemSemantics |= SPIRV::MemorySemantics::ImageMemory;

  if (Opcode == SPIRV::OpMemoryBarrier)
    MemSemantics = getSPIRVMemSemantics(static_cast<std::memory_order>(
                       getIConstVal(Call->Arguments[1], MRI))) |
                   MemSemantics;
  else if (Opcode == SPIRV::OpControlBarrierArriveINTEL)
    MemSemantics |= SPIRV::MemorySemantics::Release;
  else if (Opcode == SPIRV::OpControlBarrierWaitINTEL)
    MemSemantics |= SPIRV::MemorySemantics::Acquire;
  else
    MemSemantics |= SPIRV::MemorySemantics::SequentiallyConsistent;

  Register MemSemanticsReg =
      MemFlags == MemSemantics
          ? Call->Arguments[0]
          : buildConstantIntReg32(MemSemantics, MIRBuilder, GR);
  Register ScopeReg;
  SPIRV::Scope::Scope Scope = SPIRV::Scope::Workgroup;
  SPIRV::Scope::Scope MemScope = Scope;
  if (Call->Arguments.size() >= 2) {
    assert(
        ((Opcode != SPIRV::OpMemoryBarrier && Call->Arguments.size() == 2) ||
         (Opcode == SPIRV::OpMemoryBarrier && Call->Arguments.size() == 3)) &&
        "Extra args for explicitly scoped barrier");
    Register ScopeArg = (Opcode == SPIRV::OpMemoryBarrier) ? Call->Arguments[2]
                                                           : Call->Arguments[1];
    SPIRV::CLMemoryScope CLScope =
        static_cast<SPIRV::CLMemoryScope>(getIConstVal(ScopeArg, MRI));
    MemScope = getSPIRVScope(CLScope);
    if (!(MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE) ||
        (Opcode == SPIRV::OpMemoryBarrier))
      Scope = MemScope;
    if (CLScope == static_cast<unsigned>(Scope))
      ScopeReg = Call->Arguments[1];
  }

  if (!ScopeReg.isValid())
    ScopeReg = buildConstantIntReg32(Scope, MIRBuilder, GR);

  auto MIB = MIRBuilder.buildInstr(Opcode).addUse(ScopeReg);
  if (Opcode != SPIRV::OpMemoryBarrier)
    MIB.addUse(buildConstantIntReg32(MemScope, MIRBuilder, GR));
  MIB.addUse(MemSemanticsReg);
  return true;
}
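
// Example (illustrative): barrier(CLK_LOCAL_MEM_FENCE) lowers roughly to:
//   OpControlBarrier %workgroup %workgroup %sem
// where %sem combines WorkgroupMemory with SequentiallyConsistent.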

/// Helper function for building extended bit operations.
static bool buildExtendedBitOpsInst(const SPIRV::IncomingCall *Call,
                                    unsigned Opcode,
                                    MachineIRBuilder &MIRBuilder,
                                    SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  const auto *ST =
      static_cast<const SPIRVSubtarget *>(&MIRBuilder.getMF().getSubtarget());
  if ((Opcode == SPIRV::OpBitFieldInsert ||
       Opcode == SPIRV::OpBitFieldSExtract ||
       Opcode == SPIRV::OpBitFieldUExtract || Opcode == SPIRV::OpBitReverse) &&
      !ST->canUseExtension(SPIRV::Extension::SPV_KHR_bit_instructions)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_KHR_bit_instructions";
    report_fatal_error(DiagMsg.c_str(), false);
  }

  // Generate the SPIRV instruction accordingly.
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  auto MIB = MIRBuilder.buildInstr(Opcode)
                 .addDef(Call->ReturnRegister)
                 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
  for (unsigned i = 0; i < Call->Arguments.size(); ++i)
    MIB.addUse(Call->Arguments[i]);

  return true;
}

/// Helper function for building Intel's bindless image instructions.
static bool buildBindlessImageINTELInst(const SPIRV::IncomingCall *Call,
                                        unsigned Opcode,
                                        MachineIRBuilder &MIRBuilder,
                                        SPIRVGlobalRegistry *GR) {
  // Generate the SPIRV instruction accordingly.
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  MIRBuilder.buildInstr(Opcode)
      .addDef(Call->ReturnRegister)
      .addUse(GR->getSPIRVTypeID(Call->ReturnType))
      .addUse(Call->Arguments[0]);

  return true;
}

/// Helper function for building Intel's OpBitwiseFunctionINTEL instruction.
static bool buildTernaryBitwiseFunctionINTELInst(
    const SPIRV::IncomingCall *Call, unsigned Opcode,
    MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR) {
  // Generate the SPIRV instruction accordingly.
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  auto MIB = MIRBuilder.buildInstr(Opcode)
                 .addDef(Call->ReturnRegister)
                 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
  for (unsigned i = 0; i < Call->Arguments.size(); ++i)
    MIB.addUse(Call->Arguments[i]);

  return true;
}

                                  unsigned Opcode,
                                  MachineIRBuilder &MIRBuilder,
                                  SPIRVGlobalRegistry *GR) {
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  auto MIB = MIRBuilder.buildInstr(Opcode)
                 .addDef(Call->ReturnRegister)
                 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
  for (unsigned i = 0; i < Call->Arguments.size(); ++i)
    MIB.addUse(Call->Arguments[i]);

  return true;
}

/// Helper function for building Intel's 2D block I/O instructions.
static bool build2DBlockIOINTELInst(const SPIRV::IncomingCall *Call,
                                    unsigned Opcode,
                                    MachineIRBuilder &MIRBuilder,
                                    SPIRVGlobalRegistry *GR) {
  // Generate the SPIRV instruction accordingly.
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0));

  auto MIB = MIRBuilder.buildInstr(Opcode)
                 .addDef(Call->ReturnRegister)
                 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
  for (unsigned i = 0; i < Call->Arguments.size(); ++i)
    MIB.addUse(Call->Arguments[i]);

  return true;
}

static bool buildPipeInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
                          unsigned Scope, MachineIRBuilder &MIRBuilder,
                          SPIRVGlobalRegistry *GR) {
  switch (Opcode) {
  case SPIRV::OpCommitReadPipe:
  case SPIRV::OpCommitWritePipe:
    return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0));
  case SPIRV::OpGroupCommitReadPipe:
  case SPIRV::OpGroupCommitWritePipe:
  case SPIRV::OpGroupReserveReadPipePackets:
  case SPIRV::OpGroupReserveWritePipePackets: {
    Register ScopeConstReg =
        MIRBuilder.buildConstant(LLT::scalar(32), Scope).getReg(0);
    MachineRegisterInfo *MRI = MIRBuilder.getMRI();
    MRI->setRegClass(ScopeConstReg, &SPIRV::iIDRegClass);
    MachineInstrBuilder MIB;
    MIB = MIRBuilder.buildInstr(Opcode);
    // Add the return register and type.
    if (Opcode == SPIRV::OpGroupReserveReadPipePackets ||
        Opcode == SPIRV::OpGroupReserveWritePipePackets)
      MIB.addDef(Call->ReturnRegister)
          .addUse(GR->getSPIRVTypeID(Call->ReturnType));

    MIB.addUse(ScopeConstReg);
    for (unsigned int i = 0; i < Call->Arguments.size(); ++i)
      MIB.addUse(Call->Arguments[i]);

    return true;
  }
  default:
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));
  }
}

static unsigned getNumComponentsForDim(SPIRV::Dim::Dim dim) {
  switch (dim) {
  case SPIRV::Dim::DIM_1D:
  case SPIRV::Dim::DIM_Buffer:
    return 1;
  case SPIRV::Dim::DIM_2D:
  case SPIRV::Dim::DIM_Cube:
  case SPIRV::Dim::DIM_Rect:
    return 2;
  case SPIRV::Dim::DIM_3D:
    return 3;
  default:
    report_fatal_error("Cannot get num components for given Dim");
  }
}

/// Helper function for obtaining the number of size components.
static unsigned getNumSizeComponents(SPIRVType *imgType) {
  assert(imgType->getOpcode() == SPIRV::OpTypeImage);
  auto dim = static_cast<SPIRV::Dim::Dim>(imgType->getOperand(2).getImm());
  unsigned numComps = getNumComponentsForDim(dim);
  bool arrayed = imgType->getOperand(4).getImm() == 1;
  return arrayed ? numComps + 1 : numComps;
}

static bool builtinMayNeedPromotionToVec(uint32_t BuiltinNumber) {
  switch (BuiltinNumber) {
  case SPIRV::OpenCLExtInst::s_min:
  case SPIRV::OpenCLExtInst::u_min:
  case SPIRV::OpenCLExtInst::s_max:
  case SPIRV::OpenCLExtInst::u_max:
  case SPIRV::OpenCLExtInst::fmax:
  case SPIRV::OpenCLExtInst::fmin:
  case SPIRV::OpenCLExtInst::fmax_common:
  case SPIRV::OpenCLExtInst::fmin_common:
  case SPIRV::OpenCLExtInst::s_clamp:
  case SPIRV::OpenCLExtInst::fclamp:
  case SPIRV::OpenCLExtInst::u_clamp:
  case SPIRV::OpenCLExtInst::mix:
  case SPIRV::OpenCLExtInst::step:
  case SPIRV::OpenCLExtInst::smoothstep:
    return true;
  default:
    break;
  }
  return false;
}

//===----------------------------------------------------------------------===//
// Implementation functions for each builtin group
//===----------------------------------------------------------------------===//

static SmallVector<Register>
getBuiltinCallArguments(const SPIRV::IncomingCall *Call,
                        uint32_t BuiltinNumber, MachineIRBuilder &MIRBuilder,
                        SPIRVGlobalRegistry *GR) {

  Register ReturnTypeId = GR->getSPIRVTypeID(Call->ReturnType);
  unsigned ResultElementCount =
      GR->getScalarOrVectorComponentCount(ReturnTypeId);
  bool MayNeedPromotionToVec =
      builtinMayNeedPromotionToVec(BuiltinNumber) && ResultElementCount > 1;

  if (!MayNeedPromotionToVec)
    return {Call->Arguments.begin(), Call->Arguments.end()};

  SmallVector<Register> Arguments;
  for (Register Argument : Call->Arguments) {
    Register VecArg = Argument;
    SPIRVType *ArgumentType = GR->getSPIRVTypeForVReg(Argument);
    if (ArgumentType != Call->ReturnType) {
      VecArg = createVirtualRegister(Call->ReturnType, GR, MIRBuilder);
      auto VecSplat = MIRBuilder.buildInstr(SPIRV::OpCompositeConstruct)
                          .addDef(VecArg)
                          .addUse(ReturnTypeId);
      for (unsigned I = 0; I != ResultElementCount; ++I)
        VecSplat.addUse(Argument);
    }
    Arguments.push_back(VecArg);
  }
  return Arguments;
}
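
// Example (illustrative): for clamp(%v4f, %lo_scalar, %hi_scalar) the scalar
// bounds are splatted first, roughly:
//   %lo4 = OpCompositeConstruct %v4float %lo %lo %lo %lo
//   %hi4 = OpCompositeConstruct %v4float %hi %hi %hi %hi
//   %res = OpExtInst %v4float %std fclamp %v4f %lo4 %hi4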

static bool generateExtInst(const SPIRV::IncomingCall *Call,
                            MachineIRBuilder &MIRBuilder,
                            SPIRVGlobalRegistry *GR, const CallBase &CB) {
  // Lookup the extended instruction number in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  uint32_t Number =
      SPIRV::lookupExtendedBuiltin(Builtin->Name, Builtin->Set)->Number;
  // fmin_common and fmax_common are now deprecated, and we should use fmin
  // and fmax with the NotInf and NotNaN flags instead. Keep the original
  // number to add the NoNans and NoInfs flags later.
  uint32_t OrigNumber = Number;
  const SPIRVSubtarget &ST =
      cast<SPIRVSubtarget>(MIRBuilder.getMF().getSubtarget());
  if (ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2) &&
      (Number == SPIRV::OpenCLExtInst::fmin_common ||
       Number == SPIRV::OpenCLExtInst::fmax_common)) {
    Number = (Number == SPIRV::OpenCLExtInst::fmin_common)
                 ? SPIRV::OpenCLExtInst::fmin
                 : SPIRV::OpenCLExtInst::fmax;
  }

  Register ReturnTypeId = GR->getSPIRVTypeID(Call->ReturnType);
  SmallVector<Register> Arguments =
      getBuiltinCallArguments(Call, Number, MIRBuilder, GR);

  MachineInstrBuilder MIB;
  if (ST.canUseExtension(SPIRV::Extension::SPV_KHR_fma) &&
      Number == SPIRV::OpenCLExtInst::fma) {
    // Use the SPIR-V fma instruction instead of the OpenCL extended
    // instruction if the extension is available.
    MIB = MIRBuilder.buildInstr(SPIRV::OpFmaKHR)
              .addDef(Call->ReturnRegister)
              .addUse(ReturnTypeId);
  } else {
    // Build the extended instruction.
    MIB = MIRBuilder.buildInstr(SPIRV::OpExtInst)
              .addDef(Call->ReturnRegister)
              .addUse(ReturnTypeId)
              .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
              .addImm(Number);
  }

  for (Register Argument : Arguments)
    MIB.addUse(Argument);

  MIB.getInstr()->copyIRFlags(CB);
  if (OrigNumber == SPIRV::OpenCLExtInst::fmin_common ||
      OrigNumber == SPIRV::OpenCLExtInst::fmax_common) {
    // Add the NoNans and NoInfs flags to the fmin/fmax instruction.
    MIB.getInstr()->setFlag(MachineInstr::MIFlag::FmNoNans);
    MIB.getInstr()->setFlag(MachineInstr::MIFlag::FmNoInfs);
  }
  return true;
}
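
// Example (illustrative): with SPV_KHR_float_controls2 available,
// fmax_common(%x, %y) becomes
//   %res = OpExtInst %float %std fmax %x %y
// with the NoNans/NoInfs IR flags set on the emitted instruction.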

static bool generateRelationalInst(const SPIRV::IncomingCall *Call,
                                   MachineIRBuilder &MIRBuilder,
                                   SPIRVGlobalRegistry *GR) {
  // Lookup the instruction opcode in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;

  Register CompareRegister;
  SPIRVType *RelationType;
  std::tie(CompareRegister, RelationType) =
      buildBoolRegister(MIRBuilder, Call->ReturnType, GR);

  // Build the relational instruction.
  auto MIB = MIRBuilder.buildInstr(Opcode)
                 .addDef(CompareRegister)
                 .addUse(GR->getSPIRVTypeID(RelationType));

  for (auto Argument : Call->Arguments)
    MIB.addUse(Argument);

  // Build the select instruction.
  return buildSelectInst(MIRBuilder, Call->ReturnRegister, CompareRegister,
                         Call->ReturnType, GR);
}
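
// Example (illustrative): isequal(%a4, %b4) on v4float produces a v4bool
// compare whose result is then widened to the integer return type:
//   %cmp = OpFOrdEqual %v4bool %a4 %b4
//   %res = OpSelect %v4i32 %cmp %all_ones %zero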

static bool generateGroupInst(const SPIRV::IncomingCall *Call,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  const SPIRV::GroupBuiltin *GroupBuiltin =
      SPIRV::lookupGroupBuiltin(Builtin->Name);

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  if (Call->isSpirvOp()) {
    if (GroupBuiltin->NoGroupOperation) {
      SmallVector<uint32_t, 1> ImmArgs;
      if (GroupBuiltin->Opcode ==
              SPIRV::OpSubgroupMatrixMultiplyAccumulateINTEL &&
          Call->Arguments.size() > 4)
        ImmArgs.push_back(getConstFromIntrinsic(Call->Arguments[4], MRI));
      return buildOpFromWrapper(MIRBuilder, GroupBuiltin->Opcode, Call,
                                GR->getSPIRVTypeID(Call->ReturnType), ImmArgs);
    }

    // The group operation is a literal.
    Register GroupOpReg = Call->Arguments[1];
    const MachineInstr *MI = getDefInstrMaybeConstant(GroupOpReg, MRI);
    if (!MI || MI->getOpcode() != TargetOpcode::G_CONSTANT)
      report_fatal_error(
          "Group Operation parameter must be an integer constant");
    uint64_t GrpOp = MI->getOperand(1).getCImm()->getValue().getZExtValue();
    Register ScopeReg = Call->Arguments[0];
    auto MIB = MIRBuilder.buildInstr(GroupBuiltin->Opcode)
                   .addDef(Call->ReturnRegister)
                   .addUse(GR->getSPIRVTypeID(Call->ReturnType))
                   .addUse(ScopeReg)
                   .addImm(GrpOp);
    for (unsigned i = 2; i < Call->Arguments.size(); ++i)
      MIB.addUse(Call->Arguments[i]);
    return true;
  }

  Register Arg0;
  if (GroupBuiltin->HasBoolArg) {
    SPIRVType *BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder, true);
    Register BoolReg = Call->Arguments[0];
    SPIRVType *BoolRegType = GR->getSPIRVTypeForVReg(BoolReg);
    if (!BoolRegType)
      report_fatal_error("Can't find a register's type definition");
    MachineInstr *ArgInstruction = getDefInstrMaybeConstant(BoolReg, MRI);
    if (ArgInstruction->getOpcode() == TargetOpcode::G_CONSTANT) {
      if (BoolRegType->getOpcode() != SPIRV::OpTypeBool)
        Arg0 = GR->buildConstantInt(getIConstVal(BoolReg, MRI), MIRBuilder,
                                    BoolType, true);
    } else {
      if (BoolRegType->getOpcode() == SPIRV::OpTypeInt) {
        Arg0 = MRI->createGenericVirtualRegister(LLT::scalar(1));
        MRI->setRegClass(Arg0, &SPIRV::iIDRegClass);
        GR->assignSPIRVTypeToVReg(BoolType, Arg0, MIRBuilder.getMF());
        MIRBuilder.buildICmp(
            CmpInst::ICMP_NE, Arg0, BoolReg,
            GR->buildConstantInt(0, MIRBuilder, BoolRegType, true));
        updateRegType(Arg0, nullptr, BoolType, GR, MIRBuilder,
                      MIRBuilder.getMF().getRegInfo());
      } else if (BoolRegType->getOpcode() != SPIRV::OpTypeBool) {
        report_fatal_error("Expect a boolean argument");
      }
      // If BoolReg is already a boolean register, we don't need to do
      // anything.
    }
  }

  Register GroupResultRegister = Call->ReturnRegister;
  SPIRVType *GroupResultType = Call->ReturnType;

  // TODO: maybe we need to check whether the result type is already boolean
  // and in this case do not insert the select instruction.
  const bool HasBoolReturnTy =
      GroupBuiltin->IsElect || GroupBuiltin->IsAllOrAny ||
      GroupBuiltin->IsAllEqual || GroupBuiltin->IsLogical ||
      GroupBuiltin->IsInverseBallot || GroupBuiltin->IsBallotBitExtract;

  if (HasBoolReturnTy)
    std::tie(GroupResultRegister, GroupResultType) =
        buildBoolRegister(MIRBuilder, Call->ReturnType, GR);

  auto Scope = Builtin->Name.starts_with("sub_group") ? SPIRV::Scope::Subgroup
                                                      : SPIRV::Scope::Workgroup;
  Register ScopeRegister = buildConstantIntReg32(Scope, MIRBuilder, GR);

  Register VecReg;
  if (GroupBuiltin->Opcode == SPIRV::OpGroupBroadcast &&
      Call->Arguments.size() > 2) {
    // For OpGroupBroadcast "LocalId must be an integer datatype. It must be a
    // scalar, a vector with 2 components, or a vector with 3 components.",
    // meaning that we must create a vector from the function arguments if
    // it's a work_group_broadcast(val, local_id_x, local_id_y) or
    // work_group_broadcast(val, local_id_x, local_id_y, local_id_z) call.
    Register ElemReg = Call->Arguments[1];
    SPIRVType *ElemType = GR->getSPIRVTypeForVReg(ElemReg);
    if (!ElemType || ElemType->getOpcode() != SPIRV::OpTypeInt)
      report_fatal_error("Expect an integer <LocalId> argument");
    unsigned VecLen = Call->Arguments.size() - 1;
    VecReg = MRI->createGenericVirtualRegister(
        LLT::fixed_vector(VecLen, MRI->getType(ElemReg)));
    MRI->setRegClass(VecReg, &SPIRV::vIDRegClass);
    SPIRVType *VecType =
        GR->getOrCreateSPIRVVectorType(ElemType, VecLen, MIRBuilder, true);
    GR->assignSPIRVTypeToVReg(VecType, VecReg, MIRBuilder.getMF());
    auto MIB =
        MIRBuilder.buildInstr(TargetOpcode::G_BUILD_VECTOR).addDef(VecReg);
    for (unsigned i = 1; i < Call->Arguments.size(); i++) {
      MIB.addUse(Call->Arguments[i]);
      setRegClassIfNull(Call->Arguments[i], MRI, GR);
    }
    updateRegType(VecReg, nullptr, VecType, GR, MIRBuilder,
                  MIRBuilder.getMF().getRegInfo());
  }

  // Build the work/sub group instruction.
  auto MIB = MIRBuilder.buildInstr(GroupBuiltin->Opcode)
                 .addDef(GroupResultRegister)
                 .addUse(GR->getSPIRVTypeID(GroupResultType))
                 .addUse(ScopeRegister);

  if (!GroupBuiltin->NoGroupOperation)
    MIB.addImm(GroupBuiltin->GroupOperation);
  if (Call->Arguments.size() > 0) {
    MIB.addUse(Arg0.isValid() ? Arg0 : Call->Arguments[0]);
    setRegClassIfNull(Call->Arguments[0], MRI, GR);
    if (VecReg.isValid())
      MIB.addUse(VecReg);
    else
      for (unsigned i = 1; i < Call->Arguments.size(); i++)
        MIB.addUse(Call->Arguments[i]);
  }

  // Build the select instruction.
  if (HasBoolReturnTy)
    buildSelectInst(MIRBuilder, Call->ReturnRegister, GroupResultRegister,
                    Call->ReturnType, GR);
  return true;
}
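
// Example (illustrative): work_group_broadcast(%val, %x, %y) packs the local
// ids into a vector before emitting the group instruction, roughly:
//   %ids = G_BUILD_VECTOR %x, %y
//   %res = OpGroupBroadcast %T %workgroup %val %ids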

static bool generateIntelSubgroupsInst(const SPIRV::IncomingCall *Call,
                                       MachineIRBuilder &MIRBuilder,
                                       SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  MachineFunction &MF = MIRBuilder.getMF();
  const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
  const SPIRV::IntelSubgroupsBuiltin *IntelSubgroups =
      SPIRV::lookupIntelSubgroupsBuiltin(Builtin->Name);

  if (IntelSubgroups->IsMedia &&
      !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_media_block_io)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_INTEL_media_block_io";
    report_fatal_error(DiagMsg.c_str(), false);
  } else if (!IntelSubgroups->IsMedia &&
             !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_subgroups)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_INTEL_subgroups";
    report_fatal_error(DiagMsg.c_str(), false);
  }

  uint32_t OpCode = IntelSubgroups->Opcode;
  if (Call->isSpirvOp()) {
    bool IsSet = OpCode != SPIRV::OpSubgroupBlockWriteINTEL &&
                 OpCode != SPIRV::OpSubgroupImageBlockWriteINTEL &&
                 OpCode != SPIRV::OpSubgroupImageMediaBlockWriteINTEL;
    return buildOpFromWrapper(MIRBuilder, OpCode, Call,
                              IsSet ? GR->getSPIRVTypeID(Call->ReturnType)
                                    : Register(0));
  }

  if (IntelSubgroups->IsBlock) {
    // The minimal number of arguments set in the TableGen records is 1.
    if (SPIRVType *Arg0Type = GR->getSPIRVTypeForVReg(Call->Arguments[0])) {
      if (Arg0Type->getOpcode() == SPIRV::OpTypeImage) {
        // TODO: add the required validation from the specification:
        // "'Image' must be an object whose type is OpTypeImage with a
        // 'Sampled' operand of 0 or 2. If the 'Sampled' operand is 2, then
        // some dimensions require a capability."
        switch (OpCode) {
        case SPIRV::OpSubgroupBlockReadINTEL:
          OpCode = SPIRV::OpSubgroupImageBlockReadINTEL;
          break;
        case SPIRV::OpSubgroupBlockWriteINTEL:
          OpCode = SPIRV::OpSubgroupImageBlockWriteINTEL;
          break;
        }
      }
    }
  }

  // TODO: opaque pointer types should eventually be resolved in such a way
  // that validation of block read is enabled with respect to the following
  // specification requirement:
  // "'Result Type' may be a scalar or vector type, and its component type
  // must be equal to the type pointed to by 'Ptr'."
  // For example, the function parameter type should not be the default i8
  // pointer, but depend on the result type of the instruction where it is
  // used as a pointer argument of OpSubgroupBlockReadINTEL.

  // Build the Intel subgroups instruction.
  MachineInstrBuilder MIB =
      IntelSubgroups->IsWrite
          ? MIRBuilder.buildInstr(OpCode)
          : MIRBuilder.buildInstr(OpCode)
                .addDef(Call->ReturnRegister)
                .addUse(GR->getSPIRVTypeID(Call->ReturnType));
  for (size_t i = 0; i < Call->Arguments.size(); ++i)
    MIB.addUse(Call->Arguments[i]);
  return true;
}

static bool generateGroupUniformInst(const SPIRV::IncomingCall *Call,
                                     MachineIRBuilder &MIRBuilder,
                                     SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  MachineFunction &MF = MIRBuilder.getMF();
  const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
  if (!ST->canUseExtension(
          SPIRV::Extension::SPV_KHR_uniform_group_instructions)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_KHR_uniform_group_instructions";
    report_fatal_error(DiagMsg.c_str(), false);
  }
  const SPIRV::GroupUniformBuiltin *GroupUniform =
      SPIRV::lookupGroupUniformBuiltin(Builtin->Name);
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  Register GroupResultReg = Call->ReturnRegister;
  Register ScopeReg = Call->Arguments[0];
  Register ValueReg = Call->Arguments[2];

  // Group Operation
  Register ConstGroupOpReg = Call->Arguments[1];
  const MachineInstr *Const = getDefInstrMaybeConstant(ConstGroupOpReg, MRI);
  if (!Const || Const->getOpcode() != TargetOpcode::G_CONSTANT)
    report_fatal_error(
        "expect a constant group operation for a uniform group instruction",
        false);
  const MachineOperand &ConstOperand = Const->getOperand(1);
  if (!ConstOperand.isCImm())
    report_fatal_error("uniform group instructions: group operation must be "
                       "an integer constant",
                       false);

  auto MIB = MIRBuilder.buildInstr(GroupUniform->Opcode)
                 .addDef(GroupResultReg)
                 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
                 .addUse(ScopeReg);
  addNumImm(ConstOperand.getCImm()->getValue(), MIB);
  MIB.addUse(ValueReg);

  return true;
}

static bool generateKernelClockInst(const SPIRV::IncomingCall *Call,
                                    MachineIRBuilder &MIRBuilder,
                                    SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  MachineFunction &MF = MIRBuilder.getMF();
  const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
  if (!ST->canUseExtension(SPIRV::Extension::SPV_KHR_shader_clock)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_KHR_shader_clock";
    report_fatal_error(DiagMsg.c_str(), false);
  }

  Register ResultReg = Call->ReturnRegister;

  if (Builtin->Name == "__spirv_ReadClockKHR") {
    MIRBuilder.buildInstr(SPIRV::OpReadClockKHR)
        .addDef(ResultReg)
        .addUse(GR->getSPIRVTypeID(Call->ReturnType))
        .addUse(Call->Arguments[0]);
  } else {
    // Deduce the `Scope` operand from the builtin function name.
    SPIRV::Scope::Scope ScopeArg =
        StringSwitch<SPIRV::Scope::Scope>(Builtin->Name)
            .EndsWith("device", SPIRV::Scope::Scope::Device)
            .EndsWith("work_group", SPIRV::Scope::Scope::Workgroup)
            .EndsWith("sub_group", SPIRV::Scope::Scope::Subgroup);
    Register ScopeReg = buildConstantIntReg32(ScopeArg, MIRBuilder, GR);

    MIRBuilder.buildInstr(SPIRV::OpReadClockKHR)
        .addDef(ResultReg)
        .addUse(GR->getSPIRVTypeID(Call->ReturnType))
        .addUse(ScopeReg);
  }

  return true;
}
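
// Example (illustrative): clock_read_device() lowers roughly to:
//   %scope = OpConstant %uint 1    ; Device
//   %res   = OpReadClockKHR %ulong %scope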
1604
1605// These queries ask for a single size_t result for a given dimension index,
1606// e.g. size_t get_global_id(uint dimindex). In SPIR-V, the builtins
1607// corresponding to these values are all vec3 types, so we need to extract the
1608// correct index or return DefaultValue (0 or 1 depending on the query). We also
1609// handle extending or truncating in case size_t does not match the expected
1610// result type's bitwidth.
1611//
1612// For a constant index >= 3 we generate:
1613// %res = OpConstant %SizeT DefaultValue
1614//
1615// For other indices we generate:
1616// %g = OpVariable %ptr_V3_SizeT Input
1617// OpDecorate %g BuiltIn XXX
1618// OpDecorate %g LinkageAttributes "__spirv_BuiltInXXX"
1619// OpDecorate %g Constant
1620// %loadedVec = OpLoad %V3_SizeT %g
1621//
1622// Then, if the index is constant < 3, we generate:
1623// %res = OpCompositeExtract %SizeT %loadedVec idx
1624// If the index is dynamic, we generate:
1625// %tmp = OpVectorExtractDynamic %SizeT %loadedVec %idx
1626// %cmp = OpULessThan %bool %idx %const_3
1627// %res = OpSelect %SizeT %cmp %tmp %const_<DefaultValue>
1628//
1629// If the bitwidth of %res does not match the expected return type, we add an
1630// extend or truncate.
1631static bool genWorkgroupQuery(const SPIRV::IncomingCall *Call,
1632 MachineIRBuilder &MIRBuilder,
1633                              SPIRVGlobalRegistry *GR,
1634 SPIRV::BuiltIn::BuiltIn BuiltinValue,
1635 uint64_t DefaultValue) {
1636 Register IndexRegister = Call->Arguments[0];
1637 const unsigned ResultWidth = Call->ReturnType->getOperand(1).getImm();
1638 const unsigned PointerSize = GR->getPointerSize();
1639 const SPIRVType *PointerSizeType =
1640 GR->getOrCreateSPIRVIntegerType(PointerSize, MIRBuilder);
1641 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1642 auto IndexInstruction = getDefInstrMaybeConstant(IndexRegister, MRI);
1643
1644  // Set up the register whose value will be truncated or extended at the end.
1645 Register ToTruncate = Call->ReturnRegister;
1646
1647 // If the index is constant, we can statically determine if it is in range.
1648 bool IsConstantIndex =
1649 IndexInstruction->getOpcode() == TargetOpcode::G_CONSTANT;
1650
1651  // If it's out of range (the maximum number of dimensions is 3), we can just
1652  // return the constant default value (0 or 1 depending on the query function).
1653 if (IsConstantIndex && getIConstVal(IndexRegister, MRI) >= 3) {
1654 Register DefaultReg = Call->ReturnRegister;
1655 if (PointerSize != ResultWidth) {
1656 DefaultReg = MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
1657 MRI->setRegClass(DefaultReg, &SPIRV::iIDRegClass);
1658 GR->assignSPIRVTypeToVReg(PointerSizeType, DefaultReg,
1659 MIRBuilder.getMF());
1660 ToTruncate = DefaultReg;
1661 }
1662 auto NewRegister =
1663 GR->buildConstantInt(DefaultValue, MIRBuilder, PointerSizeType, true);
1664 MIRBuilder.buildCopy(DefaultReg, NewRegister);
1665 } else { // If it could be in range, we need to load from the given builtin.
1666 auto Vec3Ty =
1667 GR->getOrCreateSPIRVVectorType(PointerSizeType, 3, MIRBuilder, true);
1668 Register LoadedVector =
1669 buildBuiltinVariableLoad(MIRBuilder, Vec3Ty, GR, BuiltinValue,
1670 LLT::fixed_vector(3, PointerSize));
1671 // Set up the vreg to extract the result to (possibly a new temporary one).
1672 Register Extracted = Call->ReturnRegister;
1673 if (!IsConstantIndex || PointerSize != ResultWidth) {
1674 Extracted = MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
1675 MRI->setRegClass(Extracted, &SPIRV::iIDRegClass);
1676 GR->assignSPIRVTypeToVReg(PointerSizeType, Extracted, MIRBuilder.getMF());
1677 }
1678 // Use Intrinsic::spv_extractelt so dynamic vs static extraction is
1679 // handled later: extr = spv_extractelt LoadedVector, IndexRegister.
1680 MachineInstrBuilder ExtractInst = MIRBuilder.buildIntrinsic(
1681 Intrinsic::spv_extractelt, ArrayRef<Register>{Extracted}, true, false);
1682 ExtractInst.addUse(LoadedVector).addUse(IndexRegister);
1683
1684    // If the index is dynamic, we need to check whether it is < 3 and then use a select.
1685 if (!IsConstantIndex) {
1686 updateRegType(Extracted, nullptr, PointerSizeType, GR, MIRBuilder, *MRI);
1687
1688 auto IndexType = GR->getSPIRVTypeForVReg(IndexRegister);
1689 auto BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder, true);
1690
1691 Register CompareRegister =
1692 MRI->createGenericVirtualRegister(LLT::scalar(1));
1693 MRI->setRegClass(CompareRegister, &SPIRV::iIDRegClass);
1694 GR->assignSPIRVTypeToVReg(BoolType, CompareRegister, MIRBuilder.getMF());
1695
1696 // Use G_ICMP to check if idxVReg < 3.
1697 MIRBuilder.buildICmp(
1698 CmpInst::ICMP_ULT, CompareRegister, IndexRegister,
1699 GR->buildConstantInt(3, MIRBuilder, IndexType, true));
1700
1701 // Get constant for the default value (0 or 1 depending on which
1702 // function).
1703 Register DefaultRegister =
1704 GR->buildConstantInt(DefaultValue, MIRBuilder, PointerSizeType, true);
1705
1706 // Get a register for the selection result (possibly a new temporary one).
1707 Register SelectionResult = Call->ReturnRegister;
1708 if (PointerSize != ResultWidth) {
1709 SelectionResult =
1710 MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
1711 MRI->setRegClass(SelectionResult, &SPIRV::iIDRegClass);
1712 GR->assignSPIRVTypeToVReg(PointerSizeType, SelectionResult,
1713 MIRBuilder.getMF());
1714 }
1715 // Create the final G_SELECT to return the extracted value or the default.
1716 MIRBuilder.buildSelect(SelectionResult, CompareRegister, Extracted,
1717 DefaultRegister);
1718 ToTruncate = SelectionResult;
1719 } else {
1720 ToTruncate = Extracted;
1721 }
1722 }
1723  // Adjust the result's bitwidth if it does not match that of the extracted SizeT value.
1724 if (PointerSize != ResultWidth)
1725 MIRBuilder.buildZExtOrTrunc(Call->ReturnRegister, ToTruncate);
1726 return true;
1727}
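// For illustration (a sketch of the path above): `get_global_id(0)` with a
// 64-bit size_t and a constant in-range index roughly becomes:
//   %vec = OpLoad %v3ulong %BuiltInGlobalInvocationId
//   %res = OpCompositeExtract %ulong %vec 0
// and a trailing zext/trunc is added only if the return width differs from
// the pointer size.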
1728
1729static bool generateBuiltinVar(const SPIRV::IncomingCall *Call,
1730 MachineIRBuilder &MIRBuilder,
1731 SPIRVGlobalRegistry *GR) {
1732 // Lookup the builtin variable record.
1733 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1734 SPIRV::BuiltIn::BuiltIn Value =
1735 SPIRV::lookupGetBuiltin(Builtin->Name, Builtin->Set)->Value;
1736
1737 if (Value == SPIRV::BuiltIn::GlobalInvocationId)
1738 return genWorkgroupQuery(Call, MIRBuilder, GR, Value, 0);
1739
1740 // Build a load instruction for the builtin variable.
1741 unsigned BitWidth = GR->getScalarOrVectorBitWidth(Call->ReturnType);
1742 LLT LLType;
1743 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeVector)
1744 LLType =
1745 LLT::fixed_vector(Call->ReturnType->getOperand(2).getImm(), BitWidth);
1746 else
1747 LLType = LLT::scalar(BitWidth);
1748
1749 return buildBuiltinVariableLoad(MIRBuilder, Call->ReturnType, GR, Value,
1750 LLType, Call->ReturnRegister);
1751}
1752
1753static bool generateAtomicInst(const SPIRV::IncomingCall *Call,
1754 MachineIRBuilder &MIRBuilder,
1755 SPIRVGlobalRegistry *GR) {
1756 // Lookup the instruction opcode in the TableGen records.
1757 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1758 unsigned Opcode =
1759 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1760
1761 switch (Opcode) {
1762 case SPIRV::OpStore:
1763 return buildAtomicInitInst(Call, MIRBuilder);
1764 case SPIRV::OpAtomicLoad:
1765 return buildAtomicLoadInst(Call, MIRBuilder, GR);
1766 case SPIRV::OpAtomicStore:
1767 return buildAtomicStoreInst(Call, MIRBuilder, GR);
1768 case SPIRV::OpAtomicCompareExchange:
1769 case SPIRV::OpAtomicCompareExchangeWeak:
1770 return buildAtomicCompareExchangeInst(Call, Builtin, Opcode, MIRBuilder,
1771 GR);
1772 case SPIRV::OpAtomicIAdd:
1773 case SPIRV::OpAtomicISub:
1774 case SPIRV::OpAtomicOr:
1775 case SPIRV::OpAtomicXor:
1776 case SPIRV::OpAtomicAnd:
1777 case SPIRV::OpAtomicExchange:
1778 return buildAtomicRMWInst(Call, Opcode, MIRBuilder, GR);
1779 case SPIRV::OpMemoryBarrier:
1780 return buildBarrierInst(Call, SPIRV::OpMemoryBarrier, MIRBuilder, GR);
1781 case SPIRV::OpAtomicFlagTestAndSet:
1782 case SPIRV::OpAtomicFlagClear:
1783 return buildAtomicFlagInst(Call, Opcode, MIRBuilder, GR);
1784 default:
1785 if (Call->isSpirvOp())
1786 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
1787 GR->getSPIRVTypeID(Call->ReturnType));
1788 return false;
1789 }
1790}
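// For illustration (a sketch, not emitted verbatim): `atomic_fetch_add(p, v)`
// maps to OpAtomicIAdd with the operand order:
//   %res = OpAtomicIAdd %int %ptr %scope %semantics %v
// where the scope and semantics constants are synthesized by the helper when
// the builtin does not supply them explicitly.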
1791
1792static bool generateAtomicFloatingInst(const SPIRV::IncomingCall *Call,
1793 MachineIRBuilder &MIRBuilder,
1794 SPIRVGlobalRegistry *GR) {
1795 // Lookup the instruction opcode in the TableGen records.
1796 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1797 unsigned Opcode = SPIRV::lookupAtomicFloatingBuiltin(Builtin->Name)->Opcode;
1798
1799 switch (Opcode) {
1800 case SPIRV::OpAtomicFAddEXT:
1801 case SPIRV::OpAtomicFMinEXT:
1802 case SPIRV::OpAtomicFMaxEXT:
1803 return buildAtomicFloatingRMWInst(Call, Opcode, MIRBuilder, GR);
1804 default:
1805 return false;
1806 }
1807}
1808
1809static bool generateBarrierInst(const SPIRV::IncomingCall *Call,
1810 MachineIRBuilder &MIRBuilder,
1811 SPIRVGlobalRegistry *GR) {
1812 // Lookup the instruction opcode in the TableGen records.
1813 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1814 unsigned Opcode =
1815 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1816
1817 return buildBarrierInst(Call, Opcode, MIRBuilder, GR);
1818}
1819
1820static bool generateCastToPtrInst(const SPIRV::IncomingCall *Call,
1821 MachineIRBuilder &MIRBuilder,
1822 SPIRVGlobalRegistry *GR) {
1823 // Lookup the instruction opcode in the TableGen records.
1824 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1825 unsigned Opcode =
1826 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1827
1828 if (Opcode == SPIRV::OpGenericCastToPtrExplicit) {
1829 SPIRV::StorageClass::StorageClass ResSC =
1830 GR->getPointerStorageClass(Call->ReturnRegister);
1831 if (!isGenericCastablePtr(ResSC))
1832 return false;
1833
1834 MIRBuilder.buildInstr(Opcode)
1835 .addDef(Call->ReturnRegister)
1836 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1837 .addUse(Call->Arguments[0])
1838 .addImm(ResSC);
1839 } else {
1840 MIRBuilder.buildInstr(TargetOpcode::G_ADDRSPACE_CAST)
1841 .addDef(Call->ReturnRegister)
1842 .addUse(Call->Arguments[0]);
1843 }
1844 return true;
1845}
1846
1847static bool generateDotOrFMulInst(const StringRef DemangledCall,
1848                                  const SPIRV::IncomingCall *Call,
1849 MachineIRBuilder &MIRBuilder,
1850 SPIRVGlobalRegistry *GR) {
1851 if (Call->isSpirvOp())
1852 return buildOpFromWrapper(MIRBuilder, SPIRV::OpDot, Call,
1853 GR->getSPIRVTypeID(Call->ReturnType));
1854
1855 bool IsVec = GR->getSPIRVTypeForVReg(Call->Arguments[0])->getOpcode() ==
1856 SPIRV::OpTypeVector;
1857 // Use OpDot only in case of vector args and OpFMul in case of scalar args.
1858 uint32_t OC = IsVec ? SPIRV::OpDot : SPIRV::OpFMulS;
1859 bool IsSwapReq = false;
1860
1861 const auto *ST =
1862 static_cast<const SPIRVSubtarget *>(&MIRBuilder.getMF().getSubtarget());
1863 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt) &&
1864 (ST->canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
1865 ST->isAtLeastSPIRVVer(VersionTuple(1, 6)))) {
1866 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1867 const SPIRV::IntegerDotProductBuiltin *IntDot =
1868 SPIRV::lookupIntegerDotProductBuiltin(Builtin->Name);
1869 if (IntDot) {
1870 OC = IntDot->Opcode;
1871 IsSwapReq = IntDot->IsSwapReq;
1872 } else if (IsVec) {
1873 // Handling "dot" and "dot_acc_sat" builtins which use vectors of
1874 // integers.
1875 LLVMContext &Ctx = MIRBuilder.getContext();
1876      SmallVector<StringRef, 10> TypeStrs;
1877 SPIRV::parseBuiltinTypeStr(TypeStrs, DemangledCall, Ctx);
1878 bool IsFirstSigned = TypeStrs[0].trim()[0] != 'u';
1879 bool IsSecondSigned = TypeStrs[1].trim()[0] != 'u';
1880
1881 if (Call->BuiltinName == "dot") {
1882 if (IsFirstSigned && IsSecondSigned)
1883 OC = SPIRV::OpSDot;
1884 else if (!IsFirstSigned && !IsSecondSigned)
1885 OC = SPIRV::OpUDot;
1886 else {
1887 OC = SPIRV::OpSUDot;
1888 if (!IsFirstSigned)
1889 IsSwapReq = true;
1890 }
1891 } else if (Call->BuiltinName == "dot_acc_sat") {
1892 if (IsFirstSigned && IsSecondSigned)
1893 OC = SPIRV::OpSDotAccSat;
1894 else if (!IsFirstSigned && !IsSecondSigned)
1895 OC = SPIRV::OpUDotAccSat;
1896 else {
1897 OC = SPIRV::OpSUDotAccSat;
1898 if (!IsFirstSigned)
1899 IsSwapReq = true;
1900 }
1901 }
1902 }
1903 }
1904
1905 MachineInstrBuilder MIB = MIRBuilder.buildInstr(OC)
1906 .addDef(Call->ReturnRegister)
1907 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
1908
1909 if (IsSwapReq) {
1910 MIB.addUse(Call->Arguments[1]);
1911 MIB.addUse(Call->Arguments[0]);
1912 // needed for dot_acc_sat* builtins
1913 for (size_t i = 2; i < Call->Arguments.size(); ++i)
1914 MIB.addUse(Call->Arguments[i]);
1915 } else {
1916 for (size_t i = 0; i < Call->Arguments.size(); ++i)
1917 MIB.addUse(Call->Arguments[i]);
1918 }
1919
1920  // Add the Packed Vector Format literal for integer dot product builtins if
1921  // the arguments are scalar.
1922 if (!IsVec && OC != SPIRV::OpFMulS)
1923 MIB.addImm(SPIRV::PackedVectorFormat4x8Bit);
1924
1925 return true;
1926}
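// For illustration (a sketch): `dot(uchar4 a, char4 b)` mixes signedness, so
// it selects OpSUDot and, because the first operand is unsigned, swaps the
// operands to put the signed one first:
//   %res = OpSUDot %int %b %a
// The scalar forms additionally append the PackedVectorFormat4x8Bit literal.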
1927
1928static bool generateWaveInst(const SPIRV::IncomingCall *Call,
1929 MachineIRBuilder &MIRBuilder,
1930 SPIRVGlobalRegistry *GR) {
1931 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1932 SPIRV::BuiltIn::BuiltIn Value =
1933 SPIRV::lookupGetBuiltin(Builtin->Name, Builtin->Set)->Value;
1934
1935 // For now, we only support a single Wave intrinsic with a single return type.
1936 assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt);
1937 LLT LLType = LLT::scalar(GR->getScalarOrVectorBitWidth(Call->ReturnType));
1938
1939  return buildBuiltinVariableLoad(
1940 MIRBuilder, Call->ReturnType, GR, Value, LLType, Call->ReturnRegister,
1941 /* isConst= */ false, /* LinkageType= */ std::nullopt);
1942}
1943
1944// We expect a builtin
1945// Name(ptr sret([RetType]) %result, Type %operand1, Type %operand2)
1946// where %result is a pointer to where the result of the builtin execution
1947// is to be stored, and generate the following instructions:
1948// Res = Opcode RetType Operand1 Operand2
1949// OpStore RetVariable Res
1950static bool generateICarryBorrowInst(const SPIRV::IncomingCall *Call,
1951 MachineIRBuilder &MIRBuilder,
1952 SPIRVGlobalRegistry *GR) {
1953 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1954 unsigned Opcode =
1955 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1956
1957 Register SRetReg = Call->Arguments[0];
1958 SPIRVType *PtrRetType = GR->getSPIRVTypeForVReg(SRetReg);
1959 SPIRVType *RetType = GR->getPointeeType(PtrRetType);
1960 if (!RetType)
1961 report_fatal_error("The first parameter must be a pointer");
1962 if (RetType->getOpcode() != SPIRV::OpTypeStruct)
1963 report_fatal_error("Expected struct type result for the arithmetic with "
1964 "overflow builtins");
1965
1966 SPIRVType *OpType1 = GR->getSPIRVTypeForVReg(Call->Arguments[1]);
1967 SPIRVType *OpType2 = GR->getSPIRVTypeForVReg(Call->Arguments[2]);
1968 if (!OpType1 || !OpType2 || OpType1 != OpType2)
1969 report_fatal_error("Operands must have the same type");
1970 if (OpType1->getOpcode() == SPIRV::OpTypeVector)
1971 switch (Opcode) {
1972 case SPIRV::OpIAddCarryS:
1973 Opcode = SPIRV::OpIAddCarryV;
1974 break;
1975 case SPIRV::OpISubBorrowS:
1976 Opcode = SPIRV::OpISubBorrowV;
1977 break;
1978 }
1979
1980 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1981 Register ResReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1982 if (const TargetRegisterClass *DstRC =
1983 MRI->getRegClassOrNull(Call->Arguments[1])) {
1984 MRI->setRegClass(ResReg, DstRC);
1985 MRI->setType(ResReg, MRI->getType(Call->Arguments[1]));
1986 } else {
1987 MRI->setType(ResReg, LLT::scalar(64));
1988 }
1989 GR->assignSPIRVTypeToVReg(RetType, ResReg, MIRBuilder.getMF());
1990 MIRBuilder.buildInstr(Opcode)
1991 .addDef(ResReg)
1992 .addUse(GR->getSPIRVTypeID(RetType))
1993 .addUse(Call->Arguments[1])
1994 .addUse(Call->Arguments[2]);
1995 MIRBuilder.buildInstr(SPIRV::OpStore).addUse(SRetReg).addUse(ResReg);
1996 return true;
1997}
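// For illustration (a sketch, assuming a name like `__spirv_IAddCarry`):
//   __spirv_IAddCarry(&res, a, b);
// would roughly produce:
//   %tmp = OpIAddCarry %struct_of_two_ints %a %b
//   OpStore %res %tmp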
1998
1999static bool generateGetQueryInst(const SPIRV::IncomingCall *Call,
2000 MachineIRBuilder &MIRBuilder,
2001 SPIRVGlobalRegistry *GR) {
2002 // Lookup the builtin record.
2003 SPIRV::BuiltIn::BuiltIn Value =
2004 SPIRV::lookupGetBuiltin(Call->Builtin->Name, Call->Builtin->Set)->Value;
2005 const bool IsDefaultOne = (Value == SPIRV::BuiltIn::GlobalSize ||
2006 Value == SPIRV::BuiltIn::NumWorkgroups ||
2007 Value == SPIRV::BuiltIn::WorkgroupSize ||
2008 Value == SPIRV::BuiltIn::EnqueuedWorkgroupSize);
2009 return genWorkgroupQuery(Call, MIRBuilder, GR, Value, IsDefaultOne ? 1 : 0);
2010}
2011
2012static bool generateImageSizeQueryInst(const SPIRV::IncomingCall *Call,
2013 MachineIRBuilder &MIRBuilder,
2014 SPIRVGlobalRegistry *GR) {
2015 // Lookup the image size query component number in the TableGen records.
2016 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2017 uint32_t Component =
2018 SPIRV::lookupImageQueryBuiltin(Builtin->Name, Builtin->Set)->Component;
2019 // Query result may either be a vector or a scalar. If return type is not a
2020 // vector, expect only a single size component. Otherwise get the number of
2021 // expected components.
2022 unsigned NumExpectedRetComponents =
2023 Call->ReturnType->getOpcode() == SPIRV::OpTypeVector
2024 ? Call->ReturnType->getOperand(2).getImm()
2025 : 1;
2026 // Get the actual number of query result/size components.
2027 SPIRVType *ImgType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
2028 unsigned NumActualRetComponents = getNumSizeComponents(ImgType);
2029 Register QueryResult = Call->ReturnRegister;
2030 SPIRVType *QueryResultType = Call->ReturnType;
2031 if (NumExpectedRetComponents != NumActualRetComponents) {
2032 unsigned Bitwidth = Call->ReturnType->getOpcode() == SPIRV::OpTypeInt
2033 ? Call->ReturnType->getOperand(1).getImm()
2034 : 32;
2035 QueryResult = MIRBuilder.getMRI()->createGenericVirtualRegister(
2036 LLT::fixed_vector(NumActualRetComponents, Bitwidth));
2037 MIRBuilder.getMRI()->setRegClass(QueryResult, &SPIRV::vIDRegClass);
2038 SPIRVType *IntTy = GR->getOrCreateSPIRVIntegerType(Bitwidth, MIRBuilder);
2039 QueryResultType = GR->getOrCreateSPIRVVectorType(
2040 IntTy, NumActualRetComponents, MIRBuilder, true);
2041 GR->assignSPIRVTypeToVReg(QueryResultType, QueryResult, MIRBuilder.getMF());
2042 }
2043 bool IsDimBuf = ImgType->getOperand(2).getImm() == SPIRV::Dim::DIM_Buffer;
2044 unsigned Opcode =
2045 IsDimBuf ? SPIRV::OpImageQuerySize : SPIRV::OpImageQuerySizeLod;
2046 auto MIB = MIRBuilder.buildInstr(Opcode)
2047 .addDef(QueryResult)
2048 .addUse(GR->getSPIRVTypeID(QueryResultType))
2049 .addUse(Call->Arguments[0]);
2050 if (!IsDimBuf)
2051 MIB.addUse(buildConstantIntReg32(0, MIRBuilder, GR)); // Lod id.
2052 if (NumExpectedRetComponents == NumActualRetComponents)
2053 return true;
2054 if (NumExpectedRetComponents == 1) {
2055 // Only 1 component is expected, build OpCompositeExtract instruction.
2056 unsigned ExtractedComposite =
2057 Component == 3 ? NumActualRetComponents - 1 : Component;
2058 assert(ExtractedComposite < NumActualRetComponents &&
2059 "Invalid composite index!");
2060 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
2061 SPIRVType *NewType = nullptr;
2062 if (QueryResultType->getOpcode() == SPIRV::OpTypeVector) {
2063 Register NewTypeReg = QueryResultType->getOperand(1).getReg();
2064 if (TypeReg != NewTypeReg &&
2065 (NewType = GR->getSPIRVTypeForVReg(NewTypeReg)) != nullptr)
2066 TypeReg = NewTypeReg;
2067 }
2068 MIRBuilder.buildInstr(SPIRV::OpCompositeExtract)
2069 .addDef(Call->ReturnRegister)
2070 .addUse(TypeReg)
2071 .addUse(QueryResult)
2072 .addImm(ExtractedComposite);
2073 if (NewType != nullptr)
2074 updateRegType(Call->ReturnRegister, nullptr, NewType, GR, MIRBuilder,
2075 MIRBuilder.getMF().getRegInfo());
2076 } else {
2077 // More than 1 component is expected, fill a new vector.
2078 auto MIB = MIRBuilder.buildInstr(SPIRV::OpVectorShuffle)
2079 .addDef(Call->ReturnRegister)
2080 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2081 .addUse(QueryResult)
2082 .addUse(QueryResult);
2083 for (unsigned i = 0; i < NumExpectedRetComponents; ++i)
2084 MIB.addImm(i < NumActualRetComponents ? i : 0xffffffff);
2085 }
2086 return true;
2087}
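// For illustration (a sketch): `get_image_width(image2d_t img)` expects one
// scalar but the 2D size query yields two components, so it roughly becomes:
//   %sz  = OpImageQuerySizeLod %v2int %img %lod0
//   %res = OpCompositeExtract %int %sz 0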
2088
2089static bool generateImageMiscQueryInst(const SPIRV::IncomingCall *Call,
2090 MachineIRBuilder &MIRBuilder,
2091 SPIRVGlobalRegistry *GR) {
2092 assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt &&
2093 "Image samples query result must be of int type!");
2094
2095 // Lookup the instruction opcode in the TableGen records.
2096 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2097 unsigned Opcode =
2098 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2099
2100 Register Image = Call->Arguments[0];
2101 SPIRV::Dim::Dim ImageDimensionality = static_cast<SPIRV::Dim::Dim>(
2102 GR->getSPIRVTypeForVReg(Image)->getOperand(2).getImm());
2103 (void)ImageDimensionality;
2104
2105 switch (Opcode) {
2106 case SPIRV::OpImageQuerySamples:
2107 assert(ImageDimensionality == SPIRV::Dim::DIM_2D &&
2108 "Image must be of 2D dimensionality");
2109 break;
2110 case SPIRV::OpImageQueryLevels:
2111 assert((ImageDimensionality == SPIRV::Dim::DIM_1D ||
2112 ImageDimensionality == SPIRV::Dim::DIM_2D ||
2113 ImageDimensionality == SPIRV::Dim::DIM_3D ||
2114 ImageDimensionality == SPIRV::Dim::DIM_Cube) &&
2115 "Image must be of 1D/2D/3D/Cube dimensionality");
2116 break;
2117 }
2118
2119 MIRBuilder.buildInstr(Opcode)
2120 .addDef(Call->ReturnRegister)
2121 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2122 .addUse(Image);
2123 return true;
2124}
2125
2126// TODO: Move to TableGen.
2127static SPIRV::SamplerAddressingMode::SamplerAddressingMode
2128getSamplerAddressingModeFromBitmask(unsigned Bitmask) {
2129 switch (Bitmask & SPIRV::CLK_ADDRESS_MODE_MASK) {
2130 case SPIRV::CLK_ADDRESS_CLAMP:
2131 return SPIRV::SamplerAddressingMode::Clamp;
2132 case SPIRV::CLK_ADDRESS_CLAMP_TO_EDGE:
2133 return SPIRV::SamplerAddressingMode::ClampToEdge;
2134 case SPIRV::CLK_ADDRESS_REPEAT:
2135 return SPIRV::SamplerAddressingMode::Repeat;
2136 case SPIRV::CLK_ADDRESS_MIRRORED_REPEAT:
2137 return SPIRV::SamplerAddressingMode::RepeatMirrored;
2138 case SPIRV::CLK_ADDRESS_NONE:
2139 return SPIRV::SamplerAddressingMode::None;
2140 default:
2141 report_fatal_error("Unknown CL address mode");
2142 }
2143}
2144
2145static unsigned getSamplerParamFromBitmask(unsigned Bitmask) {
2146 return (Bitmask & SPIRV::CLK_NORMALIZED_COORDS_TRUE) ? 1 : 0;
2147}
2148
2149static SPIRV::SamplerFilterMode::SamplerFilterMode
2150getSamplerFilterModeFromBitmask(unsigned Bitmask) {
2151 if (Bitmask & SPIRV::CLK_FILTER_LINEAR)
2152 return SPIRV::SamplerFilterMode::Linear;
2153 if (Bitmask & SPIRV::CLK_FILTER_NEAREST)
2154 return SPIRV::SamplerFilterMode::Nearest;
2155 return SPIRV::SamplerFilterMode::Nearest;
2156}
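// For illustration (a sketch): the OpenCL sampler literal
//   CLK_NORMALIZED_COORDS_TRUE | CLK_ADDRESS_CLAMP | CLK_FILTER_LINEAR
// decomposes via the three helpers above into
// (SamplerAddressingMode::Clamp, 1 /*normalized*/, SamplerFilterMode::Linear).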
2157
2158static bool generateReadImageInst(const StringRef DemangledCall,
2159                                  const SPIRV::IncomingCall *Call,
2160 MachineIRBuilder &MIRBuilder,
2161 SPIRVGlobalRegistry *GR) {
2162 if (Call->isSpirvOp())
2163 return buildOpFromWrapper(MIRBuilder, SPIRV::OpImageRead, Call,
2164 GR->getSPIRVTypeID(Call->ReturnType));
2165 Register Image = Call->Arguments[0];
2166 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2167 bool HasOclSampler = DemangledCall.contains_insensitive("ocl_sampler");
2168 bool HasMsaa = DemangledCall.contains_insensitive("msaa");
2169 if (HasOclSampler) {
2170 Register Sampler = Call->Arguments[1];
2171
2172 if (!GR->isScalarOfType(Sampler, SPIRV::OpTypeSampler) &&
2173 getDefInstrMaybeConstant(Sampler, MRI)->getOperand(1).isCImm()) {
2174 uint64_t SamplerMask = getIConstVal(Sampler, MRI);
2175 Sampler = GR->buildConstantSampler(
2176          Register(), getSamplerAddressingModeFromBitmask(SamplerMask),
2177 getSamplerParamFromBitmask(SamplerMask),
2178 getSamplerFilterModeFromBitmask(SamplerMask), MIRBuilder);
2179 }
2180 SPIRVType *ImageType = GR->getSPIRVTypeForVReg(Image);
2181 SPIRVType *SampledImageType =
2182 GR->getOrCreateOpTypeSampledImage(ImageType, MIRBuilder);
2183 Register SampledImage = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2184
2185 MIRBuilder.buildInstr(SPIRV::OpSampledImage)
2186 .addDef(SampledImage)
2187 .addUse(GR->getSPIRVTypeID(SampledImageType))
2188 .addUse(Image)
2189 .addUse(Sampler);
2190
2191    Register Lod = GR->buildConstantFP(APFloat::getZero(APFloat::IEEEsingle()),
2192 MIRBuilder);
2193
2194 if (Call->ReturnType->getOpcode() != SPIRV::OpTypeVector) {
2195 SPIRVType *TempType =
2196 GR->getOrCreateSPIRVVectorType(Call->ReturnType, 4, MIRBuilder, true);
2197 Register TempRegister =
2198 MRI->createGenericVirtualRegister(GR->getRegType(TempType));
2199 MRI->setRegClass(TempRegister, GR->getRegClass(TempType));
2200 GR->assignSPIRVTypeToVReg(TempType, TempRegister, MIRBuilder.getMF());
2201 MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
2202 .addDef(TempRegister)
2203 .addUse(GR->getSPIRVTypeID(TempType))
2204 .addUse(SampledImage)
2205 .addUse(Call->Arguments[2]) // Coordinate.
2206 .addImm(SPIRV::ImageOperand::Lod)
2207 .addUse(Lod);
2208 MIRBuilder.buildInstr(SPIRV::OpCompositeExtract)
2209 .addDef(Call->ReturnRegister)
2210 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2211 .addUse(TempRegister)
2212 .addImm(0);
2213 } else {
2214 MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
2215 .addDef(Call->ReturnRegister)
2216 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2217 .addUse(SampledImage)
2218 .addUse(Call->Arguments[2]) // Coordinate.
2219 .addImm(SPIRV::ImageOperand::Lod)
2220 .addUse(Lod);
2221 }
2222 } else if (HasMsaa) {
2223 MIRBuilder.buildInstr(SPIRV::OpImageRead)
2224 .addDef(Call->ReturnRegister)
2225 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2226 .addUse(Image)
2227 .addUse(Call->Arguments[1]) // Coordinate.
2228 .addImm(SPIRV::ImageOperand::Sample)
2229 .addUse(Call->Arguments[2]);
2230 } else {
2231 MIRBuilder.buildInstr(SPIRV::OpImageRead)
2232 .addDef(Call->ReturnRegister)
2233 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2234 .addUse(Image)
2235 .addUse(Call->Arguments[1]); // Coordinate.
2236 }
2237 return true;
2238}
2239
2240static bool generateWriteImageInst(const SPIRV::IncomingCall *Call,
2241 MachineIRBuilder &MIRBuilder,
2242 SPIRVGlobalRegistry *GR) {
2243 if (Call->isSpirvOp())
2244 return buildOpFromWrapper(MIRBuilder, SPIRV::OpImageWrite, Call,
2245 Register(0));
2246 MIRBuilder.buildInstr(SPIRV::OpImageWrite)
2247 .addUse(Call->Arguments[0]) // Image.
2248 .addUse(Call->Arguments[1]) // Coordinate.
2249 .addUse(Call->Arguments[2]); // Texel.
2250 return true;
2251}
2252
2253static bool generateSampleImageInst(const StringRef DemangledCall,
2254                                    const SPIRV::IncomingCall *Call,
2255 MachineIRBuilder &MIRBuilder,
2256 SPIRVGlobalRegistry *GR) {
2257 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2258 if (Call->Builtin->Name.contains_insensitive(
2259 "__translate_sampler_initializer")) {
2260 // Build sampler literal.
2261 uint64_t Bitmask = getIConstVal(Call->Arguments[0], MRI);
2262 Register Sampler = GR->buildConstantSampler(
2263 Call->ReturnRegister, getSamplerAddressingModeFromBitmask(Bitmask),
2264        getSamplerParamFromBitmask(Bitmask),
2265 getSamplerFilterModeFromBitmask(Bitmask), MIRBuilder);
2266 return Sampler.isValid();
2267 } else if (Call->Builtin->Name.contains_insensitive("__spirv_SampledImage")) {
2268 // Create OpSampledImage.
2269 Register Image = Call->Arguments[0];
2270 SPIRVType *ImageType = GR->getSPIRVTypeForVReg(Image);
2271 SPIRVType *SampledImageType =
2272 GR->getOrCreateOpTypeSampledImage(ImageType, MIRBuilder);
2273 Register SampledImage =
2274 Call->ReturnRegister.isValid()
2275 ? Call->ReturnRegister
2276 : MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2277 MIRBuilder.buildInstr(SPIRV::OpSampledImage)
2278 .addDef(SampledImage)
2279 .addUse(GR->getSPIRVTypeID(SampledImageType))
2280 .addUse(Image)
2281 .addUse(Call->Arguments[1]); // Sampler.
2282 return true;
2283 } else if (Call->Builtin->Name.contains_insensitive(
2284 "__spirv_ImageSampleExplicitLod")) {
2285 // Sample an image using an explicit level of detail.
2286 std::string ReturnType = DemangledCall.str();
2287 if (DemangledCall.contains("_R")) {
2288 ReturnType = ReturnType.substr(ReturnType.find("_R") + 2);
2289 ReturnType = ReturnType.substr(0, ReturnType.find('('));
2290 }
2291 SPIRVType *Type =
2292 Call->ReturnType
2293 ? Call->ReturnType
2294 : GR->getOrCreateSPIRVTypeByName(ReturnType, MIRBuilder, true);
2295 if (!Type) {
2296 std::string DiagMsg =
2297 "Unable to recognize SPIRV type name: " + ReturnType;
2298 report_fatal_error(DiagMsg.c_str());
2299 }
2300 MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
2301 .addDef(Call->ReturnRegister)
2302        .addUse(GR->getSPIRVTypeID(Type))
2303 .addUse(Call->Arguments[0]) // Image.
2304 .addUse(Call->Arguments[1]) // Coordinate.
2305 .addImm(SPIRV::ImageOperand::Lod)
2306 .addUse(Call->Arguments[3]);
2307 return true;
2308 }
2309 return false;
2310}
2311
2312static bool generateSelectInst(const SPIRV::IncomingCall *Call,
2313 MachineIRBuilder &MIRBuilder) {
2314 MIRBuilder.buildSelect(Call->ReturnRegister, Call->Arguments[0],
2315 Call->Arguments[1], Call->Arguments[2]);
2316 return true;
2317}
2318
2319static bool generateConstructInst(const SPIRV::IncomingCall *Call,
2320 MachineIRBuilder &MIRBuilder,
2321 SPIRVGlobalRegistry *GR) {
2322 createContinuedInstructions(MIRBuilder, SPIRV::OpCompositeConstruct, 3,
2323 SPIRV::OpCompositeConstructContinuedINTEL,
2324 Call->Arguments, Call->ReturnRegister,
2325 GR->getSPIRVTypeID(Call->ReturnType));
2326 return true;
2327}
2328
2329static bool generateCoopMatrInst(const SPIRV::IncomingCall *Call,
2330 MachineIRBuilder &MIRBuilder,
2331 SPIRVGlobalRegistry *GR) {
2332 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2333 unsigned Opcode =
2334 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2335 bool IsSet = Opcode != SPIRV::OpCooperativeMatrixStoreKHR &&
2336 Opcode != SPIRV::OpCooperativeMatrixStoreCheckedINTEL &&
2337 Opcode != SPIRV::OpCooperativeMatrixPrefetchINTEL;
2338 unsigned ArgSz = Call->Arguments.size();
2339 unsigned LiteralIdx = 0;
2340 switch (Opcode) {
2341  // The memory operand is optional and is a literal.
2342 case SPIRV::OpCooperativeMatrixLoadKHR:
2343 LiteralIdx = ArgSz > 3 ? 3 : 0;
2344 break;
2345 case SPIRV::OpCooperativeMatrixStoreKHR:
2346 LiteralIdx = ArgSz > 4 ? 4 : 0;
2347 break;
2348 case SPIRV::OpCooperativeMatrixLoadCheckedINTEL:
2349 LiteralIdx = ArgSz > 7 ? 7 : 0;
2350 break;
2351 case SPIRV::OpCooperativeMatrixStoreCheckedINTEL:
2352 LiteralIdx = ArgSz > 8 ? 8 : 0;
2353 break;
2354  // The Cooperative Matrix Operands operand is optional and is a literal.
2355 case SPIRV::OpCooperativeMatrixMulAddKHR:
2356 LiteralIdx = ArgSz > 3 ? 3 : 0;
2357 break;
2358 };
2359
2360  SmallVector<uint32_t, 1> ImmArgs;
2361 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2362 if (Opcode == SPIRV::OpCooperativeMatrixPrefetchINTEL) {
2363 const uint32_t CacheLevel = getConstFromIntrinsic(Call->Arguments[3], MRI);
2364 auto MIB = MIRBuilder.buildInstr(SPIRV::OpCooperativeMatrixPrefetchINTEL)
2365 .addUse(Call->Arguments[0]) // pointer
2366 .addUse(Call->Arguments[1]) // rows
2367 .addUse(Call->Arguments[2]) // columns
2368 .addImm(CacheLevel) // cache level
2369 .addUse(Call->Arguments[4]); // memory layout
2370 if (ArgSz > 5)
2371 MIB.addUse(Call->Arguments[5]); // stride
2372 if (ArgSz > 6) {
2373 const uint32_t MemOp = getConstFromIntrinsic(Call->Arguments[6], MRI);
2374 MIB.addImm(MemOp); // memory operand
2375 }
2376 return true;
2377 }
2378 if (LiteralIdx > 0)
2379 ImmArgs.push_back(getConstFromIntrinsic(Call->Arguments[LiteralIdx], MRI));
2380 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
2381 if (Opcode == SPIRV::OpCooperativeMatrixLengthKHR) {
2382 SPIRVType *CoopMatrType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
2383 if (!CoopMatrType)
2384 report_fatal_error("Can't find a register's type definition");
2385 MIRBuilder.buildInstr(Opcode)
2386 .addDef(Call->ReturnRegister)
2387 .addUse(TypeReg)
2388 .addUse(CoopMatrType->getOperand(0).getReg());
2389 return true;
2390 }
2391 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2392 IsSet ? TypeReg : Register(0), ImmArgs);
2393}
2394
2395static bool generateSpecConstantInst(const SPIRV::IncomingCall *Call,
2396 MachineIRBuilder &MIRBuilder,
2397 SPIRVGlobalRegistry *GR) {
2398 // Lookup the instruction opcode in the TableGen records.
2399 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2400 unsigned Opcode =
2401 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2402 const MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2403
2404 switch (Opcode) {
2405 case SPIRV::OpSpecConstant: {
2406 // Build the SpecID decoration.
2407 unsigned SpecId =
2408 static_cast<unsigned>(getIConstVal(Call->Arguments[0], MRI));
2409 buildOpDecorate(Call->ReturnRegister, MIRBuilder, SPIRV::Decoration::SpecId,
2410 {SpecId});
2411 // Determine the constant MI.
2412 Register ConstRegister = Call->Arguments[1];
2413 const MachineInstr *Const = getDefInstrMaybeConstant(ConstRegister, MRI);
2414 assert(Const &&
2415 (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
2416 Const->getOpcode() == TargetOpcode::G_FCONSTANT) &&
2417 "Argument should be either an int or floating-point constant");
2418 // Determine the opcode and built the OpSpec MI.
2419 const MachineOperand &ConstOperand = Const->getOperand(1);
2420 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeBool) {
2421 assert(ConstOperand.isCImm() && "Int constant operand is expected");
2422 Opcode = ConstOperand.getCImm()->getValue().getZExtValue()
2423 ? SPIRV::OpSpecConstantTrue
2424 : SPIRV::OpSpecConstantFalse;
2425 }
2426 auto MIB = MIRBuilder.buildInstr(Opcode)
2427 .addDef(Call->ReturnRegister)
2428 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
2429
2430 if (Call->ReturnType->getOpcode() != SPIRV::OpTypeBool) {
2431 if (Const->getOpcode() == TargetOpcode::G_CONSTANT)
2432 addNumImm(ConstOperand.getCImm()->getValue(), MIB);
2433 else
2434 addNumImm(ConstOperand.getFPImm()->getValueAPF().bitcastToAPInt(), MIB);
2435 }
2436 return true;
2437 }
2438 case SPIRV::OpSpecConstantComposite: {
2439 createContinuedInstructions(MIRBuilder, Opcode, 3,
2440 SPIRV::OpSpecConstantCompositeContinuedINTEL,
2441 Call->Arguments, Call->ReturnRegister,
2442 GR->getSPIRVTypeID(Call->ReturnType));
2443 return true;
2444 }
2445 default:
2446 return false;
2447 }
2448}
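// For illustration (a sketch): `__spirv_SpecConstant(5, 42)` with an i32
// result would roughly produce:
//   OpDecorate %res SpecId 5
//   %res = OpSpecConstant %int 42
// while a bool-typed result selects OpSpecConstantTrue/OpSpecConstantFalse.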
2449
2450static bool generateExtendedBitOpsInst(const SPIRV::IncomingCall *Call,
2451 MachineIRBuilder &MIRBuilder,
2452 SPIRVGlobalRegistry *GR) {
2453 // Lookup the instruction opcode in the TableGen records.
2454 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2455 unsigned Opcode =
2456 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2457
2458 return buildExtendedBitOpsInst(Call, Opcode, MIRBuilder, GR);
2459}
2460
2461static bool generateBindlessImageINTELInst(const SPIRV::IncomingCall *Call,
2462 MachineIRBuilder &MIRBuilder,
2463 SPIRVGlobalRegistry *GR) {
2464 // Lookup the instruction opcode in the TableGen records.
2465 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2466 unsigned Opcode =
2467 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2468
2469 return buildBindlessImageINTELInst(Call, Opcode, MIRBuilder, GR);
2470}
2471
2472static bool generateBlockingPipesInst(const SPIRV::IncomingCall *Call,
2473 MachineIRBuilder &MIRBuilder,
2474 SPIRVGlobalRegistry *GR) {
2475 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2476 unsigned Opcode =
2477 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2478 return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0));
2479}
2480
2481static bool buildAPFixedPointInst(const SPIRV::IncomingCall *Call,
2482 unsigned Opcode, MachineIRBuilder &MIRBuilder,
2483 SPIRVGlobalRegistry *GR) {
2484 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2485  SmallVector<uint32_t, 5> ImmArgs;
2486 Register InputReg = Call->Arguments[0];
2487 const Type *RetTy = GR->getTypeForSPIRVType(Call->ReturnType);
2488 bool IsSRet = RetTy->isVoidTy();
2489
2490 if (IsSRet) {
2491 const LLT ValTy = MRI->getType(InputReg);
2492 Register ActualRetValReg = MRI->createGenericVirtualRegister(ValTy);
2493 SPIRVType *InstructionType =
2494 GR->getPointeeType(GR->getSPIRVTypeForVReg(InputReg));
2495 InputReg = Call->Arguments[1];
2496 auto InputType = GR->getTypeForSPIRVType(GR->getSPIRVTypeForVReg(InputReg));
2497 Register PtrInputReg;
2498 if (InputType->getTypeID() == llvm::Type::TypeID::TypedPointerTyID) {
2499 LLT InputLLT = MRI->getType(InputReg);
2500 PtrInputReg = MRI->createGenericVirtualRegister(InputLLT);
2501 SPIRVType *PtrType =
2502 GR->getPointeeType(GR->getSPIRVTypeForVReg(InputReg));
2503 MachineMemOperand *MMO1 = MIRBuilder.getMF().getMachineMemOperand(
2504          MachinePointerInfo(), MachineMemOperand::MOLoad,
2505 InputLLT.getSizeInBytes(), Align(4));
2506 MIRBuilder.buildLoad(PtrInputReg, InputReg, *MMO1);
2507 MRI->setRegClass(PtrInputReg, &SPIRV::iIDRegClass);
2508 GR->assignSPIRVTypeToVReg(PtrType, PtrInputReg, MIRBuilder.getMF());
2509 }
2510
2511 for (unsigned index = 2; index < 7; index++) {
2512 ImmArgs.push_back(getConstFromIntrinsic(Call->Arguments[index], MRI));
2513 }
2514
2515 // Emit the instruction
2516 auto MIB = MIRBuilder.buildInstr(Opcode)
2517 .addDef(ActualRetValReg)
2518 .addUse(GR->getSPIRVTypeID(InstructionType));
2519 if (PtrInputReg)
2520 MIB.addUse(PtrInputReg);
2521 else
2522 MIB.addUse(InputReg);
2523
2524 for (uint32_t Imm : ImmArgs)
2525 MIB.addImm(Imm);
2526 unsigned Size = ValTy.getSizeInBytes();
2527 // Store result to the pointer passed in Arg[0]
2528 MachineMemOperand *MMO = MIRBuilder.getMF().getMachineMemOperand(
2529        MachinePointerInfo(), MachineMemOperand::MOStore, Size, Align(4));
2530 MRI->setRegClass(ActualRetValReg, &SPIRV::pIDRegClass);
2531 MIRBuilder.buildStore(ActualRetValReg, Call->Arguments[0], *MMO);
2532 return true;
2533 } else {
2534 for (unsigned index = 1; index < 6; index++)
2535 ImmArgs.push_back(getConstFromIntrinsic(Call->Arguments[index], MRI));
2536
2537 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2538 GR->getSPIRVTypeID(Call->ReturnType), ImmArgs);
2539 }
2540}
2541
2542static bool generateAPFixedPointInst(const SPIRV::IncomingCall *Call,
2543 MachineIRBuilder &MIRBuilder,
2544 SPIRVGlobalRegistry *GR) {
2545 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2546 unsigned Opcode =
2547 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2548
2549 return buildAPFixedPointInst(Call, Opcode, MIRBuilder, GR);
2550}
2551
2552static bool
2553generateTernaryBitwiseFunctionINTELInst(const SPIRV::IncomingCall *Call,
2554 MachineIRBuilder &MIRBuilder,
2555 SPIRVGlobalRegistry *GR) {
2556 // Lookup the instruction opcode in the TableGen records.
2557 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2558 unsigned Opcode =
2559 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2560
2561 return buildTernaryBitwiseFunctionINTELInst(Call, Opcode, MIRBuilder, GR);
2562}
2563
2564static bool generateImageChannelDataTypeInst(const SPIRV::IncomingCall *Call,
2565 MachineIRBuilder &MIRBuilder,
2566 SPIRVGlobalRegistry *GR) {
2567 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2568 unsigned Opcode =
2569 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2570
2571 return buildImageChannelDataTypeInst(Call, Opcode, MIRBuilder, GR);
2572}
2573
2574static bool generate2DBlockIOINTELInst(const SPIRV::IncomingCall *Call,
2575 MachineIRBuilder &MIRBuilder,
2576 SPIRVGlobalRegistry *GR) {
2577 // Lookup the instruction opcode in the TableGen records.
2578 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2579 unsigned Opcode =
2580 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2581
2582 return build2DBlockIOINTELInst(Call, Opcode, MIRBuilder, GR);
2583}
2584
2585static bool generatePipeInst(const SPIRV::IncomingCall *Call,
2586 MachineIRBuilder &MIRBuilder,
2587 SPIRVGlobalRegistry *GR) {
2588 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2589 unsigned Opcode =
2590 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2591
2592 unsigned Scope = SPIRV::Scope::Workgroup;
2593 if (Builtin->Name.contains("sub_group"))
2594 Scope = SPIRV::Scope::Subgroup;
2595
2596 return buildPipeInst(Call, Opcode, Scope, MIRBuilder, GR);
2597}
2598
2599static bool generatePredicatedLoadStoreInst(const SPIRV::IncomingCall *Call,
2600 MachineIRBuilder &MIRBuilder,
2601 SPIRVGlobalRegistry *GR) {
2602 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2603 unsigned Opcode =
2604 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2605
2606 bool IsSet = Opcode != SPIRV::OpPredicatedStoreINTEL;
2607 unsigned ArgSz = Call->Arguments.size();
2608  SmallVector<uint32_t, 1> ImmArgs;
2609 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2610  // The memory operand is optional and is a literal.
2611 if (ArgSz > 3)
2612 ImmArgs.push_back(
2613 getConstFromIntrinsic(Call->Arguments[/*Literal index*/ 3], MRI));
2614
2615 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
2616 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2617 IsSet ? TypeReg : Register(0), ImmArgs);
2618}
2619
2620static bool buildNDRange(const SPIRV::IncomingCall *Call,
2621 MachineIRBuilder &MIRBuilder,
2622 SPIRVGlobalRegistry *GR) {
2623 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2624 SPIRVType *PtrType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
2625 assert(PtrType->getOpcode() == SPIRV::OpTypePointer &&
2626 PtrType->getOperand(2).isReg());
2627 Register TypeReg = PtrType->getOperand(2).getReg();
2628  SPIRVType *StructType = GR->getSPIRVTypeForVReg(TypeReg);
2629 MachineFunction &MF = MIRBuilder.getMF();
2630 Register TmpReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2631 GR->assignSPIRVTypeToVReg(StructType, TmpReg, MF);
2632  // Skip the first arg, it's the destination pointer. OpBuildNDRange takes
2633  // three other arguments, so pass a zero constant when one is absent.
2634 unsigned NumArgs = Call->Arguments.size();
2635 assert(NumArgs >= 2);
2636 Register GlobalWorkSize = Call->Arguments[NumArgs < 4 ? 1 : 2];
2637 Register LocalWorkSize =
2638 NumArgs == 2 ? Register(0) : Call->Arguments[NumArgs < 4 ? 2 : 3];
2639 Register GlobalWorkOffset = NumArgs <= 3 ? Register(0) : Call->Arguments[1];
2640 if (NumArgs < 4) {
2641 Register Const;
2642 SPIRVType *SpvTy = GR->getSPIRVTypeForVReg(GlobalWorkSize);
2643 if (SpvTy->getOpcode() == SPIRV::OpTypePointer) {
2644 MachineInstr *DefInstr = MRI->getUniqueVRegDef(GlobalWorkSize);
2645 assert(DefInstr && isSpvIntrinsic(*DefInstr, Intrinsic::spv_gep) &&
2646 DefInstr->getOperand(3).isReg());
2647 Register GWSPtr = DefInstr->getOperand(3).getReg();
2648 // TODO: Maybe simplify generation of the type of the fields.
2649 unsigned Size = Call->Builtin->Name == "ndrange_3D" ? 3 : 2;
2650 unsigned BitWidth = GR->getPointerSize() == 64 ? 64 : 32;
2651      Type *BaseTy = IntegerType::get(MF.getFunction().getContext(), BitWidth);
2652 Type *FieldTy = ArrayType::get(BaseTy, Size);
2653 SPIRVType *SpvFieldTy = GR->getOrCreateSPIRVType(
2654 FieldTy, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, true);
2655 GlobalWorkSize = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2656 GR->assignSPIRVTypeToVReg(SpvFieldTy, GlobalWorkSize, MF);
2657 MIRBuilder.buildInstr(SPIRV::OpLoad)
2658 .addDef(GlobalWorkSize)
2659 .addUse(GR->getSPIRVTypeID(SpvFieldTy))
2660 .addUse(GWSPtr);
2661 const SPIRVSubtarget &ST =
2662 cast<SPIRVSubtarget>(MIRBuilder.getMF().getSubtarget());
2663 Const = GR->getOrCreateConstIntArray(0, Size, *MIRBuilder.getInsertPt(),
2664 SpvFieldTy, *ST.getInstrInfo());
2665 } else {
2666 Const = GR->buildConstantInt(0, MIRBuilder, SpvTy, true);
2667 }
2668 if (!LocalWorkSize.isValid())
2669 LocalWorkSize = Const;
2670 if (!GlobalWorkOffset.isValid())
2671 GlobalWorkOffset = Const;
2672 }
2673 assert(LocalWorkSize.isValid() && GlobalWorkOffset.isValid());
2674 MIRBuilder.buildInstr(SPIRV::OpBuildNDRange)
2675 .addDef(TmpReg)
2676 .addUse(TypeReg)
2677 .addUse(GlobalWorkSize)
2678 .addUse(LocalWorkSize)
2679 .addUse(GlobalWorkOffset);
2680 return MIRBuilder.buildInstr(SPIRV::OpStore)
2681 .addUse(Call->Arguments[0])
2682 .addUse(TmpReg);
2683}
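// For illustration (a sketch): `ndrange_1D(gws)` stored through a result
// pointer roughly becomes:
//   %nd = OpBuildNDRange %ndrange %gws %zero %zero
//   OpStore %resptr %nd
// with zero constants standing in for the absent local size and offset.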
2684
2685// TODO: maybe move to the global register.
2686static SPIRVType *
2687getOrCreateSPIRVDeviceEventPointer(MachineIRBuilder &MIRBuilder,
2688 SPIRVGlobalRegistry *GR) {
2689 LLVMContext &Context = MIRBuilder.getMF().getFunction().getContext();
2690 unsigned SC1 = storageClassToAddressSpace(SPIRV::StorageClass::Generic);
2691 Type *PtrType = PointerType::get(Context, SC1);
2692 return GR->getOrCreateSPIRVType(PtrType, MIRBuilder,
2693 SPIRV::AccessQualifier::ReadWrite, true);
2694}
2695
2696static bool buildEnqueueKernel(const SPIRV::IncomingCall *Call,
2697 MachineIRBuilder &MIRBuilder,
2698 SPIRVGlobalRegistry *GR) {
2699 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2700 const DataLayout &DL = MIRBuilder.getDataLayout();
2701 bool IsSpirvOp = Call->isSpirvOp();
2702 bool HasEvents = Call->Builtin->Name.contains("events") || IsSpirvOp;
2703 const SPIRVType *Int32Ty = GR->getOrCreateSPIRVIntegerType(32, MIRBuilder);
2704
2705  // Build the vararg operands before emitting OpEnqueueKernel.
2706  // Local size arguments: sizes of the block invoke arguments. Clang generates
2707  // local size operands as an array, so we need to unpack them.
2708 SmallVector<Register, 16> LocalSizes;
2709 if (Call->Builtin->Name.contains("_varargs") || IsSpirvOp) {
2710 const unsigned LocalSizeArrayIdx = HasEvents ? 9 : 6;
2711 Register GepReg = Call->Arguments[LocalSizeArrayIdx];
2712 MachineInstr *GepMI = MRI->getUniqueVRegDef(GepReg);
2713 assert(isSpvIntrinsic(*GepMI, Intrinsic::spv_gep) &&
2714 GepMI->getOperand(3).isReg());
2715 Register ArrayReg = GepMI->getOperand(3).getReg();
2716 MachineInstr *ArrayMI = MRI->getUniqueVRegDef(ArrayReg);
2717 const Type *LocalSizeTy = getMachineInstrType(ArrayMI);
2718 assert(LocalSizeTy && "Local size type is expected");
2719 const uint64_t LocalSizeNum =
2720 cast<ArrayType>(LocalSizeTy)->getNumElements();
2721 unsigned SC = storageClassToAddressSpace(SPIRV::StorageClass::Generic);
2722 const LLT LLType = LLT::pointer(SC, GR->getPointerSize());
2723 const SPIRVType *PointerSizeTy = GR->getOrCreateSPIRVPointerType(
2724 Int32Ty, MIRBuilder, SPIRV::StorageClass::Function);
2725 for (unsigned I = 0; I < LocalSizeNum; ++I) {
2726 Register Reg = MRI->createVirtualRegister(&SPIRV::pIDRegClass);
2727 MRI->setType(Reg, LLType);
2728 GR->assignSPIRVTypeToVReg(PointerSizeTy, Reg, MIRBuilder.getMF());
2729 auto GEPInst = MIRBuilder.buildIntrinsic(
2730 Intrinsic::spv_gep, ArrayRef<Register>{Reg}, true, false);
2731 GEPInst
2732 .addImm(GepMI->getOperand(2).getImm()) // In bound.
2733 .addUse(ArrayMI->getOperand(0).getReg()) // Alloca.
2734 .addUse(buildConstantIntReg32(0, MIRBuilder, GR)) // Indices.
2735 .addUse(buildConstantIntReg32(I, MIRBuilder, GR));
2736 LocalSizes.push_back(Reg);
2737 }
2738 }
2739
2740 // SPIRV OpEnqueueKernel instruction has 10+ arguments.
2741 auto MIB = MIRBuilder.buildInstr(SPIRV::OpEnqueueKernel)
2742 .addDef(Call->ReturnRegister)
2743                 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
2744
2745 // Copy all arguments before block invoke function pointer.
2746 const unsigned BlockFIdx = HasEvents ? 6 : 3;
2747 for (unsigned i = 0; i < BlockFIdx; i++)
2748 MIB.addUse(Call->Arguments[i]);
2749
2750 // If there are no event arguments in the original call, add dummy ones.
2751 if (!HasEvents) {
2752 MIB.addUse(buildConstantIntReg32(0, MIRBuilder, GR)); // Dummy num events.
2753 Register NullPtr = GR->getOrCreateConstNullPtr(
2754 MIRBuilder, getOrCreateSPIRVDeviceEventPointer(MIRBuilder, GR));
2755 MIB.addUse(NullPtr); // Dummy wait events.
2756 MIB.addUse(NullPtr); // Dummy ret event.
2757 }
2758
2759 MachineInstr *BlockMI = getBlockStructInstr(Call->Arguments[BlockFIdx], MRI);
2760 assert(BlockMI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
2761 // Invoke: Pointer to invoke function.
2762 MIB.addGlobalAddress(BlockMI->getOperand(1).getGlobal());
2763
2764 Register BlockLiteralReg = Call->Arguments[BlockFIdx + 1];
2765 // Param: Pointer to block literal.
2766 MIB.addUse(BlockLiteralReg);
2767
2768 Type *PType = const_cast<Type *>(getBlockStructType(BlockLiteralReg, MRI));
2769 // TODO: these numbers should be obtained from block literal structure.
2770 // Param Size: Size of block literal structure.
2771 MIB.addUse(buildConstantIntReg32(DL.getTypeStoreSize(PType), MIRBuilder, GR));
2772  // Param Alignment: Alignment of block literal structure.
2773 MIB.addUse(buildConstantIntReg32(DL.getPrefTypeAlign(PType).value(),
2774 MIRBuilder, GR));
2775
2776 for (unsigned i = 0; i < LocalSizes.size(); i++)
2777 MIB.addUse(LocalSizes[i]);
2778 return true;
2779}
2780
2781static bool generateEnqueueInst(const SPIRV::IncomingCall *Call,
2782 MachineIRBuilder &MIRBuilder,
2783 SPIRVGlobalRegistry *GR) {
2784 // Lookup the instruction opcode in the TableGen records.
2785 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2786 unsigned Opcode =
2787 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2788
2789 switch (Opcode) {
2790 case SPIRV::OpRetainEvent:
2791 case SPIRV::OpReleaseEvent:
2792 return MIRBuilder.buildInstr(Opcode).addUse(Call->Arguments[0]);
2793 case SPIRV::OpCreateUserEvent:
2794 case SPIRV::OpGetDefaultQueue:
2795 return MIRBuilder.buildInstr(Opcode)
2796 .addDef(Call->ReturnRegister)
2797 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
2798 case SPIRV::OpIsValidEvent:
2799 return MIRBuilder.buildInstr(Opcode)
2800 .addDef(Call->ReturnRegister)
2801 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2802 .addUse(Call->Arguments[0]);
2803 case SPIRV::OpSetUserEventStatus:
2804 return MIRBuilder.buildInstr(Opcode)
2805 .addUse(Call->Arguments[0])
2806 .addUse(Call->Arguments[1]);
2807 case SPIRV::OpCaptureEventProfilingInfo:
2808 return MIRBuilder.buildInstr(Opcode)
2809 .addUse(Call->Arguments[0])
2810 .addUse(Call->Arguments[1])
2811 .addUse(Call->Arguments[2]);
2812 case SPIRV::OpBuildNDRange:
2813 return buildNDRange(Call, MIRBuilder, GR);
2814 case SPIRV::OpEnqueueKernel:
2815 return buildEnqueueKernel(Call, MIRBuilder, GR);
2816 default:
2817 return false;
2818 }
2819}
2820
2821static bool generateAsyncCopy(const SPIRV::IncomingCall *Call,
2822 MachineIRBuilder &MIRBuilder,
2823 SPIRVGlobalRegistry *GR) {
2824 // Lookup the instruction opcode in the TableGen records.
2825 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2826 unsigned Opcode =
2827 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2828
2829 bool IsSet = Opcode == SPIRV::OpGroupAsyncCopy;
2830 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
2831 if (Call->isSpirvOp())
2832 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2833 IsSet ? TypeReg : Register(0));
2834
2835 auto Scope = buildConstantIntReg32(SPIRV::Scope::Workgroup, MIRBuilder, GR);
2836
2837 switch (Opcode) {
2838 case SPIRV::OpGroupAsyncCopy: {
2839 SPIRVType *NewType =
2840 Call->ReturnType->getOpcode() == SPIRV::OpTypeEvent
2841 ? nullptr
2842 : GR->getOrCreateSPIRVTypeByName("spirv.Event", MIRBuilder, true);
2843 Register TypeReg = GR->getSPIRVTypeID(NewType ? NewType : Call->ReturnType);
2844 unsigned NumArgs = Call->Arguments.size();
2845 Register EventReg = Call->Arguments[NumArgs - 1];
2846 bool Res = MIRBuilder.buildInstr(Opcode)
2847 .addDef(Call->ReturnRegister)
2848 .addUse(TypeReg)
2849 .addUse(Scope)
2850 .addUse(Call->Arguments[0])
2851 .addUse(Call->Arguments[1])
2852 .addUse(Call->Arguments[2])
2853 .addUse(Call->Arguments.size() > 4
2854 ? Call->Arguments[3]
2855 : buildConstantIntReg32(1, MIRBuilder, GR))
2856 .addUse(EventReg);
2857 if (NewType != nullptr)
2858 updateRegType(Call->ReturnRegister, nullptr, NewType, GR, MIRBuilder,
2859 MIRBuilder.getMF().getRegInfo());
2860 return Res;
2861 }
2862 case SPIRV::OpGroupWaitEvents:
2863 return MIRBuilder.buildInstr(Opcode)
2864 .addUse(Scope)
2865 .addUse(Call->Arguments[0])
2866 .addUse(Call->Arguments[1]);
2867 default:
2868 return false;
2869 }
2870}
2871
2872static bool generateConvertInst(const StringRef DemangledCall,
2873                                const SPIRV::IncomingCall *Call,
2874 MachineIRBuilder &MIRBuilder,
2875 SPIRVGlobalRegistry *GR) {
2876 // Lookup the conversion builtin in the TableGen records.
2877 const SPIRV::ConvertBuiltin *Builtin =
2878 SPIRV::lookupConvertBuiltin(Call->Builtin->Name, Call->Builtin->Set);
2879
2880 if (!Builtin && Call->isSpirvOp()) {
2881 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2882 unsigned Opcode =
2883 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2884 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2885 GR->getSPIRVTypeID(Call->ReturnType));
2886 }
2887
2888 assert(Builtin && "Conversion builtin not found.");
2889 if (Builtin->IsSaturated)
2890 buildOpDecorate(Call->ReturnRegister, MIRBuilder,
2891 SPIRV::Decoration::SaturatedConversion, {});
2892 if (Builtin->IsRounded)
2893 buildOpDecorate(Call->ReturnRegister, MIRBuilder,
2894 SPIRV::Decoration::FPRoundingMode,
2895 {(unsigned)Builtin->RoundingMode});
2896
2897 std::string NeedExtMsg; // no errors if empty
2898 bool IsRightComponentsNumber = true; // check if input/output accepts vectors
2899 unsigned Opcode = SPIRV::OpNop;
2900 if (GR->isScalarOrVectorOfType(Call->Arguments[0], SPIRV::OpTypeInt)) {
2901 // Int -> ...
2902 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) {
2903 // Int -> Int
2904 if (Builtin->IsSaturated)
2905 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpSatConvertUToS
2906 : SPIRV::OpSatConvertSToU;
2907 else
2908 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpUConvert
2909 : SPIRV::OpSConvert;
2910 } else if (GR->isScalarOrVectorOfType(Call->ReturnRegister,
2911 SPIRV::OpTypeFloat)) {
2912 // Int -> Float
2913 if (Builtin->IsBfloat16) {
2914 const auto *ST = static_cast<const SPIRVSubtarget *>(
2915 &MIRBuilder.getMF().getSubtarget());
2916 if (!ST->canUseExtension(
2917 SPIRV::Extension::SPV_INTEL_bfloat16_conversion))
2918 NeedExtMsg = "SPV_INTEL_bfloat16_conversion";
2919 IsRightComponentsNumber =
2920 GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
2921 GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
2922 Opcode = SPIRV::OpConvertBF16ToFINTEL;
2923 } else {
2924 bool IsSourceSigned =
2925 DemangledCall[DemangledCall.find_first_of('(') + 1] != 'u';
2926 Opcode = IsSourceSigned ? SPIRV::OpConvertSToF : SPIRV::OpConvertUToF;
2927 }
2928 }
2929 } else if (GR->isScalarOrVectorOfType(Call->Arguments[0],
2930 SPIRV::OpTypeFloat)) {
2931 // Float -> ...
2932 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) {
2933 // Float -> Int
2934 if (Builtin->IsBfloat16) {
2935 const auto *ST = static_cast<const SPIRVSubtarget *>(
2936 &MIRBuilder.getMF().getSubtarget());
2937 if (!ST->canUseExtension(
2938 SPIRV::Extension::SPV_INTEL_bfloat16_conversion))
2939 NeedExtMsg = "SPV_INTEL_bfloat16_conversion";
2940 IsRightComponentsNumber =
2941 GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
2942 GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
2943 Opcode = SPIRV::OpConvertFToBF16INTEL;
2944 } else {
2945 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpConvertFToS
2946 : SPIRV::OpConvertFToU;
2947 }
2948 } else if (GR->isScalarOrVectorOfType(Call->ReturnRegister,
2949 SPIRV::OpTypeFloat)) {
2950 if (Builtin->IsTF32) {
2951 const auto *ST = static_cast<const SPIRVSubtarget *>(
2952 &MIRBuilder.getMF().getSubtarget());
2953 if (!ST->canUseExtension(
2954 SPIRV::Extension::SPV_INTEL_tensor_float32_conversion))
2955 NeedExtMsg = "SPV_INTEL_tensor_float32_conversion";
2956 IsRightComponentsNumber =
2957 GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
2958 GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
2959 Opcode = SPIRV::OpRoundFToTF32INTEL;
2960 } else {
2961 // Float -> Float
2962 Opcode = SPIRV::OpFConvert;
2963 }
2964 }
2965 }
2966
2967 if (!NeedExtMsg.empty()) {
2968 std::string DiagMsg = std::string(Builtin->Name) +
2969 ": the builtin requires the following SPIR-V "
2970 "extension: " +
2971 NeedExtMsg;
2972 report_fatal_error(DiagMsg.c_str(), false);
2973 }
2974 if (!IsRightComponentsNumber) {
2975 std::string DiagMsg =
2976 std::string(Builtin->Name) +
2977 ": result and argument must have the same number of components";
2978 report_fatal_error(DiagMsg.c_str(), false);
2979 }
2980 assert(Opcode != SPIRV::OpNop &&
2981 "Conversion between the types not implemented!");
2982
2983 MIRBuilder.buildInstr(Opcode)
2984 .addDef(Call->ReturnRegister)
2985 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2986 .addUse(Call->Arguments[0]);
2987 return true;
2988}
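// For illustration (a sketch): `convert_int_sat_rte(float x)` would roughly
// lower to:
//   OpDecorate %res SaturatedConversion
//   OpDecorate %res FPRoundingMode RTE
//   %res = OpConvertFToS %int %x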
2989
2990static bool generateVectorLoadStoreInst(const SPIRV::IncomingCall *Call,
2991 MachineIRBuilder &MIRBuilder,
2992 SPIRVGlobalRegistry *GR) {
2993 // Lookup the vector load/store builtin in the TableGen records.
2994 const SPIRV::VectorLoadStoreBuiltin *Builtin =
2995 SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name,
2996 Call->Builtin->Set);
2997 // Build extended instruction.
2998 auto MIB =
2999 MIRBuilder.buildInstr(SPIRV::OpExtInst)
3000 .addDef(Call->ReturnRegister)
3001 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
3002 .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
3003 .addImm(Builtin->Number);
3004 for (auto Argument : Call->Arguments)
3005 MIB.addUse(Argument);
3006 if (Builtin->Name.contains("load") && Builtin->ElementCount > 1)
3007 MIB.addImm(Builtin->ElementCount);
3008
3009 // Rounding mode should be passed as a last argument in the MI for builtins
3010 // like "vstorea_halfn_r".
3011 if (Builtin->IsRounded)
3012 MIB.addImm(static_cast<uint32_t>(Builtin->RoundingMode));
3013 return true;
3014}
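// For illustration (a sketch): `vstorea_half4_rtz(v, offset, p)` becomes an
// OpExtInst of the corresponding OpenCL.std instruction with the call's
// operands, followed by the RTZ rounding-mode literal appended last; the
// element-count literal is only appended for the `load` flavors.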
3015
3017 MachineIRBuilder &MIRBuilder,
3018 SPIRVGlobalRegistry *GR) {
3019 const auto *Builtin = Call->Builtin;
3020 auto *MRI = MIRBuilder.getMRI();
3021 unsigned Opcode =
3022 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
3023 const Type *RetTy = GR->getTypeForSPIRVType(Call->ReturnType);
3024 bool IsVoid = RetTy->isVoidTy();
3025 auto MIB = MIRBuilder.buildInstr(Opcode);
3026 Register DestReg;
3027 if (IsVoid) {
3028 LLT PtrTy = MRI->getType(Call->Arguments[0]);
3029 DestReg = MRI->createGenericVirtualRegister(PtrTy);
3030 MRI->setRegClass(DestReg, &SPIRV::pIDRegClass);
3031 SPIRVType *PointeeTy =
3032 GR->getPointeeType(GR->getSPIRVTypeForVReg(Call->Arguments[0]));
3033 MIB.addDef(DestReg);
3034 MIB.addUse(GR->getSPIRVTypeID(PointeeTy));
3035 } else {
3036 MIB.addDef(Call->ReturnRegister);
3037 MIB.addUse(GR->getSPIRVTypeID(Call->ReturnType));
3038 }
3039 for (unsigned i = IsVoid ? 1 : 0; i < Call->Arguments.size(); ++i) {
3040 Register Arg = Call->Arguments[i];
3041 MachineInstr *DefMI = MRI->getUniqueVRegDef(Arg);
3042 if (DefMI->getOpcode() == TargetOpcode::G_CONSTANT &&
3043 DefMI->getOperand(1).isCImm()) {
3044 MIB.addImm(getConstFromIntrinsic(Arg, MRI));
3045 } else {
3046 MIB.addUse(Arg);
3047 }
3048 }
3049 if (IsVoid) {
3050 LLT PtrTy = MRI->getType(Call->Arguments[0]);
3051 MachineMemOperand *MMO = MIRBuilder.getMF().getMachineMemOperand(
3053 PtrTy.getSizeInBytes(), Align(4));
3054 MIRBuilder.buildStore(DestReg, Call->Arguments[0], *MMO);
3055 }
3056 return true;
3057}
3058
3059static bool generateLoadStoreInst(const SPIRV::IncomingCall *Call,
3060 MachineIRBuilder &MIRBuilder,
3061 SPIRVGlobalRegistry *GR) {
3062 // Lookup the instruction opcode in the TableGen records.
3063 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
3064 unsigned Opcode =
3065 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
3066 bool IsLoad = Opcode == SPIRV::OpLoad;
3067 // Build the instruction.
3068 auto MIB = MIRBuilder.buildInstr(Opcode);
3069 if (IsLoad) {
3070 MIB.addDef(Call->ReturnRegister);
3071 MIB.addUse(GR->getSPIRVTypeID(Call->ReturnType));
3072 }
3073 // Add a pointer to the value to load/store.
3074 MIB.addUse(Call->Arguments[0]);
3075 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
3076 // Add a value to store.
3077 if (!IsLoad)
3078 MIB.addUse(Call->Arguments[1]);
3079 // Add optional memory attributes and an alignment.
3080 unsigned NumArgs = Call->Arguments.size();
3081 if ((IsLoad && NumArgs >= 2) || NumArgs >= 3)
3082 MIB.addImm(getConstFromIntrinsic(Call->Arguments[IsLoad ? 1 : 2], MRI));
3083 if ((IsLoad && NumArgs >= 3) || NumArgs >= 4)
3084 MIB.addImm(getConstFromIntrinsic(Call->Arguments[IsLoad ? 2 : 3], MRI));
3085 return true;
3086}
3087
3088namespace SPIRV {
3089// Try to find a builtin function's attributes by its demangled name and
3090// return a tuple <builtin group, opcode, ext instruction number>, or the
3091// special tuple value <-1, 0, 0> if the builtin function is not found.
3092// Not all builtin functions are supported; only those with a ready-to-use
3093// opcode or instruction number defined in TableGen can be handled.
3094// TODO: consider a major rework of mapping demangled calls to builtin
3095// functions to unify the search and decrease the number of individual cases.
3096std::tuple<int, unsigned, unsigned>
3097mapBuiltinToOpcode(const StringRef DemangledCall,
3098 SPIRV::InstructionSet::InstructionSet Set) {
3099 Register Reg;
3101 std::unique_ptr<const IncomingCall> Call =
3102 lookupBuiltin(DemangledCall, Set, Reg, nullptr, Args);
3103 if (!Call)
3104 return std::make_tuple(-1, 0, 0);
3105
  switch (Call->Builtin->Group) {
  case SPIRV::Relational:
  case SPIRV::Atomic:
  case SPIRV::Barrier:
  case SPIRV::CastToPtr:
  case SPIRV::ImageMiscQuery:
  case SPIRV::SpecConstant:
  case SPIRV::Enqueue:
  case SPIRV::AsyncCopy:
  case SPIRV::LoadStore:
  case SPIRV::CoopMatr:
    if (const auto *R =
            SPIRV::lookupNativeBuiltin(Call->Builtin->Name, Call->Builtin->Set))
      return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
    break;
  case SPIRV::Extended:
    if (const auto *R = SPIRV::lookupExtendedBuiltin(Call->Builtin->Name,
                                                     Call->Builtin->Set))
      return std::make_tuple(Call->Builtin->Group, 0, R->Number);
    break;
  case SPIRV::VectorLoadStore:
    if (const auto *R = SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name,
                                                            Call->Builtin->Set))
      return std::make_tuple(SPIRV::Extended, 0, R->Number);
    break;
  case SPIRV::Group:
    if (const auto *R = SPIRV::lookupGroupBuiltin(Call->Builtin->Name))
      return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
    break;
  case SPIRV::AtomicFloating:
    if (const auto *R = SPIRV::lookupAtomicFloatingBuiltin(Call->Builtin->Name))
      return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
    break;
  case SPIRV::IntelSubgroups:
    if (const auto *R = SPIRV::lookupIntelSubgroupsBuiltin(Call->Builtin->Name))
      return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
    break;
  case SPIRV::GroupUniform:
    if (const auto *R = SPIRV::lookupGroupUniformBuiltin(Call->Builtin->Name))
      return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
    break;
  case SPIRV::IntegerDot:
    if (const auto *R =
            SPIRV::lookupIntegerDotProductBuiltin(Call->Builtin->Name))
      return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
    break;
  case SPIRV::WriteImage:
    return std::make_tuple(Call->Builtin->Group, SPIRV::OpImageWrite, 0);
  case SPIRV::Select:
    return std::make_tuple(Call->Builtin->Group, TargetOpcode::G_SELECT, 0);
  case SPIRV::Construct:
    return std::make_tuple(Call->Builtin->Group, SPIRV::OpCompositeConstruct,
                           0);
  case SPIRV::KernelClock:
    return std::make_tuple(Call->Builtin->Group, SPIRV::OpReadClockKHR, 0);
  default:
    return std::make_tuple(-1, 0, 0);
  }
  return std::make_tuple(-1, 0, 0);
}

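// Editor's note: a minimal usage sketch (not part of the original source);
// the demangled name is illustrative, and a group of -1 means "not mapped":
//
//   auto [Group, Opcode, ExtNo] =
//       SPIRV::mapBuiltinToOpcode("acos(float)",
//                                 SPIRV::InstructionSet::OpenCL_std);
//   if (Group == SPIRV::Extended)
//     ; // ExtNo holds the OpenCL.std extended-instruction number.
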
std::optional<bool> lowerBuiltin(const StringRef DemangledCall,
                                 SPIRV::InstructionSet::InstructionSet Set,
                                 MachineIRBuilder &MIRBuilder,
                                 const Register OrigRet, const Type *OrigRetTy,
                                 const SmallVectorImpl<Register> &Args,
                                 SPIRVGlobalRegistry *GR, const CallBase &CB) {
  LLVM_DEBUG(dbgs() << "Lowering builtin call: " << DemangledCall << "\n");

  // Lookup the builtin in the TableGen records.
  SPIRVType *SpvType = GR->getSPIRVTypeForVReg(OrigRet);
  assert(SpvType && "Inconsistent return register: expected valid type info");
  std::unique_ptr<const IncomingCall> Call =
      lookupBuiltin(DemangledCall, Set, OrigRet, SpvType, Args);

  if (!Call) {
    LLVM_DEBUG(dbgs() << "Builtin record was not found!\n");
    return std::nullopt;
  }

  // TODO: check if the provided args meet the builtin requirements.
  assert(Args.size() >= Call->Builtin->MinNumArgs &&
         "Too few arguments to generate the builtin");
  if (Call->Builtin->MaxNumArgs && Args.size() > Call->Builtin->MaxNumArgs)
    LLVM_DEBUG(dbgs() << "More arguments provided than required!\n");

  // Match the builtin with implementation based on the grouping.
  switch (Call->Builtin->Group) {
  case SPIRV::Extended:
    return generateExtInst(Call.get(), MIRBuilder, GR, CB);
  case SPIRV::Relational:
    return generateRelationalInst(Call.get(), MIRBuilder, GR);
  case SPIRV::Group:
    return generateGroupInst(Call.get(), MIRBuilder, GR);
  case SPIRV::Variable:
    return generateBuiltinVar(Call.get(), MIRBuilder, GR);
  case SPIRV::Atomic:
    return generateAtomicInst(Call.get(), MIRBuilder, GR);
  case SPIRV::AtomicFloating:
    return generateAtomicFloatingInst(Call.get(), MIRBuilder, GR);
  case SPIRV::Barrier:
    return generateBarrierInst(Call.get(), MIRBuilder, GR);
  case SPIRV::CastToPtr:
    return generateCastToPtrInst(Call.get(), MIRBuilder, GR);
  case SPIRV::Dot:
  case SPIRV::IntegerDot:
    return generateDotOrFMulInst(DemangledCall, Call.get(), MIRBuilder, GR);
  case SPIRV::Wave:
    return generateWaveInst(Call.get(), MIRBuilder, GR);
  case SPIRV::ICarryBorrow:
    return generateICarryBorrowInst(Call.get(), MIRBuilder, GR);
  case SPIRV::GetQuery:
    return generateGetQueryInst(Call.get(), MIRBuilder, GR);
  case SPIRV::ImageSizeQuery:
    return generateImageSizeQueryInst(Call.get(), MIRBuilder, GR);
  case SPIRV::ImageMiscQuery:
    return generateImageMiscQueryInst(Call.get(), MIRBuilder, GR);
  case SPIRV::ReadImage:
    return generateReadImageInst(DemangledCall, Call.get(), MIRBuilder, GR);
  case SPIRV::WriteImage:
    return generateWriteImageInst(Call.get(), MIRBuilder, GR);
  case SPIRV::SampleImage:
    return generateSampleImageInst(DemangledCall, Call.get(), MIRBuilder, GR);
  case SPIRV::Select:
    return generateSelectInst(Call.get(), MIRBuilder);
  case SPIRV::Construct:
    return generateConstructInst(Call.get(), MIRBuilder, GR);
  case SPIRV::SpecConstant:
    return generateSpecConstantInst(Call.get(), MIRBuilder, GR);
  case SPIRV::Enqueue:
    return generateEnqueueInst(Call.get(), MIRBuilder, GR);
  case SPIRV::AsyncCopy:
    return generateAsyncCopy(Call.get(), MIRBuilder, GR);
  case SPIRV::Convert:
    return generateConvertInst(DemangledCall, Call.get(), MIRBuilder, GR);
  case SPIRV::VectorLoadStore:
    return generateVectorLoadStoreInst(Call.get(), MIRBuilder, GR);
  case SPIRV::LoadStore:
    return generateLoadStoreInst(Call.get(), MIRBuilder, GR);
  case SPIRV::IntelSubgroups:
    return generateIntelSubgroupsInst(Call.get(), MIRBuilder, GR);
  case SPIRV::GroupUniform:
    return generateGroupUniformInst(Call.get(), MIRBuilder, GR);
  case SPIRV::KernelClock:
    return generateKernelClockInst(Call.get(), MIRBuilder, GR);
  case SPIRV::CoopMatr:
    return generateCoopMatrInst(Call.get(), MIRBuilder, GR);
  case SPIRV::ExtendedBitOps:
    return generateExtendedBitOpsInst(Call.get(), MIRBuilder, GR);
  case SPIRV::BindlessINTEL:
    return generateBindlessImageINTELInst(Call.get(), MIRBuilder, GR);
  case SPIRV::TernaryBitwiseINTEL:
    return generateTernaryBitwiseFunctionINTELInst(Call.get(), MIRBuilder, GR);
  case SPIRV::Block2DLoadStore:
    return generate2DBlockIOINTELInst(Call.get(), MIRBuilder, GR);
  case SPIRV::Pipe:
    return generatePipeInst(Call.get(), MIRBuilder, GR);
  case SPIRV::PredicatedLoadStore:
    return generatePredicatedLoadStoreInst(Call.get(), MIRBuilder, GR);
  case SPIRV::BlockingPipes:
    return generateBlockingPipesInst(Call.get(), MIRBuilder, GR);
  case SPIRV::ArbitraryPrecisionFixedPoint:
    return generateAPFixedPointInst(Call.get(), MIRBuilder, GR);
  case SPIRV::ImageChannelDataTypes:
    return generateImageChannelDataTypeInst(Call.get(), MIRBuilder, GR);
  case SPIRV::ArbitraryFloatingPoint:
    return generateAFPInst(Call.get(), MIRBuilder, GR);
  }
  return false;
}

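// Editor's note: a hypothetical caller sketch (not part of the original
// source; the identifiers are assumed to be in scope). The tri-state result
// distinguishes "not a builtin" from "a builtin that failed to lower":
//
//   if (std::optional<bool> Res = SPIRV::lowerBuiltin(
//           Demangled, Set, MIRBuilder, OrigRet, OrigRetTy, Args, GR, CB))
//     return *Res; // Recognized builtin: lowered (true) or failed (false).
//   // std::nullopt: not a builtin; lower as an ordinary call instead.
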
Type *parseBuiltinCallArgumentType(StringRef TypeStr, LLVMContext &Ctx) {
  // Parse strings representing OpenCL builtin types.
  if (hasBuiltinTypePrefix(TypeStr)) {
    // OpenCL builtin types in demangled call strings have the following
    // format: e.g. ocl_image2d_ro
    [[maybe_unused]] bool IsOCLBuiltinType = TypeStr.consume_front("ocl_");
    assert(IsOCLBuiltinType && "Invalid OpenCL builtin prefix");

    // Check if this is a pointer to a builtin type and not just a pointer
    // representing a builtin type. If it is a pointer to a builtin type, this
    // will require additional handling in the method calling
    // parseBuiltinCallArgumentBaseType(...), as this function only retrieves
    // the base types.
    if (TypeStr.ends_with("*"))
      TypeStr = TypeStr.slice(0, TypeStr.find_first_of(" *"));

    return parseBuiltinTypeNameToTargetExtType("opencl." + TypeStr.str() + "_t",
                                               Ctx);
  }

  // Parse the type name in either "typeN" or "type vector[N]" format, where
  // N is the number of elements of the vector.
  Type *BaseType;
  unsigned VecElts = 0;

  BaseType = parseBasicTypeName(TypeStr, Ctx);
  if (!BaseType)
    // Unable to recognize the SPIR-V type name.
    return nullptr;

  // Handle "typeN*" or "type vector[N]*".
  TypeStr.consume_back("*");

  if (TypeStr.consume_front(" vector["))
    TypeStr = TypeStr.substr(0, TypeStr.find(']'));

  TypeStr.getAsInteger(10, VecElts);
  if (VecElts > 0)
    BaseType = VectorType::get(
        BaseType->isVoidTy() ? Type::getInt8Ty(Ctx) : BaseType, VecElts, false);

  return BaseType;
}

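// Editor's note: usage sketches (not part of the original source), assuming
// an LLVMContext Ctx is in scope:
//
//   parseBuiltinCallArgumentType("float4", Ctx);          // <4 x float>
//   parseBuiltinCallArgumentType("float vector[4]", Ctx); // <4 x float>
//   parseBuiltinCallArgumentType("ocl_image2d_ro", Ctx);
//     // -> TargetExtType via "opencl.image2d_ro_t" and the TableGen mapping.
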
bool parseBuiltinTypeStr(SmallVector<StringRef, 10> &BuiltinArgsTypeStrs,
                         const StringRef DemangledCall, LLVMContext &Ctx) {
  auto Pos1 = DemangledCall.find('(');
  if (Pos1 == StringRef::npos)
    return false;
  auto Pos2 = DemangledCall.find(')');
  if (Pos2 == StringRef::npos || Pos1 > Pos2)
    return false;
  DemangledCall.slice(Pos1 + 1, Pos2)
      .split(BuiltinArgsTypeStrs, ',', -1, false);
  return true;
}

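// Editor's note: a usage sketch (not part of the original source). The split
// keeps surrounding whitespace, so callers trim each entry:
//
//   SmallVector<StringRef, 10> ArgStrs;
//   parseBuiltinTypeStr(ArgStrs, "vstore4(float4, ulong, float*)", Ctx);
//   // ArgStrs == {"float4", " ulong", " float*"}
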
Type *parseBuiltinCallArgumentBaseType(const StringRef DemangledCall,
                                       unsigned ArgIdx, LLVMContext &Ctx) {
  SmallVector<StringRef, 10> BuiltinArgsTypeStrs;
  parseBuiltinTypeStr(BuiltinArgsTypeStrs, DemangledCall, Ctx);
  if (ArgIdx >= BuiltinArgsTypeStrs.size())
    return nullptr;
  StringRef TypeStr = BuiltinArgsTypeStrs[ArgIdx].trim();
  return parseBuiltinCallArgumentType(TypeStr, Ctx);
}

struct BuiltinType {
  StringRef Name;
  uint32_t Opcode;
};

#define GET_BuiltinTypes_DECL
#define GET_BuiltinTypes_IMPL

struct OpenCLType {
  StringRef Name;
  StringRef SpirvTypeLiteral;
};

#define GET_OpenCLTypes_DECL
#define GET_OpenCLTypes_IMPL

#include "SPIRVGenTables.inc"
} // namespace SPIRV

//===----------------------------------------------------------------------===//
// Misc functions for parsing builtin types.
//===----------------------------------------------------------------------===//

static Type *parseTypeString(const StringRef Name, LLVMContext &Context) {
  if (Name.starts_with("void"))
    return Type::getVoidTy(Context);
  else if (Name.starts_with("int") || Name.starts_with("uint"))
    return Type::getInt32Ty(Context);
  else if (Name.starts_with("float"))
    return Type::getFloatTy(Context);
  else if (Name.starts_with("half"))
    return Type::getHalfTy(Context);
  report_fatal_error("Unable to recognize type!");
}

//===----------------------------------------------------------------------===//
// Implementation functions for builtin types.
//===----------------------------------------------------------------------===//

static SPIRVType *getNonParameterizedType(const TargetExtType *ExtensionType,
                                          const SPIRV::BuiltinType *TypeRecord,
                                          MachineIRBuilder &MIRBuilder,
                                          SPIRVGlobalRegistry *GR) {
  unsigned Opcode = TypeRecord->Opcode;
  // Create or get an existing type from GlobalRegistry.
  return GR->getOrCreateOpTypeByOpcode(ExtensionType, MIRBuilder, Opcode);
}

static SPIRVType *getSamplerType(MachineIRBuilder &MIRBuilder,
                                 SPIRVGlobalRegistry *GR) {
  // Create or get an existing type from GlobalRegistry.
  return GR->getOrCreateOpTypeSampler(MIRBuilder);
}

static SPIRVType *getPipeType(const TargetExtType *ExtensionType,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR) {
  assert(ExtensionType->getNumIntParameters() == 1 &&
         "Invalid number of parameters for SPIR-V pipe builtin!");
  // Create or get an existing type from GlobalRegistry.
  return GR->getOrCreateOpTypePipe(MIRBuilder,
                                   SPIRV::AccessQualifier::AccessQualifier(
                                       ExtensionType->getIntParameter(0)));
}

static SPIRVType *getCoopMatrType(const TargetExtType *ExtensionType,
                                  MachineIRBuilder &MIRBuilder,
                                  SPIRVGlobalRegistry *GR) {
  assert(ExtensionType->getNumIntParameters() == 4 &&
         "Invalid number of parameters for SPIR-V coop matrices builtin!");
  assert(ExtensionType->getNumTypeParameters() == 1 &&
         "SPIR-V coop matrices builtin type must have a type parameter!");
  const SPIRVType *ElemType =
      GR->getOrCreateSPIRVType(ExtensionType->getTypeParameter(0), MIRBuilder,
                               SPIRV::AccessQualifier::ReadWrite, true);
  // Create or get an existing type from GlobalRegistry.
  return GR->getOrCreateOpTypeCoopMatr(
      MIRBuilder, ExtensionType, ElemType, ExtensionType->getIntParameter(0),
      ExtensionType->getIntParameter(1), ExtensionType->getIntParameter(2),
      ExtensionType->getIntParameter(3), true);
}

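// Editor's note (illustrative, not from the original source): the expected
// shape of the extension type handled above, e.g. a 16x16 float matrix with
// Subgroup scope (3) and MatrixA use (0):
//
//   target("spirv.CooperativeMatrixKHR", float, 3, 16, 16, 0)
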
static SPIRVType *getSampledImageType(const TargetExtType *OpaqueType,
                                      MachineIRBuilder &MIRBuilder,
                                      SPIRVGlobalRegistry *GR) {
  SPIRVType *OpaqueImageType = GR->getImageType(
      OpaqueType, SPIRV::AccessQualifier::ReadOnly, MIRBuilder);
  // Create or get an existing type from GlobalRegistry.
  return GR->getOrCreateOpTypeSampledImage(OpaqueImageType, MIRBuilder);
}

static SPIRVType *getInlineSpirvType(const TargetExtType *ExtensionType,
                                     MachineIRBuilder &MIRBuilder,
                                     SPIRVGlobalRegistry *GR) {
  assert(ExtensionType->getNumIntParameters() == 3 &&
         "Inline SPIR-V type builtin takes an opcode, size, and alignment "
         "parameter");
  auto Opcode = ExtensionType->getIntParameter(0);

  SmallVector<MCOperand> Operands;
  for (Type *Param : ExtensionType->type_params()) {
    if (const TargetExtType *ParamEType = dyn_cast<TargetExtType>(Param)) {
      if (ParamEType->getName() == "spirv.IntegralConstant") {
        assert(ParamEType->getNumTypeParameters() == 1 &&
               "Inline SPIR-V integral constant builtin must have a type "
               "parameter");
        assert(ParamEType->getNumIntParameters() == 1 &&
               "Inline SPIR-V integral constant builtin must have a "
               "value parameter");

        auto OperandValue = ParamEType->getIntParameter(0);
        auto *OperandType = ParamEType->getTypeParameter(0);

        const SPIRVType *OperandSPIRVType = GR->getOrCreateSPIRVType(
            OperandType, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, true);

        Operands.push_back(MCOperand::createReg(GR->buildConstantInt(
            OperandValue, MIRBuilder, OperandSPIRVType, true)));
        continue;
      } else if (ParamEType->getName() == "spirv.Literal") {
        assert(ParamEType->getNumTypeParameters() == 0 &&
               "Inline SPIR-V literal builtin does not take type "
               "parameters");
        assert(ParamEType->getNumIntParameters() == 1 &&
               "Inline SPIR-V literal builtin must have an integer "
               "parameter");

        auto OperandValue = ParamEType->getIntParameter(0);

        Operands.push_back(MCOperand::createImm(OperandValue));
        continue;
      }
    }
    const SPIRVType *TypeOperand = GR->getOrCreateSPIRVType(
        Param, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, true);
    Operands.push_back(MCOperand::createReg(GR->getSPIRVTypeID(TypeOperand)));
  }

  return GR->getOrCreateUnknownType(ExtensionType, MIRBuilder, Opcode,
                                    Operands);
}

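// Editor's note: an illustrative encoding (not from the original source),
// following the inline SPIR-V convention handled above. A 64-bit integer
// type could be spelled as
//
//   target("spirv.Type", target("spirv.Literal", 64),
//          target("spirv.Literal", 0), /*Opcode=*/21, /*Size=*/8, /*Align=*/8)
//
// where 21 is OpTypeInt and the two spirv.Literal parameters become the raw
// width and signedness immediates of the emitted type instruction.
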
static SPIRVType *getVulkanBufferType(const TargetExtType *ExtensionType,
                                      MachineIRBuilder &MIRBuilder,
                                      SPIRVGlobalRegistry *GR) {
  assert(ExtensionType->getNumTypeParameters() == 1 &&
         "Vulkan buffers have exactly one type parameter: the type of the "
         "buffer.");
  assert(ExtensionType->getNumIntParameters() == 2 &&
         "Vulkan buffers have 2 integer parameters: storage class and "
         "is-writable.");

  auto *T = ExtensionType->getTypeParameter(0);
  auto SC = static_cast<SPIRV::StorageClass::StorageClass>(
      ExtensionType->getIntParameter(0));
  bool IsWritable = ExtensionType->getIntParameter(1);
  return GR->getOrCreateVulkanBufferType(MIRBuilder, T, SC, IsWritable);
}

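// Editor's note (illustrative, not from the original source): e.g.
//
//   target("spirv.VulkanBuffer", i32, 12, 1)
//
// describes a writable buffer of i32 in the StorageBuffer storage class
// (SPIR-V storage-class value 12).
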
static SPIRVType *getVulkanPushConstantType(const TargetExtType *ExtensionType,
                                            MachineIRBuilder &MIRBuilder,
                                            SPIRVGlobalRegistry *GR) {
  assert(ExtensionType->getNumTypeParameters() == 1 &&
         "Vulkan push constants have exactly one type parameter.");
  auto *T = ExtensionType->getTypeParameter(0);
  return GR->getOrCreateVulkanPushConstantType(MIRBuilder, T);
}

static SPIRVType *getLayoutType(const TargetExtType *ExtensionType,
                                MachineIRBuilder &MIRBuilder,
                                SPIRVGlobalRegistry *GR) {
  return GR->getOrCreateLayoutType(MIRBuilder, ExtensionType);
}

namespace SPIRV {
TargetExtType *parseBuiltinTypeNameToTargetExtType(std::string TypeName,
                                                   LLVMContext &Context) {
  StringRef NameWithParameters = TypeName;

  // Pointers-to-opaque-structs representing OpenCL types are first translated
  // to equivalent SPIR-V types. OpenCL builtin type names should have the
  // following format: e.g. %opencl.event_t
  if (NameWithParameters.starts_with("opencl.")) {
    const SPIRV::OpenCLType *OCLTypeRecord =
        SPIRV::lookupOpenCLType(NameWithParameters);
    if (!OCLTypeRecord)
      report_fatal_error("Missing TableGen record for OpenCL type: " +
                         NameWithParameters);
    NameWithParameters = OCLTypeRecord->SpirvTypeLiteral;
    // Continue with the SPIR-V builtin type...
  }

  // Names of the opaque structs representing SPIR-V builtins without
  // parameters should have the following format: e.g. %spirv.Event
  assert(NameWithParameters.starts_with("spirv.") &&
         "Unknown builtin opaque type!");

  // Parameterized SPIR-V builtin names follow this format:
  // e.g. %spirv.Image._void_1_0_0_0_0_0_0, %spirv.Pipe._0
  if (!NameWithParameters.contains('_'))
    return TargetExtType::get(Context, NameWithParameters);

  SmallVector<StringRef> Parameters;
  unsigned BaseNameLength = NameWithParameters.find('_') - 1;
  SplitString(NameWithParameters.substr(BaseNameLength + 1), Parameters, "_");

  SmallVector<Type *, 1> TypeParameters;
  bool HasTypeParameter = !isDigit(Parameters[0][0]);
  if (HasTypeParameter)
    TypeParameters.push_back(parseTypeString(Parameters[0], Context));
  SmallVector<unsigned> IntParameters;
  for (unsigned i = HasTypeParameter ? 1 : 0; i < Parameters.size(); i++) {
    unsigned IntParameter = 0;
    bool ValidLiteral = !Parameters[i].getAsInteger(10, IntParameter);
    (void)ValidLiteral;
    assert(ValidLiteral &&
           "Invalid format of SPIR-V builtin parameter literal!");
    IntParameters.push_back(IntParameter);
  }
  return TargetExtType::get(Context,
                            NameWithParameters.substr(0, BaseNameLength),
                            TypeParameters, IntParameters);
}

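// Editor's note: usage sketches (not part of the original source), assuming
// an LLVMContext Ctx:
//
//   parseBuiltinTypeNameToTargetExtType("opencl.event_t", Ctx);
//     // -> target("spirv.Event") via the TableGen OpenCL mapping.
//   parseBuiltinTypeNameToTargetExtType("spirv.Image._void_1_0_0_0_0_0_0", Ctx);
//     // -> target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 0)
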
SPIRVType *lowerBuiltinType(const Type *OpaqueType,
                            SPIRV::AccessQualifier::AccessQualifier AccessQual,
                            MachineIRBuilder &MIRBuilder,
                            SPIRVGlobalRegistry *GR) {
  // In LLVM IR, SPIR-V and OpenCL builtin types are represented as either
  // target(...) target extension types or pointers-to-opaque-structs. The
  // approach relying on structs is deprecated and works only in the
  // non-opaque pointer mode (-opaque-pointers=0).
  // In order to maintain compatibility with LLVM IR generated by older
  // versions of Clang and LLVM/SPIR-V Translator, the
  // pointers-to-opaque-structs are "translated" to target extension types.
  // This translation is temporary and will be removed in a future release of
  // LLVM.
  const TargetExtType *BuiltinType = dyn_cast<TargetExtType>(OpaqueType);
  if (!BuiltinType)
    BuiltinType = parseBuiltinTypeNameToTargetExtType(
        OpaqueType->getStructName().str(), MIRBuilder.getContext());

  unsigned NumStartingVRegs = MIRBuilder.getMRI()->getNumVirtRegs();

  const StringRef Name = BuiltinType->getName();
  LLVM_DEBUG(dbgs() << "Lowering builtin type: " << Name << "\n");

  SPIRVType *TargetType;
  if (Name == "spirv.Type") {
    TargetType = getInlineSpirvType(BuiltinType, MIRBuilder, GR);
  } else if (Name == "spirv.VulkanBuffer") {
    TargetType = getVulkanBufferType(BuiltinType, MIRBuilder, GR);
  } else if (Name == "spirv.Padding") {
    TargetType = GR->getOrCreatePaddingType(MIRBuilder);
  } else if (Name == "spirv.PushConstant") {
    TargetType = getVulkanPushConstantType(BuiltinType, MIRBuilder, GR);
  } else if (Name == "spirv.Layout") {
    TargetType = getLayoutType(BuiltinType, MIRBuilder, GR);
  } else {
    // Lookup the demangled builtin type in the TableGen records.
    const SPIRV::BuiltinType *TypeRecord = SPIRV::lookupBuiltinType(Name);
    if (!TypeRecord)
      report_fatal_error("Missing TableGen record for builtin type: " + Name);

    // "Lower" the BuiltinType into TargetType. The following get<...>Type
    // methods use the implementation details from TableGen records or
    // TargetExtType parameters to either create a new OpType<...> machine
    // instruction or get an existing equivalent SPIRVType from
    // GlobalRegistry.

    switch (TypeRecord->Opcode) {
    case SPIRV::OpTypeImage:
      TargetType = GR->getImageType(BuiltinType, AccessQual, MIRBuilder);
      break;
    case SPIRV::OpTypePipe:
      TargetType = getPipeType(BuiltinType, MIRBuilder, GR);
      break;
    case SPIRV::OpTypeDeviceEvent:
      TargetType = GR->getOrCreateOpTypeDeviceEvent(MIRBuilder);
      break;
    case SPIRV::OpTypeSampler:
      TargetType = getSamplerType(MIRBuilder, GR);
      break;
    case SPIRV::OpTypeSampledImage:
      TargetType = getSampledImageType(BuiltinType, MIRBuilder, GR);
      break;
    case SPIRV::OpTypeCooperativeMatrixKHR:
      TargetType = getCoopMatrType(BuiltinType, MIRBuilder, GR);
      break;
    default:
      TargetType =
          getNonParameterizedType(BuiltinType, TypeRecord, MIRBuilder, GR);
      break;
    }
  }

  // Emit an OpName instruction if a new OpType<...> instruction was added
  // (an equivalent type was not found in GlobalRegistry).
  if (NumStartingVRegs < MIRBuilder.getMRI()->getNumVirtRegs())
    buildOpName(GR->getSPIRVTypeID(TargetType), Name, MIRBuilder);

  return TargetType;
}
} // namespace SPIRV
} // namespace llvm