LLVM 23.0.0git
SPIRVBuiltins.cpp
Go to the documentation of this file.
1//===- SPIRVBuiltins.cpp - SPIR-V Built-in Functions ------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements lowering builtin function calls and types using their
10// demangled names and TableGen records.
11//
12//===----------------------------------------------------------------------===//
13
14#include "SPIRVBuiltins.h"
15#include "SPIRV.h"
16#include "SPIRVSubtarget.h"
17#include "SPIRVUtils.h"
20#include "llvm/IR/IntrinsicsSPIRV.h"
21#include <regex>
22#include <string>
23#include <tuple>
24
25#define DEBUG_TYPE "spirv-builtins"
26
27namespace llvm {
28namespace SPIRV {
29#define GET_BuiltinGroup_DECL
30#include "SPIRVGenTables.inc"
31
34 InstructionSet::InstructionSet Set;
35 BuiltinGroup Group;
38};
39
40#define GET_DemangledBuiltins_DECL
41#define GET_DemangledBuiltins_IMPL
42
60
63 InstructionSet::InstructionSet Set;
65};
66
67#define GET_NativeBuiltins_DECL
68#define GET_NativeBuiltins_IMPL
69
85
86#define GET_GroupBuiltins_DECL
87#define GET_GroupBuiltins_IMPL
88
96
97#define GET_IntelSubgroupsBuiltins_DECL
98#define GET_IntelSubgroupsBuiltins_IMPL
99
104
105#define GET_AtomicFloatingBuiltins_DECL
106#define GET_AtomicFloatingBuiltins_IMPL
112
113#define GET_GroupUniformBuiltins_DECL
114#define GET_GroupUniformBuiltins_IMPL
115
118 InstructionSet::InstructionSet Set;
119 BuiltIn::BuiltIn Value;
120};
121
122using namespace BuiltIn;
123#define GET_GetBuiltins_DECL
124#define GET_GetBuiltins_IMPL
125
128 InstructionSet::InstructionSet Set;
130};
131
132#define GET_ImageQueryBuiltins_DECL
133#define GET_ImageQueryBuiltins_IMPL
134
140
141#define GET_IntegerDotProductBuiltins_DECL
142#define GET_IntegerDotProductBuiltins_IMPL
143
146 InstructionSet::InstructionSet Set;
151 bool IsTF32;
152 FPRoundingMode::FPRoundingMode RoundingMode;
153};
154
157 InstructionSet::InstructionSet Set;
161 FPRoundingMode::FPRoundingMode RoundingMode;
162};
163
164using namespace FPRoundingMode;
165#define GET_ConvertBuiltins_DECL
166#define GET_ConvertBuiltins_IMPL
167
168using namespace InstructionSet;
169#define GET_VectorLoadStoreBuiltins_DECL
170#define GET_VectorLoadStoreBuiltins_IMPL
171
172#define GET_CLMemoryScope_DECL
173#define GET_CLSamplerAddressingMode_DECL
174#define GET_CLMemoryFenceFlags_DECL
175#define GET_ExtendedBuiltins_DECL
176#include "SPIRVGenTables.inc"
177} // namespace SPIRV
178
179//===----------------------------------------------------------------------===//
180// Misc functions for looking up builtins and verifying requirements using
181// TableGen records
182//===----------------------------------------------------------------------===//
183
184namespace SPIRV {
185/// Parses the name part of the demangled builtin call.
186std::string lookupBuiltinNameHelper(StringRef DemangledCall,
187 FPDecorationId *DecorationId) {
188 StringRef PassPrefix = "(anonymous namespace)::";
189 StringRef SpvPrefix = "__spv::";
190 std::string BuiltinName = DemangledCall.str();
191
192 // Check if the extracted name contains type information between angle
193 // brackets. If so, the builtin is an instantiated template - needs to have
194 // the information after angle brackets and return type removed.
195 std::size_t Pos = BuiltinName.find(">(");
196 if (Pos != std::string::npos) {
197 BuiltinName = BuiltinName.substr(0, BuiltinName.rfind('<', Pos));
198 } else {
199 Pos = BuiltinName.find('(');
200 if (Pos != std::string::npos)
201 BuiltinName = BuiltinName.substr(0, Pos);
202 }
203 BuiltinName = BuiltinName.substr(BuiltinName.find_last_of(' ') + 1);
204
205 // Itanium Demangler result may have "(anonymous namespace)::" or "__spv::"
206 // prefix.
207 if (BuiltinName.find(PassPrefix) == 0)
208 BuiltinName = BuiltinName.substr(PassPrefix.size());
209 else if (BuiltinName.find(SpvPrefix) == 0)
210 BuiltinName = BuiltinName.substr(SpvPrefix.size());
211
212 // Account for possible "__spirv_ocl_" prefix in SPIR-V friendly LLVM IR
213 if (BuiltinName.rfind("__spirv_ocl_", 0) == 0)
214 BuiltinName = BuiltinName.substr(12);
215
216 // Check if the extracted name begins with:
217 // - "__spirv_ImageSampleExplicitLod"
218 // - "__spirv_ImageRead"
219 // - "__spirv_ImageWrite"
220 // - "__spirv_ImageQuerySizeLod"
221 // - "__spirv_UDotKHR"
222 // - "__spirv_SDotKHR"
223 // - "__spirv_SUDotKHR"
224 // - "__spirv_SDotAccSatKHR"
225 // - "__spirv_UDotAccSatKHR"
226 // - "__spirv_SUDotAccSatKHR"
227 // - "__spirv_ReadClockKHR"
228 // - "__spirv_SubgroupBlockReadINTEL"
229 // - "__spirv_SubgroupImageBlockReadINTEL"
230 // - "__spirv_SubgroupImageMediaBlockReadINTEL"
231 // - "__spirv_SubgroupImageMediaBlockWriteINTEL"
232 // - "__spirv_Convert"
233 // - "__spirv_Round"
234 // - "__spirv_UConvert"
235 // - "__spirv_SConvert"
236 // - "__spirv_FConvert"
237 // - "__spirv_SatConvert"
238 // and maybe contains return type information at the end "_R<type>".
239 // If so, extract the plain builtin name without the type information.
240 static const std::regex SpvWithR(
241 "(__spirv_(ImageSampleExplicitLod|ImageRead|ImageWrite|ImageQuerySizeLod|"
242 "UDotKHR|"
243 "SDotKHR|SUDotKHR|SDotAccSatKHR|UDotAccSatKHR|SUDotAccSatKHR|"
244 "ReadClockKHR|SubgroupBlockReadINTEL|SubgroupImageBlockReadINTEL|"
245 "SubgroupImageMediaBlockReadINTEL|SubgroupImageMediaBlockWriteINTEL|"
246 "Convert|Round|"
247 "UConvert|SConvert|FConvert|SatConvert)[^_]*)(_R[^_]*_?(\\w+)?.*)?");
248 std::smatch Match;
249 if (std::regex_match(BuiltinName, Match, SpvWithR) && Match.size() > 1) {
250 std::ssub_match SubMatch;
251 if (DecorationId && Match.size() > 3) {
252 SubMatch = Match[4];
253 *DecorationId = demangledPostfixToDecorationId(SubMatch.str());
254 }
255 SubMatch = Match[1];
256 BuiltinName = SubMatch.str();
257 }
258
259 return BuiltinName;
260}
261} // namespace SPIRV
262
263/// Looks up the demangled builtin call in the SPIRVBuiltins.td records using
264/// the provided \p DemangledCall and specified \p Set.
265///
266/// The lookup follows the following algorithm, returning the first successful
267/// match:
268/// 1. Search with the plain demangled name (expecting a 1:1 match).
269/// 2. Search with the prefix before or suffix after the demangled name
270/// signifying the type of the first argument.
271///
272/// \returns Wrapper around the demangled call and found builtin definition.
273static std::unique_ptr<const SPIRV::IncomingCall>
275 SPIRV::InstructionSet::InstructionSet Set,
276 Register ReturnRegister, SPIRVTypeInst ReturnType,
278 std::string BuiltinName = SPIRV::lookupBuiltinNameHelper(DemangledCall);
279
  // Split the text between the outer parentheses into per-argument type names.
280 SmallVector<StringRef, 10> BuiltinArgumentTypes;
281 StringRef BuiltinArgs =
282 DemangledCall.slice(DemangledCall.find('(') + 1, DemangledCall.find(')'));
283 BuiltinArgs.split(BuiltinArgumentTypes, ',', -1, false);
284
285 // Look up the builtin in the defined set. Start with the plain demangled
286 // name, expecting a 1:1 match in the defined builtin set.
287 const SPIRV::DemangledBuiltin *Builtin;
288 if ((Builtin = SPIRV::lookupBuiltin(BuiltinName, Set)))
289 return std::make_unique<SPIRV::IncomingCall>(
290 BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);
291
292 // If the initial look up was unsuccessful and the demangled call takes at
293 // least 1 argument, add a prefix or suffix signifying the type of the first
294 // argument and repeat the search.
295 if (BuiltinArgumentTypes.size() >= 1) {
  // Only the leading character of the first argument's demangled type name is
  // needed to classify it as unsigned / signed / floating-point.
296 char FirstArgumentType = BuiltinArgumentTypes[0][0];
297 // Prefix to be added to the builtin's name for lookup.
298 // For example, OpenCL "abs" taking an unsigned value has a prefix "u_".
299 std::string Prefix;
300
301 switch (FirstArgumentType) {
302 // Unsigned:
303 case 'u':
304 if (Set == SPIRV::InstructionSet::OpenCL_std)
305 Prefix = "u_";
306 else if (Set == SPIRV::InstructionSet::GLSL_std_450)
307 Prefix = "u";
308 break;
309 // Signed:
310 case 'c':
311 case 's':
312 case 'i':
313 case 'l':
314 if (Set == SPIRV::InstructionSet::OpenCL_std)
315 Prefix = "s_";
316 else if (Set == SPIRV::InstructionSet::GLSL_std_450)
317 Prefix = "s";
318 break;
319 // Floating-point:
320 case 'f':
321 case 'd':
322 case 'h':
323 if (Set == SPIRV::InstructionSet::OpenCL_std ||
324 Set == SPIRV::InstructionSet::GLSL_std_450)
325 Prefix = "f";
326 break;
327 }
328
329 // If argument-type name prefix was added, look up the builtin again.
330 if (!Prefix.empty() &&
331 (Builtin = SPIRV::lookupBuiltin(Prefix + BuiltinName, Set)))
332 return std::make_unique<SPIRV::IncomingCall>(
333 BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);
334
335 // If lookup with a prefix failed, find a suffix to be added to the
336 // builtin's name for lookup. For example, OpenCL "group_reduce_max" taking
337 // an unsigned value has a suffix "u".
338 std::string Suffix;
339
340 switch (FirstArgumentType) {
341 // Unsigned:
342 case 'u':
343 Suffix = "u";
344 break;
345 // Signed:
346 case 'c':
347 case 's':
348 case 'i':
349 case 'l':
350 Suffix = "s";
351 break;
352 // Floating-point:
353 case 'f':
354 case 'd':
355 case 'h':
356 Suffix = "f";
357 break;
358 }
359
360 // If argument-type name suffix was added, look up the builtin again.
361 if (!Suffix.empty() &&
362 (Builtin = SPIRV::lookupBuiltin(BuiltinName + Suffix, Set)))
363 return std::make_unique<SPIRV::IncomingCall>(
364 BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);
365 }
366
367 // No builtin with such name was found in the set.
368 return nullptr;
369}
370
372 MachineRegisterInfo *MRI) {
373 // We expect the following sequence of instructions:
374 // %0:_(pN) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.spv.alloca)
375 // or = G_GLOBAL_VALUE @block_literal_global
376 // %1:_(pN) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.spv.bitcast), %0
377 // %2:_(p4) = G_ADDRSPACE_CAST %1:_(pN)
  // Step 1: strip the address-space cast and recover its source register.
378 MachineInstr *MI = MRI->getUniqueVRegDef(ParamReg);
379 assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST &&
380 MI->getOperand(1).isReg());
381 Register BitcastReg = MI->getOperand(1).getReg();
  // Step 2: strip the spv_bitcast intrinsic and return the defining
  // instruction of its source operand (the alloca or global value).
382 MachineInstr *BitcastMI = MRI->getUniqueVRegDef(BitcastReg);
383 assert(isSpvIntrinsic(*BitcastMI, Intrinsic::spv_bitcast) &&
384 BitcastMI->getOperand(2).isReg());
385 Register ValueReg = BitcastMI->getOperand(2).getReg();
386 MachineInstr *ValueMI = MRI->getUniqueVRegDef(ValueReg);
387 return ValueMI;
388}
389
390// Return an integer constant corresponding to the given register and
391// defined in spv_track_constant.
392// TODO: maybe unify with prelegalizer pass.
395 assert(DefMI->getOpcode() == TargetOpcode::G_CONSTANT &&
396 DefMI->getOperand(1).isCImm());
  // Zero-extend the immediate to 64 bits regardless of its declared width.
397 return DefMI->getOperand(1).getCImm()->getValue().getZExtValue();
398}
399
400// Return type of the instruction result from spv_assign_type intrinsic.
401// TODO: maybe unify with prelegalizer pass.
403 MachineInstr *NextMI = MI->getNextNode();
404 if (!NextMI)
405 return nullptr;
  // An optional spv_assign_name intrinsic may sit between MI and the type
  // annotation; skip over it.
406 if (isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_name))
407 if ((NextMI = NextMI->getNextNode()) == nullptr)
408 return nullptr;
  // The type intrinsic must annotate exactly the register that MI defines,
  // otherwise there is no type to report.
409 Register ValueReg = MI->getOperand(0).getReg();
410 if ((!isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_type) &&
411 !isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_ptr_type)) ||
412 NextMI->getOperand(1).getReg() != ValueReg)
413 return nullptr;
  // The IR type is carried as metadata in operand 2 of the intrinsic.
414 Type *Ty = getMDOperandAsType(NextMI->getOperand(2).getMetadata(), 0);
415 assert(Ty && "Type is expected");
416 return Ty;
417}
418
419static const Type *getBlockStructType(Register ParamReg,
420 MachineRegisterInfo *MRI) {
421 // In principle, this information should be passed to us from Clang via
422 // an elementtype attribute. However, said attribute requires that
423 // the function call be an intrinsic, which is not. Instead, we rely on being
424 // able to trace this to the declaration of a variable: OpenCL C specification
425 // section 6.12.5 should guarantee that we can do this.
426 MachineInstr *MI = getBlockStructInstr(ParamReg, MRI);
427 if (MI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE)
428 return MI->getOperand(1).getGlobal()->getType();
429 assert(isSpvIntrinsic(*MI, Intrinsic::spv_alloca) &&
430 "Blocks in OpenCL C must be traceable to allocation site");
431 return getMachineInstrType(MI);
432}
433
434//===----------------------------------------------------------------------===//
435// Helper functions for building misc instructions
436//===----------------------------------------------------------------------===//
437
438/// Helper function building either a resulting scalar or vector bool register
439/// depending on the expected \p ResultType.
440///
441/// \returns Tuple of the resulting register and its type.
442static std::tuple<Register, SPIRVTypeInst>
445 LLT Type;
446 SPIRVTypeInst BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder, true);
447
  // Vector result: build a bool vector with the same element count as the
  // expected result type; scalar result: a plain 1-bit scalar.
448 if (ResultType->getOpcode() == SPIRV::OpTypeVector) {
449 unsigned VectorElements = ResultType->getOperand(2).getImm();
450 BoolType = GR->getOrCreateSPIRVVectorType(BoolType, VectorElements,
451 MIRBuilder, true);
454 Type = LLT::vector(LLVMVectorType->getElementCount(), 1);
455 } else {
456 Type = LLT::scalar(1);
457 }
458
  // Create the result register and attach both its register class and its
  // SPIR-V bool type.
459 Register ResultRegister =
461 MIRBuilder.getMRI()->setRegClass(ResultRegister, GR->getRegClass(ResultType));
462 GR->assignSPIRVTypeToVReg(BoolType, ResultRegister, MIRBuilder.getMF());
463 return std::make_tuple(ResultRegister, BoolType);
464}
465
466/// Helper function for building either a vector or scalar select instruction
467/// depending on the expected \p ResultType.
468static bool buildSelectInst(MachineIRBuilder &MIRBuilder,
469 Register ReturnRegister, Register SourceRegister,
470 SPIRVTypeInst ReturnType, SPIRVGlobalRegistry *GR) {
471 Register TrueConst, FalseConst;
472
473 if (ReturnType->getOpcode() == SPIRV::OpTypeVector) {
  // Vector case: "true" selects an all-ones constant vector and "false" an
  // all-zeros constant vector of the return type.
474 unsigned Bits = GR->getScalarOrVectorBitWidth(ReturnType);
476 TrueConst =
477 GR->getOrCreateConsIntVector(AllOnes, MIRBuilder, ReturnType, true);
478 FalseConst = GR->getOrCreateConsIntVector(0, MIRBuilder, ReturnType, true);
479 } else {
  // Scalar case: plain 1 / 0 constants of the return type.
480 TrueConst = GR->buildConstantInt(1, MIRBuilder, ReturnType, true);
481 FalseConst = GR->buildConstantInt(0, MIRBuilder, ReturnType, true);
482 }
483
484 return MIRBuilder.buildSelect(ReturnRegister, SourceRegister, TrueConst,
485 FalseConst);
486}
487
488/// Helper function for building a load instruction loading into the
489/// \p DestinationReg.
491 MachineIRBuilder &MIRBuilder,
492 SPIRVGlobalRegistry *GR, LLT LowLevelType,
493 Register DestinationReg = Register(0)) {
  // Create a fresh destination register when the caller did not supply one.
494 if (!DestinationReg.isValid())
495 DestinationReg = createVirtualRegister(BaseType, GR, MIRBuilder);
496 // TODO: consider using correct address space and alignment (p0 is canonical
497 // type for selection though).
499 MIRBuilder.buildLoad(DestinationReg, PtrRegister, PtrInfo, Align());
500 return DestinationReg;
501}
502
503/// Helper function for building a load instruction for loading a builtin global
504/// variable of \p BuiltinValue value.
506 MachineIRBuilder &MIRBuilder, SPIRVTypeInst VariableType,
507 SPIRVGlobalRegistry *GR, SPIRV::BuiltIn::BuiltIn BuiltinValue, LLT LLType,
508 Register Reg = Register(0), bool isConst = true,
509 const std::optional<SPIRV::LinkageType::LinkageType> &LinkageTy = {
510 SPIRV::LinkageType::Import}) {
  // Create a pointer-typed register for the builtin variable and give it both
  // an LLT pointer type and the SPIR-V Input-storage pointer type.
511 Register NewRegister =
512 MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::pIDRegClass);
513 MIRBuilder.getMRI()->setType(
514 NewRegister,
515 LLT::pointer(storageClassToAddressSpace(SPIRV::StorageClass::Function),
516 GR->getPointerSize()));
517 SPIRVTypeInst PtrType = GR->getOrCreateSPIRVPointerType(
518 VariableType, MIRBuilder, SPIRV::StorageClass::Input);
519 GR->assignSPIRVTypeToVReg(PtrType, NewRegister, MIRBuilder.getMF());
520
521 // Set up the global OpVariable with the necessary builtin decorations.
522 Register Variable = GR->buildGlobalVariable(
523 NewRegister, PtrType, getLinkStringForBuiltIn(BuiltinValue), nullptr,
524 SPIRV::StorageClass::Input, nullptr, /* isConst= */ isConst, LinkageTy,
525 MIRBuilder, false);
526
527 // Load the value from the global variable.
528 Register LoadedRegister =
529 buildLoadInst(VariableType, Variable, MIRBuilder, GR, LLType, Reg);
530 MIRBuilder.getMRI()->setType(LoadedRegister, LLType);
531 return LoadedRegister;
532}
533
534/// Helper external function for assigning a SPIRV type to a register, ensuring
535/// the register class and type are set in MRI. Defined in
536/// SPIRVPreLegalizer.cpp.
537extern void updateRegType(Register Reg, Type *Ty, SPIRVTypeInst SpirvTy,
540
541// TODO: Move to TableGen.
542static SPIRV::MemorySemantics::MemorySemantics
543getSPIRVMemSemantics(std::memory_order MemOrder) {
544 switch (MemOrder) {
545 case std::memory_order_relaxed:
546 return SPIRV::MemorySemantics::None;
547 case std::memory_order_acquire:
548 return SPIRV::MemorySemantics::Acquire;
549 case std::memory_order_release:
550 return SPIRV::MemorySemantics::Release;
551 case std::memory_order_acq_rel:
552 return SPIRV::MemorySemantics::AcquireRelease;
553 case std::memory_order_seq_cst:
554 return SPIRV::MemorySemantics::SequentiallyConsistent;
555 default:
556 report_fatal_error("Unknown CL memory scope");
557 }
558}
559
560static SPIRV::Scope::Scope getSPIRVScope(SPIRV::CLMemoryScope ClScope) {
561 switch (ClScope) {
562 case SPIRV::CLMemoryScope::memory_scope_work_item:
563 return SPIRV::Scope::Invocation;
564 case SPIRV::CLMemoryScope::memory_scope_work_group:
565 return SPIRV::Scope::Workgroup;
566 case SPIRV::CLMemoryScope::memory_scope_device:
567 return SPIRV::Scope::Device;
568 case SPIRV::CLMemoryScope::memory_scope_all_svm_devices:
569 return SPIRV::Scope::CrossDevice;
570 case SPIRV::CLMemoryScope::memory_scope_sub_group:
571 return SPIRV::Scope::Subgroup;
572 }
573 report_fatal_error("Unknown CL memory scope");
574}
575
577 MachineIRBuilder &MIRBuilder,
  // Materialize \p Val as a constant of the canonical 32-bit integer type.
579 return GR->buildConstantInt(
580 Val, MIRBuilder, GR->getOrCreateSPIRVIntegerType(32, MIRBuilder), true);
581}
582
/// Resolve the scope operand for an atomic/barrier instruction: reuse the
/// application-supplied constant register when its OpenCL value coincides
/// numerically with the SPIR-V scope, otherwise materialize a new constant.
583static Register buildScopeReg(Register CLScopeRegister,
584 SPIRV::Scope::Scope Scope,
585 MachineIRBuilder &MIRBuilder,
587 MachineRegisterInfo *MRI) {
588 if (CLScopeRegister.isValid()) {
589 auto CLScope =
590 static_cast<SPIRV::CLMemoryScope>(getIConstVal(CLScopeRegister, MRI));
591 Scope = getSPIRVScope(CLScope);
592
  // Values agree numerically, so the original register can be used as-is.
593 if (CLScope == static_cast<unsigned>(Scope)) {
594 MRI->setRegClass(CLScopeRegister, &SPIRV::iIDRegClass);
595 return CLScopeRegister;
596 }
597 }
598 return buildConstantIntReg32(Scope, MIRBuilder, GR);
599}
600
  // Assign a register class only when none is set yet; fall back to the
  // generic integer-ID class when no SPIR-V type is known.
603 if (MRI->getRegClassOrNull(Reg))
604 return;
606 MRI->setRegClass(Reg,
607 SpvType ? GR->getRegClass(SpvType) : &SPIRV::iIDRegClass);
608}
609
610static Register buildMemSemanticsReg(Register SemanticsRegister,
611 Register PtrRegister, unsigned &Semantics,
612 MachineIRBuilder &MIRBuilder,
  // Semantics is an in-out parameter: when an explicit memory-order register
  // was supplied, translate its constant into SPIR-V semantics and reuse the
  // register if the numeric values coincide.
614 if (SemanticsRegister.isValid()) {
615 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
616 std::memory_order Order =
617 static_cast<std::memory_order>(getIConstVal(SemanticsRegister, MRI));
618 Semantics =
619 getSPIRVMemSemantics(Order) |
621 if (static_cast<unsigned>(Order) == Semantics) {
622 MRI->setRegClass(SemanticsRegister, &SPIRV::iIDRegClass);
623 return SemanticsRegister;
624 }
625 }
626 return buildConstantIntReg32(Semantics, MIRBuilder, GR);
627}
628
629static bool buildOpFromWrapper(MachineIRBuilder &MIRBuilder, unsigned Opcode,
631 Register TypeReg,
632 ArrayRef<uint32_t> ImmArgs = {}) {
633 auto MIB = MIRBuilder.buildInstr(Opcode);
  // A valid TypeReg means the wrapped opcode produces a result: emit the
  // result def followed by its type operand.
634 if (TypeReg.isValid())
635 MIB.addDef(Call->ReturnRegister).addUse(TypeReg);
  // The trailing ImmArgs stand in for the same number of trailing call
  // arguments, which are emitted as immediates instead of register uses.
636 unsigned Sz = Call->Arguments.size() - ImmArgs.size();
637 for (unsigned i = 0; i < Sz; ++i)
638 MIB.addUse(Call->Arguments[i]);
639 for (uint32_t ImmArg : ImmArgs)
640 MIB.addImm(ImmArg);
641 return true;
642}
643
644/// Helper function for translating atomic init to OpStore.
646 MachineIRBuilder &MIRBuilder) {
645 if (Call->isSpirvOp())
648 return buildOpFromWrapper(MIRBuilder, SPIRV::OpStore, Call, Register(0));
649
  // atomic_init(ptr, value) lowers to a plain OpStore ptr, value.
650 assert(Call->Arguments.size() == 2 &&
651 "Need 2 arguments for atomic init translation");
652 MIRBuilder.buildInstr(SPIRV::OpStore)
653 .addUse(Call->Arguments[0])
654 .addUse(Call->Arguments[1]);
655 return true;
656}
657
658/// Helper function for building an atomic load instruction.
660 MachineIRBuilder &MIRBuilder,
662 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
663 if (Call->isSpirvOp())
664 return buildOpFromWrapper(MIRBuilder, SPIRV::OpAtomicLoad, Call, TypeReg);
665
666 Register PtrRegister = Call->Arguments[0];
667 // TODO: if true insert call to __translate_ocl_memory_scope before
668 // OpAtomicLoad and the function implementation. We can use Translator's
669 // output for transcoding/atomic_explicit_arguments.cl as an example.
  // Scope defaults to Device when the call carries no explicit scope arg.
670 Register ScopeRegister =
671 Call->Arguments.size() > 1
672 ? Call->Arguments[1]
673 : buildConstantIntReg32(SPIRV::Scope::Device, MIRBuilder, GR);
674 Register MemSemanticsReg;
675 if (Call->Arguments.size() > 2) {
676 // TODO: Insert call to __translate_ocl_memory_order before OpAtomicLoad.
677 MemSemanticsReg = Call->Arguments[2];
678 } else {
  // Default to sequentially-consistent semantics.
679 int Semantics =
680 SPIRV::MemorySemantics::SequentiallyConsistent |
682 MemSemanticsReg = buildConstantIntReg32(Semantics, MIRBuilder, GR);
683 }
684
685 MIRBuilder.buildInstr(SPIRV::OpAtomicLoad)
686 .addDef(Call->ReturnRegister)
687 .addUse(TypeReg)
688 .addUse(PtrRegister)
689 .addUse(ScopeRegister)
690 .addUse(MemSemanticsReg)
691 return true;
692}
693
694/// Helper function for building an atomic store instruction.
696 MachineIRBuilder &MIRBuilder,
698 if (Call->isSpirvOp())
699 return buildOpFromWrapper(MIRBuilder, SPIRV::OpAtomicStore, Call,
700 Register(0));
701
  // Atomic stores are always emitted here with Device scope and
  // sequentially-consistent semantics.
702 Register ScopeRegister =
703 buildConstantIntReg32(SPIRV::Scope::Device, MIRBuilder, GR);
704 Register PtrRegister = Call->Arguments[0];
705 int Semantics =
706 SPIRV::MemorySemantics::SequentiallyConsistent |
708 Register MemSemanticsReg = buildConstantIntReg32(Semantics, MIRBuilder, GR);
709 MIRBuilder.buildInstr(SPIRV::OpAtomicStore)
710 .addUse(PtrRegister)
711 .addUse(ScopeRegister)
712 .addUse(MemSemanticsReg)
713 .addUse(Call->Arguments[1]);
714 return true;
715}
716
717/// Helper function for building an atomic compare-exchange instruction.
719 const SPIRV::IncomingCall *Call, const SPIRV::DemangledBuiltin *Builtin,
720 unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR) {
721 if (Call->isSpirvOp())
722 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
723 GR->getSPIRVTypeID(Call->ReturnType));
724
  // Two builtin flavors are handled: the legacy "cmpxchg" form (expected is
  // an integer value) and the C11-style form (expected is a pointer).
725 bool IsCmpxchg = Call->Builtin->Name.contains("cmpxchg");
726 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
727
728 Register ObjectPtr = Call->Arguments[0]; // Pointer (volatile A *object.)
729 Register ExpectedArg = Call->Arguments[1]; // Comparator (C* expected).
730 Register Desired = Call->Arguments[2]; // Value (C Desired).
731 SPIRVTypeInst SpvDesiredTy = GR->getSPIRVTypeForVReg(Desired);
732 LLT DesiredLLT = MRI->getType(Desired);
733
734 assert(GR->getSPIRVTypeForVReg(ObjectPtr)->getOpcode() ==
735 SPIRV::OpTypePointer);
736 unsigned ExpectedType = GR->getSPIRVTypeForVReg(ExpectedArg)->getOpcode();
737 (void)ExpectedType;
738 assert(IsCmpxchg ? ExpectedType == SPIRV::OpTypeInt
739 : ExpectedType == SPIRV::OpTypePointer);
740 assert(GR->isScalarOfType(Desired, SPIRV::OpTypeInt));
741
742 SPIRVTypeInst SpvObjectPtrTy = GR->getSPIRVTypeForVReg(ObjectPtr);
743 assert(SpvObjectPtrTy->getOperand(2).isReg() && "SPIRV type is expected");
744 auto StorageClass = static_cast<SPIRV::StorageClass::StorageClass>(
745 SpvObjectPtrTy->getOperand(1).getImm());
746 auto MemSemStorage = getMemSemanticsForStorageClass(StorageClass);
747
  // Defaults when no explicit memory orders are supplied: relaxed (None) for
  // the cmpxchg form, sequentially consistent for the C11-style form.
748 Register MemSemEqualReg;
749 Register MemSemUnequalReg;
750 uint64_t MemSemEqual =
751 IsCmpxchg
752 ? SPIRV::MemorySemantics::None
753 : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
754 uint64_t MemSemUnequal =
755 IsCmpxchg
756 ? SPIRV::MemorySemantics::None
757 : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
  // Explicit success/failure memory orders were passed: translate both.
758 if (Call->Arguments.size() >= 4) {
759 assert(Call->Arguments.size() >= 5 &&
760 "Need 5+ args for explicit atomic cmpxchg");
761 auto MemOrdEq =
762 static_cast<std::memory_order>(getIConstVal(Call->Arguments[3], MRI));
763 auto MemOrdNeq =
764 static_cast<std::memory_order>(getIConstVal(Call->Arguments[4], MRI));
765 MemSemEqual = getSPIRVMemSemantics(MemOrdEq) | MemSemStorage;
766 MemSemUnequal = getSPIRVMemSemantics(MemOrdNeq) | MemSemStorage;
767 if (static_cast<unsigned>(MemOrdEq) == MemSemEqual)
768 MemSemEqualReg = Call->Arguments[3];
  // NOTE(review): MemOrdNeq is compared against MemSemEqual rather than
  // MemSemUnequal — looks like a copy-paste slip; confirm intended.
769 if (static_cast<unsigned>(MemOrdNeq) == MemSemEqual)
770 MemSemUnequalReg = Call->Arguments[4];
771 }
772 if (!MemSemEqualReg.isValid())
773 MemSemEqualReg = buildConstantIntReg32(MemSemEqual, MIRBuilder, GR);
774 if (!MemSemUnequalReg.isValid())
775 MemSemUnequalReg = buildConstantIntReg32(MemSemUnequal, MIRBuilder, GR);
776
777 Register ScopeReg;
778 auto Scope = IsCmpxchg ? SPIRV::Scope::Workgroup : SPIRV::Scope::Device;
779 if (Call->Arguments.size() >= 6) {
780 assert(Call->Arguments.size() == 6 &&
781 "Extra args for explicit atomic cmpxchg");
782 auto ClScope = static_cast<SPIRV::CLMemoryScope>(
783 getIConstVal(Call->Arguments[5], MRI));
784 Scope = getSPIRVScope(ClScope);
785 if (ClScope == static_cast<unsigned>(Scope))
786 ScopeReg = Call->Arguments[5];
787 }
788 if (!ScopeReg.isValid())
789 ScopeReg = buildConstantIntReg32(Scope, MIRBuilder, GR);
790
  // For the C11-style form, "expected" is a pointer: load its pointee for
  // the comparison and store the observed value back through it below.
791 Register Expected = IsCmpxchg
792 ? ExpectedArg
793 : buildLoadInst(SpvDesiredTy, ExpectedArg, MIRBuilder,
794 GR, LLT::scalar(64));
795 MRI->setType(Expected, DesiredLLT);
796 Register Tmp = !IsCmpxchg ? MRI->createGenericVirtualRegister(DesiredLLT)
797 : Call->ReturnRegister;
798 if (!MRI->getRegClassOrNull(Tmp))
799 MRI->setRegClass(Tmp, GR->getRegClass(SpvDesiredTy));
800 GR->assignSPIRVTypeToVReg(SpvDesiredTy, Tmp, MIRBuilder.getMF());
801
802 MIRBuilder.buildInstr(Opcode)
803 .addDef(Tmp)
804 .addUse(GR->getSPIRVTypeID(SpvDesiredTy))
805 .addUse(ObjectPtr)
806 .addUse(ScopeReg)
807 .addUse(MemSemEqualReg)
808 .addUse(MemSemUnequalReg)
809 .addUse(Desired)
  // C11 form: write the observed value back to *expected and return the
  // boolean success flag instead of the observed value itself.
811 if (!IsCmpxchg) {
812 MIRBuilder.buildInstr(SPIRV::OpStore).addUse(ExpectedArg).addUse(Tmp);
813 MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Call->ReturnRegister, Tmp, Expected);
814 }
815 return true;
816}
817
818/// Helper function for building atomic instructions.
819static bool buildAtomicRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
820 MachineIRBuilder &MIRBuilder,
822 if (Call->isSpirvOp())
823 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
824 GR->getSPIRVTypeID(Call->ReturnType));
825
  // Optional trailing args: [2] = memory order, [3] = scope; defaults are
  // synthesized by buildMemSemanticsReg / buildScopeReg below.
826 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
827 Register ScopeRegister =
828 Call->Arguments.size() >= 4 ? Call->Arguments[3] : Register();
829
830 assert(Call->Arguments.size() <= 4 &&
831 "Too many args for explicit atomic RMW");
832 ScopeRegister = buildScopeReg(ScopeRegister, SPIRV::Scope::Workgroup,
833 MIRBuilder, GR, MRI);
834
835 Register PtrRegister = Call->Arguments[0];
836 unsigned Semantics = SPIRV::MemorySemantics::None;
837 Register MemSemanticsReg =
838 Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register();
839 MemSemanticsReg = buildMemSemanticsReg(MemSemanticsReg, PtrRegister,
840 Semantics, MIRBuilder, GR);
841 Register ValueReg = Call->Arguments[1];
842 Register ValueTypeReg = GR->getSPIRVTypeID(Call->ReturnType);
843 // support cl_ext_float_atomics
844 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeFloat) {
845 if (Opcode == SPIRV::OpAtomicIAdd) {
846 Opcode = SPIRV::OpAtomicFAddEXT;
847 } else if (Opcode == SPIRV::OpAtomicISub) {
848 // Translate OpAtomicISub applied to a floating type argument to
849 // OpAtomicFAddEXT with the negative value operand
850 Opcode = SPIRV::OpAtomicFAddEXT;
851 Register NegValueReg =
852 MRI->createGenericVirtualRegister(MRI->getType(ValueReg));
853 MRI->setRegClass(NegValueReg, GR->getRegClass(Call->ReturnType));
854 GR->assignSPIRVTypeToVReg(Call->ReturnType, NegValueReg,
855 MIRBuilder.getMF());
856 MIRBuilder.buildInstr(TargetOpcode::G_FNEG)
857 .addDef(NegValueReg)
858 .addUse(ValueReg);
859 updateRegType(NegValueReg, nullptr, Call->ReturnType, GR, MIRBuilder,
860 MIRBuilder.getMF().getRegInfo());
861 ValueReg = NegValueReg;
862 }
863 }
864 MIRBuilder.buildInstr(Opcode)
865 .addDef(Call->ReturnRegister)
866 .addUse(ValueTypeReg)
867 .addUse(PtrRegister)
868 .addUse(ScopeRegister)
869 .addUse(MemSemanticsReg)
870 .addUse(ValueReg);
871 return true;
872}
873
874/// Helper function for building an atomic floating-type instruction.
876 unsigned Opcode,
877 MachineIRBuilder &MIRBuilder,
879 assert(Call->Arguments.size() == 4 &&
880 "Wrong number of atomic floating-type builtin");
881 Register PtrReg = Call->Arguments[0];
882 Register ScopeReg = Call->Arguments[1];
883 Register MemSemanticsReg = Call->Arguments[2];
884 Register ValueReg = Call->Arguments[3];
  // The call's operands already match the SPIR-V operand order:
  // pointer, scope, semantics, value.
885 MIRBuilder.buildInstr(Opcode)
886 .addDef(Call->ReturnRegister)
887 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
888 .addUse(PtrReg)
889 .addUse(ScopeReg)
890 .addUse(MemSemanticsReg)
891 .addUse(ValueReg);
892 return true;
893}
894
895/// Helper function for building atomic flag instructions (e.g.
896/// OpAtomicFlagTestAndSet).
898 unsigned Opcode, MachineIRBuilder &MIRBuilder,
  // Only the test-and-set form produces a result value.
900 bool IsSet = Opcode == SPIRV::OpAtomicFlagTestAndSet;
901 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
902 if (Call->isSpirvOp())
903 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
904 IsSet ? TypeReg : Register(0));
905
906 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
907 Register PtrRegister = Call->Arguments[0];
908 unsigned Semantics = SPIRV::MemorySemantics::SequentiallyConsistent;
909 Register MemSemanticsReg =
910 Call->Arguments.size() >= 2 ? Call->Arguments[1] : Register();
911 MemSemanticsReg = buildMemSemanticsReg(MemSemanticsReg, PtrRegister,
912 Semantics, MIRBuilder, GR);
913
  // atomic_flag_clear may not use acquire or acquire-release semantics
  // (enforced by the assert below).
914 assert((Opcode != SPIRV::OpAtomicFlagClear ||
915 (Semantics != SPIRV::MemorySemantics::Acquire &&
916 Semantics != SPIRV::MemorySemantics::AcquireRelease)) &&
917 "Invalid memory order argument!");
918
919 Register ScopeRegister =
920 Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register();
921 ScopeRegister =
922 buildScopeReg(ScopeRegister, SPIRV::Scope::Device, MIRBuilder, GR, MRI);
923
924 auto MIB = MIRBuilder.buildInstr(Opcode);
925 if (IsSet)
926 MIB.addDef(Call->ReturnRegister).addUse(TypeReg);
927
928 MIB.addUse(PtrRegister).addUse(ScopeRegister).addUse(MemSemanticsReg);
929 return true;
930}
931
932/// Helper function for building barriers, i.e., memory/control ordering
933/// operations.
934static bool buildBarrierInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
935 MachineIRBuilder &MIRBuilder,
937 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
938 const auto *ST =
939 static_cast<const SPIRVSubtarget *>(&MIRBuilder.getMF().getSubtarget());
  // The split-barrier opcodes require the SPV_INTEL_split_barrier extension.
940 if ((Opcode == SPIRV::OpControlBarrierArriveINTEL ||
941 Opcode == SPIRV::OpControlBarrierWaitINTEL) &&
942 !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_split_barrier)) {
943 std::string DiagMsg = std::string(Builtin->Name) +
944 ": the builtin requires the following SPIR-V "
945 "extension: SPV_INTEL_split_barrier";
946 report_fatal_error(DiagMsg.c_str(), false);
947 }
948
949 if (Call->isSpirvOp())
950 return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0));
951
  // First argument: OpenCL mem_fence flags bitmask; map each CLK_*_MEM_FENCE
  // flag to the corresponding SPIR-V memory-semantics bit.
952 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
953 unsigned MemFlags = getIConstVal(Call->Arguments[0], MRI);
954 unsigned MemSemantics = SPIRV::MemorySemantics::None;
955
956 if (MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE)
957 MemSemantics |= SPIRV::MemorySemantics::WorkgroupMemory;
958
959 if (MemFlags & SPIRV::CLK_GLOBAL_MEM_FENCE)
960 MemSemantics |= SPIRV::MemorySemantics::CrossWorkgroupMemory;
961
962 if (MemFlags & SPIRV::CLK_IMAGE_MEM_FENCE)
963 MemSemantics |= SPIRV::MemorySemantics::ImageMemory;
964
  // Ordering bits depend on the opcode: explicit order operand for
  // OpMemoryBarrier, release/acquire for the split-barrier halves, and
  // sequentially consistent otherwise.
965 if (Opcode == SPIRV::OpMemoryBarrier)
966 MemSemantics = getSPIRVMemSemantics(static_cast<std::memory_order>(
967 getIConstVal(Call->Arguments[1], MRI))) |
968 MemSemantics;
969 else if (Opcode == SPIRV::OpControlBarrierArriveINTEL)
970 MemSemantics |= SPIRV::MemorySemantics::Release;
971 else if (Opcode == SPIRV::OpControlBarrierWaitINTEL)
972 MemSemantics |= SPIRV::MemorySemantics::Acquire;
973 else
974 MemSemantics |= SPIRV::MemorySemantics::SequentiallyConsistent;
975
  // Reuse the flags register when the computed semantics happen to equal it.
976 Register MemSemanticsReg =
977 MemFlags == MemSemantics
978 ? Call->Arguments[0]
979 : buildConstantIntReg32(MemSemantics, MIRBuilder, GR);
980 Register ScopeReg;
981 SPIRV::Scope::Scope Scope = SPIRV::Scope::Workgroup;
982 SPIRV::Scope::Scope MemScope = Scope;
983 if (Call->Arguments.size() >= 2) {
984 assert(
985 ((Opcode != SPIRV::OpMemoryBarrier && Call->Arguments.size() == 2) ||
986 (Opcode == SPIRV::OpMemoryBarrier && Call->Arguments.size() == 3)) &&
987 "Extra args for explicitly scoped barrier");
988 Register ScopeArg = (Opcode == SPIRV::OpMemoryBarrier) ? Call->Arguments[2]
989 : Call->Arguments[1];
990 SPIRV::CLMemoryScope CLScope =
991 static_cast<SPIRV::CLMemoryScope>(getIConstVal(ScopeArg, MRI));
992 MemScope = getSPIRVScope(CLScope);
993 if (!(MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE) ||
994 (Opcode == SPIRV::OpMemoryBarrier))
995 Scope = MemScope;
  // NOTE(review): the reuse below always takes Arguments[1], even when
  // ScopeArg came from Arguments[2] (OpMemoryBarrier case) — confirm
  // this is intended.
996 if (CLScope == static_cast<unsigned>(Scope))
997 ScopeReg = Call->Arguments[1];
998 }
999
1000 if (!ScopeReg.isValid())
1001 ScopeReg = buildConstantIntReg32(Scope, MIRBuilder, GR);
1002
1003 auto MIB = MIRBuilder.buildInstr(Opcode).addUse(ScopeReg);
1004 if (Opcode != SPIRV::OpMemoryBarrier)
1005 MIB.addUse(buildConstantIntReg32(MemScope, MIRBuilder, GR));
1006 MIB.addUse(MemSemanticsReg);
1007 return true;
1008}
1009
1010/// Helper function for building extended bit operations.
1012 unsigned Opcode,
1013 MachineIRBuilder &MIRBuilder,
1014 SPIRVGlobalRegistry *GR) {
1015 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1016 const auto *ST =
1017 static_cast<const SPIRVSubtarget *>(&MIRBuilder.getMF().getSubtarget());
1018 if ((Opcode == SPIRV::OpBitFieldInsert ||
1019 Opcode == SPIRV::OpBitFieldSExtract ||
1020 Opcode == SPIRV::OpBitFieldUExtract || Opcode == SPIRV::OpBitReverse) &&
1021 !ST->canUseExtension(SPIRV::Extension::SPV_KHR_bit_instructions)) {
1022 std::string DiagMsg = std::string(Builtin->Name) +
1023 ": the builtin requires the following SPIR-V "
1024 "extension: SPV_KHR_bit_instructions";
1025 report_fatal_error(DiagMsg.c_str(), false);
1026 }
1027
1028 // Generate SPIRV instruction accordingly.
1029 if (Call->isSpirvOp())
1030 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
1031 GR->getSPIRVTypeID(Call->ReturnType));
1032
1033 auto MIB = MIRBuilder.buildInstr(Opcode)
1034 .addDef(Call->ReturnRegister)
1035 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
1036 for (unsigned i = 0; i < Call->Arguments.size(); ++i)
1037 MIB.addUse(Call->Arguments[i]);
1038
1039 return true;
1040}
1041
1042/// Helper function for building Intel's bindless image instructions.
1044 unsigned Opcode,
1045 MachineIRBuilder &MIRBuilder,
1046 SPIRVGlobalRegistry *GR) {
1047 // Generate SPIRV instruction accordingly.
1048 if (Call->isSpirvOp())
1049 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
1050 GR->getSPIRVTypeID(Call->ReturnType));
1051
1052 MIRBuilder.buildInstr(Opcode)
1053 .addDef(Call->ReturnRegister)
1054 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1055 .addUse(Call->Arguments[0]);
1056
1057 return true;
1058}
1059
1060/// Helper function for building Intel's OpBitwiseFunctionINTEL instruction.
1062 const SPIRV::IncomingCall *Call, unsigned Opcode,
1063 MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR) {
1064 // Generate SPIRV instruction accordingly.
1065 if (Call->isSpirvOp())
1066 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
1067 GR->getSPIRVTypeID(Call->ReturnType));
1068
1069 auto MIB = MIRBuilder.buildInstr(Opcode)
1070 .addDef(Call->ReturnRegister)
1071 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
1072 for (unsigned i = 0; i < Call->Arguments.size(); ++i)
1073 MIB.addUse(Call->Arguments[i]);
1074
1075 return true;
1076}
1077
                                     unsigned Opcode,
                                     MachineIRBuilder &MIRBuilder,
                                     SPIRVGlobalRegistry *GR) {
  // __spirv_* wrapper calls already carry explicit operands; forward them
  // together with the result-type id.
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  // Otherwise emit `Opcode` with the call's return register/type as the
  // result and every call argument as a plain id operand.
  auto MIB = MIRBuilder.buildInstr(Opcode)
                 .addDef(Call->ReturnRegister)
                 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
  for (unsigned i = 0; i < Call->Arguments.size(); ++i)
    MIB.addUse(Call->Arguments[i]);

  return true;
}
1094
1095/// Helper function for building Intel's 2d block io instructions.
1097 unsigned Opcode,
1098 MachineIRBuilder &MIRBuilder,
1099 SPIRVGlobalRegistry *GR) {
1100 // Generate SPIRV instruction accordingly.
1101 if (Call->isSpirvOp())
1102 return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0));
1103
1104 auto MIB = MIRBuilder.buildInstr(Opcode)
1105 .addDef(Call->ReturnRegister)
1106 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
1107 for (unsigned i = 0; i < Call->Arguments.size(); ++i)
1108 MIB.addUse(Call->Arguments[i]);
1109
1110 return true;
1111}
1112
1113static bool buildPipeInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
1114 unsigned Scope, MachineIRBuilder &MIRBuilder,
1115 SPIRVGlobalRegistry *GR) {
1116 switch (Opcode) {
1117 case SPIRV::OpCommitReadPipe:
1118 case SPIRV::OpCommitWritePipe:
1119 return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0));
1120 case SPIRV::OpGroupCommitReadPipe:
1121 case SPIRV::OpGroupCommitWritePipe:
1122 case SPIRV::OpGroupReserveReadPipePackets:
1123 case SPIRV::OpGroupReserveWritePipePackets: {
1124 Register ScopeConstReg =
1125 MIRBuilder.buildConstant(LLT::scalar(32), Scope).getReg(0);
1126 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1127 MRI->setRegClass(ScopeConstReg, &SPIRV::iIDRegClass);
1129 MIB = MIRBuilder.buildInstr(Opcode);
1130 // Add Return register and type.
1131 if (Opcode == SPIRV::OpGroupReserveReadPipePackets ||
1132 Opcode == SPIRV::OpGroupReserveWritePipePackets)
1133 MIB.addDef(Call->ReturnRegister)
1134 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
1135
1136 MIB.addUse(ScopeConstReg);
1137 for (unsigned int i = 0; i < Call->Arguments.size(); ++i)
1138 MIB.addUse(Call->Arguments[i]);
1139
1140 return true;
1141 }
1142 default:
1143 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
1144 GR->getSPIRVTypeID(Call->ReturnType));
1145 }
1146}
1147
1148static unsigned getNumComponentsForDim(SPIRV::Dim::Dim dim) {
1149 switch (dim) {
1150 case SPIRV::Dim::DIM_1D:
1151 case SPIRV::Dim::DIM_Buffer:
1152 return 1;
1153 case SPIRV::Dim::DIM_2D:
1154 case SPIRV::Dim::DIM_Cube:
1155 case SPIRV::Dim::DIM_Rect:
1156 return 2;
1157 case SPIRV::Dim::DIM_3D:
1158 return 3;
1159 default:
1160 report_fatal_error("Cannot get num components for given Dim");
1161 }
1162}
1163
1164/// Helper function for obtaining the number of size components.
1165static unsigned getNumSizeComponents(SPIRVTypeInst imgType) {
1166 assert(imgType->getOpcode() == SPIRV::OpTypeImage);
1167 auto dim = static_cast<SPIRV::Dim::Dim>(imgType->getOperand(2).getImm());
1168 unsigned numComps = getNumComponentsForDim(dim);
1169 bool arrayed = imgType->getOperand(4).getImm() == 1;
1170 return arrayed ? numComps + 1 : numComps;
1171}
1172
1173static bool builtinMayNeedPromotionToVec(uint32_t BuiltinNumber) {
1174 switch (BuiltinNumber) {
1175 case SPIRV::OpenCLExtInst::s_min:
1176 case SPIRV::OpenCLExtInst::u_min:
1177 case SPIRV::OpenCLExtInst::s_max:
1178 case SPIRV::OpenCLExtInst::u_max:
1179 case SPIRV::OpenCLExtInst::fmax:
1180 case SPIRV::OpenCLExtInst::fmin:
1181 case SPIRV::OpenCLExtInst::fmax_common:
1182 case SPIRV::OpenCLExtInst::fmin_common:
1183 case SPIRV::OpenCLExtInst::s_clamp:
1184 case SPIRV::OpenCLExtInst::fclamp:
1185 case SPIRV::OpenCLExtInst::u_clamp:
1186 case SPIRV::OpenCLExtInst::mix:
1187 case SPIRV::OpenCLExtInst::step:
1188 case SPIRV::OpenCLExtInst::smoothstep:
1189 return true;
1190 default:
1191 break;
1192 }
1193 return false;
1194}
1195
1196//===----------------------------------------------------------------------===//
1197// Implementation functions for each builtin group
1198//===----------------------------------------------------------------------===//
1199
1202 MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR) {
1203
1204 Register ReturnTypeId = GR->getSPIRVTypeID(Call->ReturnType);
1205 unsigned ResultElementCount =
1206 GR->getScalarOrVectorComponentCount(ReturnTypeId);
1207 bool MayNeedPromotionToVec =
1208 builtinMayNeedPromotionToVec(BuiltinNumber) && ResultElementCount > 1;
1209
1210 if (!MayNeedPromotionToVec)
1211 return {Call->Arguments.begin(), Call->Arguments.end()};
1212
1214 for (Register Argument : Call->Arguments) {
1215 Register VecArg = Argument;
1216 SPIRVTypeInst ArgumentType = GR->getSPIRVTypeForVReg(Argument);
1217 if (ArgumentType != Call->ReturnType) {
1218 VecArg = createVirtualRegister(Call->ReturnType, GR, MIRBuilder);
1219 auto VecSplat = MIRBuilder.buildInstr(SPIRV::OpCompositeConstruct)
1220 .addDef(VecArg)
1221 .addUse(ReturnTypeId);
1222 for (unsigned I = 0; I != ResultElementCount; ++I)
1223 VecSplat.addUse(Argument);
1224 }
1225 Arguments.push_back(VecArg);
1226 }
1227 return Arguments;
1228}
1229
/// Lower an OpenCL extended-instruction builtin into OpExtInst (or a core
/// instruction when an extension supplies one), propagating fast-math flags
/// derived from the call site's IR flags and nofpclass attributes.
static bool generateExtInst(const SPIRV::IncomingCall *Call,
                            MachineIRBuilder &MIRBuilder,
                            SPIRVGlobalRegistry *GR, const CallBase &CB) {
  // Lookup the extended instruction number in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  uint32_t Number =
      SPIRV::lookupExtendedBuiltin(Builtin->Name, Builtin->Set)->Number;
  // fmin_common and fmax_common are now deprecated, and we should use fmin and
  // fmax with NotInf and NotNaN flags instead. Keep original number to add
  // later the NoNans and NoInfs flags.
  uint32_t OrigNumber = Number;
  const SPIRVSubtarget &ST =
      cast<SPIRVSubtarget>(MIRBuilder.getMF().getSubtarget());
  if (ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2) &&
      (Number == SPIRV::OpenCLExtInst::fmin_common ||
       Number == SPIRV::OpenCLExtInst::fmax_common)) {
    Number = (Number == SPIRV::OpenCLExtInst::fmin_common)
                 ? SPIRV::OpenCLExtInst::fmin
                 : SPIRV::OpenCLExtInst::fmax;
  }

  Register ReturnTypeId = GR->getSPIRVTypeID(Call->ReturnType);
  // Splat scalar operands to the vector result type where the builtin
  // requires operands of the result shape.
  SmallVector<Register> Arguments =
      getBuiltinCallArguments(Call, Number, MIRBuilder, GR);

  MachineInstrBuilder MIB;
  if (ST.canUseExtension(SPIRV::Extension::SPV_KHR_fma) &&
      Number == SPIRV::OpenCLExtInst::fma) {
    // Use the SPIR-V fma instruction instead of the OpenCL extended
    // instruction if the extension is available.
    MIB = MIRBuilder.buildInstr(SPIRV::OpFmaKHR)
              .addDef(Call->ReturnRegister)
              .addUse(ReturnTypeId);
  } else {
    // Build extended instruction.
    MIB = MIRBuilder.buildInstr(SPIRV::OpExtInst)
              .addDef(Call->ReturnRegister)
              .addUse(ReturnTypeId)
              .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
              .addImm(Number);
  }

  for (Register Argument : Arguments)
    MIB.addUse(Argument);

  // Carry the call site's IR fast-math flags onto the machine instruction.
  MIB.getInstr()->copyIRFlags(CB);
  if (OrigNumber == SPIRV::OpenCLExtInst::fmin_common ||
      OrigNumber == SPIRV::OpenCLExtInst::fmax_common) {
    // Add NoNans and NoInfs flags to fmin/fmax instruction.
    MIB->setFlag(MachineInstr::MIFlag::FmNoNans);
    MIB->setFlag(MachineInstr::MIFlag::FmNoInfs);
  }

  // Derive fast-math flags from nofpclass attributes on the called function.
  // FPFastMathMode decoration is valid on ExtInst in Kernel environments
  // (SPIR-V core) or with SPV_KHR_float_controls2 for any environment.
  if (ST.isKernel() ||
      ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2)) {
    if (const Function *F = CB.getCalledFunction()) {
      bool AddNoNan = CB.getRetNoFPClass() & fcNan;
      bool AddNoInf = CB.getRetNoFPClass() & fcInf;
      FunctionType *FTy = F->getFunctionType();
      // A flag only holds when every floating-point parameter also excludes
      // the corresponding class.
      for (unsigned I = 0, E = FTy->getNumParams();
           I != E && (AddNoNan || AddNoInf); ++I) {
        if (!FTy->getParamType(I)->isFloatingPointTy())
          continue;
        FPClassTest ArgTest = CB.getParamNoFPClass(I);
        AddNoNan = AddNoNan && ArgTest & fcNan;
        AddNoInf = AddNoInf && ArgTest & fcInf;
      }
      if (AddNoNan)
        MIB->setFlag(MachineInstr::MIFlag::FmNoNans);
      if (AddNoInf)
        MIB->setFlag(MachineInstr::MIFlag::FmNoInfs);
    }
  }

  return true;
}
1309
1311 MachineIRBuilder &MIRBuilder,
1312 SPIRVGlobalRegistry *GR) {
1313 // Lookup the instruction opcode in the TableGen records.
1314 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1315 unsigned Opcode =
1316 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1317
1318 Register CompareRegister;
1319 SPIRVTypeInst RelationType = nullptr;
1320 std::tie(CompareRegister, RelationType) =
1321 buildBoolRegister(MIRBuilder, Call->ReturnType, GR);
1322
1323 // Build relational instruction.
1324 auto MIB = MIRBuilder.buildInstr(Opcode)
1325 .addDef(CompareRegister)
1326 .addUse(GR->getSPIRVTypeID(RelationType));
1327
1328 for (auto Argument : Call->Arguments)
1329 MIB.addUse(Argument);
1330
1331 // Build select instruction.
1332 return buildSelectInst(MIRBuilder, Call->ReturnRegister, CompareRegister,
1333 Call->ReturnType, GR);
1334}
1335
/// Lower work-group / sub-group builtins into SPIR-V group instructions,
/// converting boolean arguments and results as required and assembling
/// OpGroupBroadcast's multi-component LocalId vector when needed.
static bool generateGroupInst(const SPIRV::IncomingCall *Call,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  const SPIRV::GroupBuiltin *GroupBuiltin =
      SPIRV::lookupGroupBuiltin(Builtin->Name);

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  if (Call->isSpirvOp()) {
    if (GroupBuiltin->NoGroupOperation) {
      SmallVector<uint32_t, 1> ImmArgs;
      // The 5th MMA operand is a literal immediate, not an id.
      if (GroupBuiltin->Opcode ==
              SPIRV::OpSubgroupMatrixMultiplyAccumulateINTEL &&
          Call->Arguments.size() > 4)
        ImmArgs.push_back(getConstFromIntrinsic(Call->Arguments[4], MRI));
      return buildOpFromWrapper(MIRBuilder, GroupBuiltin->Opcode, Call,
                                GR->getSPIRVTypeID(Call->ReturnType), ImmArgs);
    }

    // Group Operation is a literal
    Register GroupOpReg = Call->Arguments[1];
    const MachineInstr *MI = getDefInstrMaybeConstant(GroupOpReg, MRI);
    if (!MI || MI->getOpcode() != TargetOpcode::G_CONSTANT)
      report_fatal_error(
          "Group Operation parameter must be an integer constant");
    uint64_t GrpOp = MI->getOperand(1).getCImm()->getValue().getZExtValue();
    Register ScopeReg = Call->Arguments[0];
    auto MIB = MIRBuilder.buildInstr(GroupBuiltin->Opcode)
                   .addDef(Call->ReturnRegister)
                   .addUse(GR->getSPIRVTypeID(Call->ReturnType))
                   .addUse(ScopeReg)
                   .addImm(GrpOp);
    for (unsigned i = 2; i < Call->Arguments.size(); ++i)
      MIB.addUse(Call->Arguments[i]);
    return true;
  }

  Register Arg0;
  if (GroupBuiltin->HasBoolArg) {
    SPIRVTypeInst BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder, true);
    Register BoolReg = Call->Arguments[0];
    SPIRVTypeInst BoolRegType = GR->getSPIRVTypeForVReg(BoolReg);
    if (!BoolRegType)
      report_fatal_error("Can't find a register's type definition");
    MachineInstr *ArgInstruction = getDefInstrMaybeConstant(BoolReg, MRI);
    if (ArgInstruction->getOpcode() == TargetOpcode::G_CONSTANT) {
      // Constant argument: re-materialize it directly with boolean type.
      if (BoolRegType->getOpcode() != SPIRV::OpTypeBool)
        Arg0 = GR->buildConstantInt(getIConstVal(BoolReg, MRI), MIRBuilder,
                                    BoolType, true);
    } else {
      if (BoolRegType->getOpcode() == SPIRV::OpTypeInt) {
        // Non-constant integer: compare against zero to obtain a boolean.
        Arg0 = MRI->createGenericVirtualRegister(LLT::scalar(1));
        MRI->setRegClass(Arg0, &SPIRV::iIDRegClass);
        GR->assignSPIRVTypeToVReg(BoolType, Arg0, MIRBuilder.getMF());
        MIRBuilder.buildICmp(
            CmpInst::ICMP_NE, Arg0, BoolReg,
            GR->buildConstantInt(0, MIRBuilder, BoolRegType, true));
        updateRegType(Arg0, nullptr, BoolType, GR, MIRBuilder,
                      MIRBuilder.getMF().getRegInfo());
      } else if (BoolRegType->getOpcode() != SPIRV::OpTypeBool) {
        report_fatal_error("Expect a boolean argument");
      }
      // if BoolReg is a boolean register, we don't need to do anything
    }
  }

  Register GroupResultRegister = Call->ReturnRegister;
  SPIRVTypeInst GroupResultType = Call->ReturnType;

  // TODO: maybe we need to check whether the result type is already boolean
  // and in this case do not insert select instruction.
  const bool HasBoolReturnTy =
      GroupBuiltin->IsElect || GroupBuiltin->IsAllOrAny ||
      GroupBuiltin->IsAllEqual || GroupBuiltin->IsLogical ||
      GroupBuiltin->IsInverseBallot || GroupBuiltin->IsBallotBitExtract;

  if (HasBoolReturnTy)
    std::tie(GroupResultRegister, GroupResultType) =
        buildBoolRegister(MIRBuilder, Call->ReturnType, GR);

  auto Scope = Builtin->Name.starts_with("sub_group") ? SPIRV::Scope::Subgroup
                                                      : SPIRV::Scope::Workgroup;
  Register ScopeRegister = buildConstantIntReg32(Scope, MIRBuilder, GR);

  Register VecReg;
  if (GroupBuiltin->Opcode == SPIRV::OpGroupBroadcast &&
      Call->Arguments.size() > 2) {
    // For OpGroupBroadcast "LocalId must be an integer datatype. It must be a
    // scalar, a vector with 2 components, or a vector with 3 components.",
    // meaning that we must create a vector from the function arguments if
    // it's a work_group_broadcast(val, local_id_x, local_id_y) or
    // work_group_broadcast(val, local_id_x, local_id_y, local_id_z) call.
    Register ElemReg = Call->Arguments[1];
    SPIRVTypeInst ElemType = GR->getSPIRVTypeForVReg(ElemReg);
    if (!ElemType || ElemType->getOpcode() != SPIRV::OpTypeInt)
      report_fatal_error("Expect an integer <LocalId> argument");
    unsigned VecLen = Call->Arguments.size() - 1;
    VecReg = MRI->createGenericVirtualRegister(
        LLT::fixed_vector(VecLen, MRI->getType(ElemReg)));
    MRI->setRegClass(VecReg, &SPIRV::vIDRegClass);
    SPIRVTypeInst VecType =
        GR->getOrCreateSPIRVVectorType(ElemType, VecLen, MIRBuilder, true);
    GR->assignSPIRVTypeToVReg(VecType, VecReg, MIRBuilder.getMF());
    auto MIB =
        MIRBuilder.buildInstr(TargetOpcode::G_BUILD_VECTOR).addDef(VecReg);
    for (unsigned i = 1; i < Call->Arguments.size(); i++) {
      MIB.addUse(Call->Arguments[i]);
      setRegClassIfNull(Call->Arguments[i], MRI, GR);
    }
    updateRegType(VecReg, nullptr, VecType, GR, MIRBuilder,
                  MIRBuilder.getMF().getRegInfo());
  }

  // Build work/sub group instruction.
  auto MIB = MIRBuilder.buildInstr(GroupBuiltin->Opcode)
                 .addDef(GroupResultRegister)
                 .addUse(GR->getSPIRVTypeID(GroupResultType))
                 .addUse(ScopeRegister);

  if (!GroupBuiltin->NoGroupOperation)
    MIB.addImm(GroupBuiltin->GroupOperation);
  if (Call->Arguments.size() > 0) {
    // Prefer the boolean-converted first argument when one was created.
    MIB.addUse(Arg0.isValid() ? Arg0 : Call->Arguments[0]);
    setRegClassIfNull(Call->Arguments[0], MRI, GR);
    if (VecReg.isValid())
      MIB.addUse(VecReg);
    else
      for (unsigned i = 1; i < Call->Arguments.size(); i++)
        MIB.addUse(Call->Arguments[i]);
  }

  // Build select instruction.
  if (HasBoolReturnTy)
    buildSelectInst(MIRBuilder, Call->ReturnRegister, GroupResultRegister,
                    Call->ReturnType, GR);
  return true;
}
1473
/// Lower Intel sub-group builtins (shuffles, block reads/writes, media block
/// io), guarded by the SPV_INTEL_subgroups / SPV_INTEL_media_block_io
/// extensions.
static bool generateIntelSubgroupsInst(const SPIRV::IncomingCall *Call,
                                       MachineIRBuilder &MIRBuilder,
                                       SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  MachineFunction &MF = MIRBuilder.getMF();
  const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
  const SPIRV::IntelSubgroupsBuiltin *IntelSubgroups =
      SPIRV::lookupIntelSubgroupsBuiltin(Builtin->Name);

  // Each builtin family requires its own extension.
  if (IntelSubgroups->IsMedia &&
      !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_media_block_io)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_INTEL_media_block_io";
    report_fatal_error(DiagMsg.c_str(), false);
  } else if (!IntelSubgroups->IsMedia &&
             !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_subgroups)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_INTEL_subgroups";
    report_fatal_error(DiagMsg.c_str(), false);
  }

  uint32_t OpCode = IntelSubgroups->Opcode;
  if (Call->isSpirvOp()) {
    // Write instructions produce no result id; everything else does.
    bool IsSet = OpCode != SPIRV::OpSubgroupBlockWriteINTEL &&
                 OpCode != SPIRV::OpSubgroupImageBlockWriteINTEL &&
                 OpCode != SPIRV::OpSubgroupImageMediaBlockWriteINTEL;
    return buildOpFromWrapper(MIRBuilder, OpCode, Call,
                              IsSet ? GR->getSPIRVTypeID(Call->ReturnType)
                                    : Register(0));
  }

  if (IntelSubgroups->IsBlock) {
    // Minimal number or arguments set in TableGen records is 1
    if (SPIRVTypeInst Arg0Type = GR->getSPIRVTypeForVReg(Call->Arguments[0])) {
      if (Arg0Type->getOpcode() == SPIRV::OpTypeImage) {
        // TODO: add required validation from the specification:
        // "'Image' must be an object whose type is OpTypeImage with a 'Sampled'
        // operand of 0 or 2. If the 'Sampled' operand is 2, then some
        // dimensions require a capability."
        // An image first argument selects the image flavour of the opcode.
        switch (OpCode) {
        case SPIRV::OpSubgroupBlockReadINTEL:
          OpCode = SPIRV::OpSubgroupImageBlockReadINTEL;
          break;
        case SPIRV::OpSubgroupBlockWriteINTEL:
          OpCode = SPIRV::OpSubgroupImageBlockWriteINTEL;
          break;
        }
      }
    }
  }

  // TODO: opaque pointers types should be eventually resolved in such a way
  // that validation of block read is enabled with respect to the following
  // specification requirement:
  // "'Result Type' may be a scalar or vector type, and its component type must
  // be equal to the type pointed to by 'Ptr'."
  // For example, function parameter type should not be default i8 pointer, but
  // depend on the result type of the instruction where it is used as a pointer
  // argument of OpSubgroupBlockReadINTEL

  // Build Intel subgroups instruction
  MachineInstrBuilder MIB =
      IntelSubgroups->IsWrite
          ? MIRBuilder.buildInstr(OpCode)
          : MIRBuilder.buildInstr(OpCode)
                .addDef(Call->ReturnRegister)
                .addUse(GR->getSPIRVTypeID(Call->ReturnType));
  for (size_t i = 0; i < Call->Arguments.size(); ++i)
    MIB.addUse(Call->Arguments[i]);
  return true;
}
1547
/// Lower SPV_KHR_uniform_group_instructions builtins. The group-operation
/// argument must be a compile-time integer constant; it is emitted as an
/// immediate operand.
static bool generateGroupUniformInst(const SPIRV::IncomingCall *Call,
                                     MachineIRBuilder &MIRBuilder,
                                     SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  MachineFunction &MF = MIRBuilder.getMF();
  const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
  if (!ST->canUseExtension(
          SPIRV::Extension::SPV_KHR_uniform_group_instructions)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_KHR_uniform_group_instructions";
    report_fatal_error(DiagMsg.c_str(), false);
  }
  const SPIRV::GroupUniformBuiltin *GroupUniform =
      SPIRV::lookupGroupUniformBuiltin(Builtin->Name);
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  // Operand layout: scope, group operation (constant), value.
  Register GroupResultReg = Call->ReturnRegister;
  Register ScopeReg = Call->Arguments[0];
  Register ValueReg = Call->Arguments[2];

  // Group Operation
  Register ConstGroupOpReg = Call->Arguments[1];
  const MachineInstr *Const = getDefInstrMaybeConstant(ConstGroupOpReg, MRI);
  if (!Const || Const->getOpcode() != TargetOpcode::G_CONSTANT)
    report_fatal_error(
        "expect a constant group operation for a uniform group instruction",
        false);
  const MachineOperand &ConstOperand = Const->getOperand(1);
  if (!ConstOperand.isCImm())
    report_fatal_error("uniform group instructions: group operation must be an "
                       "integer constant",
                       false);

  auto MIB = MIRBuilder.buildInstr(GroupUniform->Opcode)
                 .addDef(GroupResultReg)
                 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
                 .addUse(ScopeReg);
  // Emit the group operation as an immediate.
  addNumImm(ConstOperand.getCImm()->getValue(), MIB);
  MIB.addUse(ValueReg);

  return true;
}
1591
/// Lower shader-clock builtins (SPV_KHR_shader_clock) into OpReadClockKHR.
/// The __spirv_ReadClockKHR form takes the scope as an argument; the OpenCL
/// clock_read_* forms encode the scope in the builtin's name suffix.
static bool generateKernelClockInst(const SPIRV::IncomingCall *Call,
                                    MachineIRBuilder &MIRBuilder,
                                    SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  MachineFunction &MF = MIRBuilder.getMF();
  const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
  if (!ST->canUseExtension(SPIRV::Extension::SPV_KHR_shader_clock)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_KHR_shader_clock";
    report_fatal_error(DiagMsg.c_str(), false);
  }

  Register ResultReg = Call->ReturnRegister;

  if (Builtin->Name == "__spirv_ReadClockKHR") {
    // Scope is supplied explicitly by the caller.
    MIRBuilder.buildInstr(SPIRV::OpReadClockKHR)
        .addDef(ResultReg)
        .addUse(GR->getSPIRVTypeID(Call->ReturnType))
        .addUse(Call->Arguments[0]);
  } else {
    // Deduce the `Scope` operand from the builtin function name.
    SPIRV::Scope::Scope ScopeArg =
        StringSwitch<SPIRV::Scope::Scope>(Builtin->Name)
            .EndsWith("device", SPIRV::Scope::Scope::Device)
            .EndsWith("work_group", SPIRV::Scope::Scope::Workgroup)
            .EndsWith("sub_group", SPIRV::Scope::Scope::Subgroup);
    Register ScopeReg = buildConstantIntReg32(ScopeArg, MIRBuilder, GR);

    MIRBuilder.buildInstr(SPIRV::OpReadClockKHR)
        .addDef(ResultReg)
        .addUse(GR->getSPIRVTypeID(Call->ReturnType))
        .addUse(ScopeReg);
  }

  return true;
}
1629
// These queries ask for a single size_t result for a given dimension index,
// e.g. size_t get_global_id(uint dimindex). In SPIR-V, the builtins
// corresponding to these values are all vec3 types, so we need to extract the
// correct index or return DefaultValue (0 or 1 depending on the query). We also
// handle extending or truncating in case size_t does not match the expected
// result type's bitwidth.
//
// For a constant index >= 3 we generate:
// %res = OpConstant %SizeT DefaultValue
//
// For other indices we generate:
// %g = OpVariable %ptr_V3_SizeT Input
// OpDecorate %g BuiltIn XXX
// OpDecorate %g LinkageAttributes "__spirv_BuiltInXXX"
// OpDecorate %g Constant
// %loadedVec = OpLoad %V3_SizeT %g
//
// Then, if the index is constant < 3, we generate:
// %res = OpCompositeExtract %SizeT %loadedVec idx
// If the index is dynamic, we generate:
// %tmp = OpVectorExtractDynamic %SizeT %loadedVec %idx
// %cmp = OpULessThan %bool %idx %const_3
// %res = OpSelect %SizeT %cmp %tmp %const_<DefaultValue>
//
// If the bitwidth of %res does not match the expected return type, we add an
// extend or truncate.
static bool genWorkgroupQuery(const SPIRV::IncomingCall *Call,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR,
                              SPIRV::BuiltIn::BuiltIn BuiltinValue,
                              uint64_t DefaultValue) {
  Register IndexRegister = Call->Arguments[0];
  const unsigned ResultWidth = Call->ReturnType->getOperand(1).getImm();
  const unsigned PointerSize = GR->getPointerSize();
  const SPIRVTypeInst PointerSizeType =
      GR->getOrCreateSPIRVIntegerType(PointerSize, MIRBuilder);
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  auto IndexInstruction = getDefInstrMaybeConstant(IndexRegister, MRI);

  // Set up the final register to do truncation or extension on at the end.
  Register ToTruncate = Call->ReturnRegister;

  // If the index is constant, we can statically determine if it is in range.
  bool IsConstantIndex =
      IndexInstruction->getOpcode() == TargetOpcode::G_CONSTANT;

  // If it's out of range (max dimension is 3), we can just return the constant
  // default value (0 or 1 depending on which query function).
  if (IsConstantIndex && getIConstVal(IndexRegister, MRI) >= 3) {
    Register DefaultReg = Call->ReturnRegister;
    if (PointerSize != ResultWidth) {
      // Widths differ: build the default into a pointer-width temporary and
      // convert at the end.
      DefaultReg = MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
      MRI->setRegClass(DefaultReg, &SPIRV::iIDRegClass);
      GR->assignSPIRVTypeToVReg(PointerSizeType, DefaultReg,
                                MIRBuilder.getMF());
      ToTruncate = DefaultReg;
    }
    auto NewRegister =
        GR->buildConstantInt(DefaultValue, MIRBuilder, PointerSizeType, true);
    MIRBuilder.buildCopy(DefaultReg, NewRegister);
  } else { // If it could be in range, we need to load from the given builtin.
    auto Vec3Ty =
        GR->getOrCreateSPIRVVectorType(PointerSizeType, 3, MIRBuilder, true);
    Register LoadedVector =
        buildBuiltinVariableLoad(MIRBuilder, Vec3Ty, GR, BuiltinValue,
                                 LLT::fixed_vector(3, PointerSize));
    // Set up the vreg to extract the result to (possibly a new temporary one).
    Register Extracted = Call->ReturnRegister;
    if (!IsConstantIndex || PointerSize != ResultWidth) {
      Extracted = MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
      MRI->setRegClass(Extracted, &SPIRV::iIDRegClass);
      GR->assignSPIRVTypeToVReg(PointerSizeType, Extracted, MIRBuilder.getMF());
    }
    // Use Intrinsic::spv_extractelt so dynamic vs static extraction is
    // handled later: extr = spv_extractelt LoadedVector, IndexRegister.
    MachineInstrBuilder ExtractInst = MIRBuilder.buildIntrinsic(
        Intrinsic::spv_extractelt, ArrayRef<Register>{Extracted}, true, false);
    ExtractInst.addUse(LoadedVector).addUse(IndexRegister);

    // If the index is dynamic, need check if it's < 3, and then use a select.
    if (!IsConstantIndex) {
      updateRegType(Extracted, nullptr, PointerSizeType, GR, MIRBuilder, *MRI);

      auto IndexType = GR->getSPIRVTypeForVReg(IndexRegister);
      auto BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder, true);

      Register CompareRegister =
          MRI->createGenericVirtualRegister(LLT::scalar(1));
      MRI->setRegClass(CompareRegister, &SPIRV::iIDRegClass);
      GR->assignSPIRVTypeToVReg(BoolType, CompareRegister, MIRBuilder.getMF());

      // Use G_ICMP to check if idxVReg < 3.
      MIRBuilder.buildICmp(
          CmpInst::ICMP_ULT, CompareRegister, IndexRegister,
          GR->buildConstantInt(3, MIRBuilder, IndexType, true));

      // Get constant for the default value (0 or 1 depending on which
      // function).
      Register DefaultRegister =
          GR->buildConstantInt(DefaultValue, MIRBuilder, PointerSizeType, true);

      // Get a register for the selection result (possibly a new temporary one).
      Register SelectionResult = Call->ReturnRegister;
      if (PointerSize != ResultWidth) {
        SelectionResult =
            MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
        MRI->setRegClass(SelectionResult, &SPIRV::iIDRegClass);
        GR->assignSPIRVTypeToVReg(PointerSizeType, SelectionResult,
                                  MIRBuilder.getMF());
      }
      // Create the final G_SELECT to return the extracted value or the default.
      MIRBuilder.buildSelect(SelectionResult, CompareRegister, Extracted,
                             DefaultRegister);
      ToTruncate = SelectionResult;
    } else {
      ToTruncate = Extracted;
    }
  }
  // Alter the result's bitwidth if it does not match the SizeT value extracted.
  if (PointerSize != ResultWidth)
    MIRBuilder.buildZExtOrTrunc(Call->ReturnRegister, ToTruncate);
  return true;
}
1753
1755 MachineIRBuilder &MIRBuilder,
1756 SPIRVGlobalRegistry *GR) {
1757 // Lookup the builtin variable record.
1758 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1759 SPIRV::BuiltIn::BuiltIn Value =
1760 SPIRV::lookupGetBuiltin(Builtin->Name, Builtin->Set)->Value;
1761
1762 if (Value == SPIRV::BuiltIn::GlobalInvocationId)
1763 return genWorkgroupQuery(Call, MIRBuilder, GR, Value, 0);
1764
1765 // Build a load instruction for the builtin variable.
1766 unsigned BitWidth = GR->getScalarOrVectorBitWidth(Call->ReturnType);
1767 LLT LLType;
1768 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeVector)
1769 LLType =
1770 LLT::fixed_vector(Call->ReturnType->getOperand(2).getImm(), BitWidth);
1771 else
1772 LLType = LLT::scalar(BitWidth);
1773
1774 return buildBuiltinVariableLoad(MIRBuilder, Call->ReturnType, GR, Value,
1775 LLType, Call->ReturnRegister);
1776}
1777
/// Dispatch an atomic builtin to the matching specialized builder based on
/// its native SPIR-V opcode from the TableGen records.
static bool generateAtomicInst(const SPIRV::IncomingCall *Call,
                               MachineIRBuilder &MIRBuilder,
                               SPIRVGlobalRegistry *GR) {
  // Lookup the instruction opcode in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;

  switch (Opcode) {
  case SPIRV::OpStore:
    // atomic_init lowers to a plain store.
    return buildAtomicInitInst(Call, MIRBuilder);
  case SPIRV::OpAtomicLoad:
    return buildAtomicLoadInst(Call, MIRBuilder, GR);
  case SPIRV::OpAtomicStore:
    return buildAtomicStoreInst(Call, MIRBuilder, GR);
  case SPIRV::OpAtomicCompareExchange:
  case SPIRV::OpAtomicCompareExchangeWeak:
    return buildAtomicCompareExchangeInst(Call, Builtin, Opcode, MIRBuilder,
                                          GR);
  case SPIRV::OpAtomicIAdd:
  case SPIRV::OpAtomicISub:
  case SPIRV::OpAtomicOr:
  case SPIRV::OpAtomicXor:
  case SPIRV::OpAtomicAnd:
  case SPIRV::OpAtomicExchange:
    return buildAtomicRMWInst(Call, Opcode, MIRBuilder, GR);
  case SPIRV::OpMemoryBarrier:
    return buildBarrierInst(Call, SPIRV::OpMemoryBarrier, MIRBuilder, GR);
  case SPIRV::OpAtomicFlagTestAndSet:
  case SPIRV::OpAtomicFlagClear:
    return buildAtomicFlagInst(Call, Opcode, MIRBuilder, GR);
  default:
    // Other opcodes are only handled as explicit __spirv_* wrappers.
    if (Call->isSpirvOp())
      return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                                GR->getSPIRVTypeID(Call->ReturnType));
    return false;
  }
}
1816
// Dispatches floating-point atomic builtins (SPV_EXT_shader_atomic_float_*)
// to buildAtomicFloatingRMWInst; anything else is rejected.
// NOTE(review): the signature line (name — presumably
// generateAtomicFloatingInst) is missing from this copy.
1818 MachineIRBuilder &MIRBuilder,
1819 SPIRVGlobalRegistry *GR) {
1820 // Lookup the instruction opcode in the TableGen records.
1821 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1822 unsigned Opcode = SPIRV::lookupAtomicFloatingBuiltin(Builtin->Name)->Opcode;
1823
1824 switch (Opcode) {
1825 case SPIRV::OpAtomicFAddEXT:
1826 case SPIRV::OpAtomicFMinEXT:
1827 case SPIRV::OpAtomicFMaxEXT:
1828 return buildAtomicFloatingRMWInst(Call, Opcode, MIRBuilder, GR);
1829 default:
1830 return false;
1831 }
1832}
1833
// Lowers a barrier builtin by emitting the barrier opcode recorded for it in
// the TableGen NativeBuiltins table via buildBarrierInst.
// NOTE(review): the signature line (name — presumably generateBarrierInst) is
// missing from this copy.
1835 MachineIRBuilder &MIRBuilder,
1836 SPIRVGlobalRegistry *GR) {
1837 // Lookup the instruction opcode in the TableGen records.
1838 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1839 unsigned Opcode =
1840 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1841
1842 return buildBarrierInst(Call, Opcode, MIRBuilder, GR);
1843}
1844
// Lowers address-space cast builtins. For OpGenericCastToPtrExplicit the
// destination storage class must be generic-castable and is emitted as an
// immediate operand; every other opcode falls back to a plain
// G_ADDRSPACE_CAST that later selection resolves.
// NOTE(review): the signature line (function name) is missing from this copy.
1846 MachineIRBuilder &MIRBuilder,
1847 SPIRVGlobalRegistry *GR) {
1848 // Lookup the instruction opcode in the TableGen records.
1849 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1850 unsigned Opcode =
1851 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1852
1853 if (Opcode == SPIRV::OpGenericCastToPtrExplicit) {
1854 SPIRV::StorageClass::StorageClass ResSC =
1855 GR->getPointerStorageClass(Call->ReturnRegister);
1856 if (!isGenericCastablePtr(ResSC))
1857 return false;
1858
1859 MIRBuilder.buildInstr(Opcode)
1860 .addDef(Call->ReturnRegister)
1861 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1862 .addUse(Call->Arguments[0])
1863 .addImm(ResSC);
1864 } else {
1865 MIRBuilder.buildInstr(TargetOpcode::G_ADDRSPACE_CAST)
1866 .addDef(Call->ReturnRegister)
1867 .addUse(Call->Arguments[0]);
1868 }
1869 return true;
1870}
1871
// Lowers dot/fmul-style builtins: OpDot for vector operands, OpFMulS for
// scalars, upgraded to the KHR integer-dot-product opcodes when the return is
// integer and the subtarget allows it (extension or SPIR-V >= 1.6). Mixed
// signedness uses OpSUDot* with operand order fixed so the signed operand
// comes first (IsSwapReq). Scalar integer dot products additionally carry the
// 4x8-bit packed-vector-format literal.
// NOTE(review): two lines are missing from this copy — the second signature
// line (the IncomingCall parameter) and the declaration of TypeStrs before
// the parseBuiltinTypeStr call; restore from upstream.
1872 static bool generateDotOrFMulInst(const StringRef DemangledCall,
1874 MachineIRBuilder &MIRBuilder,
1875 SPIRVGlobalRegistry *GR) {
1876 if (Call->isSpirvOp())
1877 return buildOpFromWrapper(MIRBuilder, SPIRV::OpDot, Call,
1878 GR->getSPIRVTypeID(Call->ReturnType));
1879
1880 bool IsVec = GR->getSPIRVTypeForVReg(Call->Arguments[0])->getOpcode() ==
1881 SPIRV::OpTypeVector;
1882 // Use OpDot only in case of vector args and OpFMul in case of scalar args.
1883 uint32_t OC = IsVec ? SPIRV::OpDot : SPIRV::OpFMulS;
1884 bool IsSwapReq = false;
1885
1886 const auto *ST =
1887 static_cast<const SPIRVSubtarget *>(&MIRBuilder.getMF().getSubtarget());
1888 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt) &&
1889 (ST->canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
1890 ST->isAtLeastSPIRVVer(VersionTuple(1, 6)))) {
1891 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1892 const SPIRV::IntegerDotProductBuiltin *IntDot =
1893 SPIRV::lookupIntegerDotProductBuiltin(Builtin->Name);
1894 if (IntDot) {
1895 OC = IntDot->Opcode;
1896 IsSwapReq = IntDot->IsSwapReq;
1897 } else if (IsVec) {
1898 // Handling "dot" and "dot_acc_sat" builtins which use vectors of
1899 // integers.
1900 LLVMContext &Ctx = MIRBuilder.getContext();
1902 SPIRV::parseBuiltinTypeStr(TypeStrs, DemangledCall, Ctx);
// Signedness is read from the demangled parameter type strings: a leading
// 'u' (after trimming) marks an unsigned operand.
1903 bool IsFirstSigned = TypeStrs[0].trim()[0] != 'u';
1904 bool IsSecondSigned = TypeStrs[1].trim()[0] != 'u';
1905
1906 if (Call->BuiltinName == "dot") {
1907 if (IsFirstSigned && IsSecondSigned)
1908 OC = SPIRV::OpSDot;
1909 else if (!IsFirstSigned && !IsSecondSigned)
1910 OC = SPIRV::OpUDot;
1911 else {
1912 OC = SPIRV::OpSUDot;
1913 if (!IsFirstSigned)
1914 IsSwapReq = true;
1915 }
1916 } else if (Call->BuiltinName == "dot_acc_sat") {
1917 if (IsFirstSigned && IsSecondSigned)
1918 OC = SPIRV::OpSDotAccSat;
1919 else if (!IsFirstSigned && !IsSecondSigned)
1920 OC = SPIRV::OpUDotAccSat;
1921 else {
1922 OC = SPIRV::OpSUDotAccSat;
1923 if (!IsFirstSigned)
1924 IsSwapReq = true;
1925 }
1926 }
1927 }
1928 }
1929
1930 MachineInstrBuilder MIB = MIRBuilder.buildInstr(OC)
1931 .addDef(Call->ReturnRegister)
1932 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
1933
1934 if (IsSwapReq) {
// OpSUDot* requires the signed operand first, so swap args 0 and 1.
1935 MIB.addUse(Call->Arguments[1]);
1936 MIB.addUse(Call->Arguments[0]);
1937 // needed for dot_acc_sat* builtins
1938 for (size_t i = 2; i < Call->Arguments.size(); ++i)
1939 MIB.addUse(Call->Arguments[i]);
1940 } else {
1941 for (size_t i = 0; i < Call->Arguments.size(); ++i)
1942 MIB.addUse(Call->Arguments[i]);
1943 }
1944
1945 // Add Packed Vector Format for Integer dot product builtins if arguments are
1946 // scalar
1947 if (!IsVec && OC != SPIRV::OpFMulS)
1948 MIB.addImm(SPIRV::PackedVectorFormat4x8Bit);
1949
1950 return true;
1951}
1952
// Lowers a wave/subgroup-style builtin to a load of the corresponding SPIR-V
// builtin variable; only a scalar integer return type is supported (assert).
// NOTE(review): two lines are missing from this copy — the signature line
// (name) and the line starting the buildBuiltinVariableLoad call before the
// argument list at "MIRBuilder, Call->ReturnType, ..."; restore upstream.
1954 MachineIRBuilder &MIRBuilder,
1955 SPIRVGlobalRegistry *GR) {
1956 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1957 SPIRV::BuiltIn::BuiltIn Value =
1958 SPIRV::lookupGetBuiltin(Builtin->Name, Builtin->Set)->Value;
1959
1960 // For now, we only support a single Wave intrinsic with a single return type.
1961 assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt);
1962 LLT LLType = LLT::scalar(GR->getScalarOrVectorBitWidth(Call->ReturnType));
1963
1965 MIRBuilder, Call->ReturnType, GR, Value, LLType, Call->ReturnRegister,
1966 /* isConst= */ false, /* LinkageType= */ std::nullopt);
1967}
1968
1969 // We expect a builtin
1970 // Name(ptr sret([RetType]) %result, Type %operand1, Type %operand1)
1971 // where %result is a pointer to where the result of the builtin execution
1972 // is to be stored, and generate the following instructions:
1973 // Res = Opcode RetType Operand1 Operand1
1974 // OpStore RetVariable Res
// Lowers add-with-carry / sub-with-borrow builtins: validates the sret struct
// result and matching operand types, switches to the vector opcode variant
// for vector operands, emits the op into a fresh register, then stores it
// through the sret pointer.
// NOTE(review): the signature line (name — presumably
// generateICarryBorrowInst) is missing from this copy.
1976 MachineIRBuilder &MIRBuilder,
1977 SPIRVGlobalRegistry *GR) {
1978 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1979 unsigned Opcode =
1980 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1981
1982 Register SRetReg = Call->Arguments[0];
1983 SPIRVTypeInst PtrRetType = GR->getSPIRVTypeForVReg(SRetReg);
1984 SPIRVTypeInst RetType = GR->getPointeeType(PtrRetType);
1985 if (!RetType)
1986 report_fatal_error("The first parameter must be a pointer");
1987 if (RetType->getOpcode() != SPIRV::OpTypeStruct)
1988 report_fatal_error("Expected struct type result for the arithmetic with "
1989 "overflow builtins");
1990
1991 SPIRVTypeInst OpType1 = GR->getSPIRVTypeForVReg(Call->Arguments[1]);
1992 SPIRVTypeInst OpType2 = GR->getSPIRVTypeForVReg(Call->Arguments[2]);
1993 if (!OpType1 || !OpType2 || OpType1 != OpType2)
1994 report_fatal_error("Operands must have the same type");
1995 if (OpType1->getOpcode() == SPIRV::OpTypeVector)
1996 switch (Opcode) {
1997 case SPIRV::OpIAddCarryS:
1998 Opcode = SPIRV::OpIAddCarryV;
1999 break;
2000 case SPIRV::OpISubBorrowS:
2001 Opcode = SPIRV::OpISubBorrowV;
2002 break;
2003 }
2004
2005 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2006 Register ResReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
// Mirror the operand's register class/LLT on the result when known;
// otherwise fall back to a 64-bit scalar LLT.
2007 if (const TargetRegisterClass *DstRC =
2008 MRI->getRegClassOrNull(Call->Arguments[1])) {
2009 MRI->setRegClass(ResReg, DstRC);
2010 MRI->setType(ResReg, MRI->getType(Call->Arguments[1]));
2011 } else {
2012 MRI->setType(ResReg, LLT::scalar(64));
2013 }
2014 GR->assignSPIRVTypeToVReg(RetType, ResReg, MIRBuilder.getMF());
2015 MIRBuilder.buildInstr(Opcode)
2016 .addDef(ResReg)
2017 .addUse(GR->getSPIRVTypeID(RetType))
2018 .addUse(Call->Arguments[1])
2019 .addUse(Call->Arguments[2]);
2020 MIRBuilder.buildInstr(SPIRV::OpStore).addUse(SRetReg).addUse(ResReg);
2021 return true;
2022}
2023
// Lowers get_global_size/get_num_groups/etc. via genWorkgroupQuery. The
// size-like builtins default out-of-range components to 1, the id-like ones
// to 0.
// NOTE(review): the signature line (name — presumably generateGetQueryInst)
// is missing from this copy.
2025 MachineIRBuilder &MIRBuilder,
2026 SPIRVGlobalRegistry *GR) {
2027 // Lookup the builtin record.
2028 SPIRV::BuiltIn::BuiltIn Value =
2029 SPIRV::lookupGetBuiltin(Call->Builtin->Name, Call->Builtin->Set)->Value;
2030 const bool IsDefaultOne = (Value == SPIRV::BuiltIn::GlobalSize ||
2031 Value == SPIRV::BuiltIn::NumWorkgroups ||
2032 Value == SPIRV::BuiltIn::WorkgroupSize ||
2033 Value == SPIRV::BuiltIn::EnqueuedWorkgroupSize);
2034 return genWorkgroupQuery(Call, MIRBuilder, GR, Value, IsDefaultOne ? 1 : 0);
2035}
2036
// Lowers image size-query builtins. Emits OpImageQuerySize (buffer images)
// or OpImageQuerySizeLod (with Lod 0), then reconciles the query's component
// count with the caller's expected count: extract one component with
// OpCompositeExtract, or widen with OpVectorShuffle (0xffffffff = undef lane).
// NOTE(review): the signature line (name — presumably
// generateImageSizeQueryInst) is missing from this copy.
2038 MachineIRBuilder &MIRBuilder,
2039 SPIRVGlobalRegistry *GR) {
2040 // Lookup the image size query component number in the TableGen records.
2041 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2042 uint32_t Component =
2043 SPIRV::lookupImageQueryBuiltin(Builtin->Name, Builtin->Set)->Component;
2044 // Query result may either be a vector or a scalar. If return type is not a
2045 // vector, expect only a single size component. Otherwise get the number of
2046 // expected components.
2047 unsigned NumExpectedRetComponents =
2048 Call->ReturnType->getOpcode() == SPIRV::OpTypeVector
2049 ? Call->ReturnType->getOperand(2).getImm()
2050 : 1;
2051 // Get the actual number of query result/size components.
2052 SPIRVTypeInst ImgType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
2053 unsigned NumActualRetComponents = getNumSizeComponents(ImgType);
2054 Register QueryResult = Call->ReturnRegister;
2055 SPIRVTypeInst QueryResultType = Call->ReturnType;
// When the counts differ, query into a fresh intermediate vector register
// of the actual size; the expected shape is produced afterwards.
2056 if (NumExpectedRetComponents != NumActualRetComponents) {
2057 unsigned Bitwidth = Call->ReturnType->getOpcode() == SPIRV::OpTypeInt
2058 ? Call->ReturnType->getOperand(1).getImm()
2059 : 32;
2060 QueryResult = MIRBuilder.getMRI()->createGenericVirtualRegister(
2061 LLT::fixed_vector(NumActualRetComponents, Bitwidth));
2062 MIRBuilder.getMRI()->setRegClass(QueryResult, &SPIRV::vIDRegClass);
2063 SPIRVTypeInst IntTy = GR->getOrCreateSPIRVIntegerType(Bitwidth, MIRBuilder);
2064 QueryResultType = GR->getOrCreateSPIRVVectorType(
2065 IntTy, NumActualRetComponents, MIRBuilder, true);
2066 GR->assignSPIRVTypeToVReg(QueryResultType, QueryResult, MIRBuilder.getMF());
2067 }
2068 bool IsDimBuf = ImgType->getOperand(2).getImm() == SPIRV::Dim::DIM_Buffer;
2069 unsigned Opcode =
2070 IsDimBuf ? SPIRV::OpImageQuerySize : SPIRV::OpImageQuerySizeLod;
2071 auto MIB = MIRBuilder.buildInstr(Opcode)
2072 .addDef(QueryResult)
2073 .addUse(GR->getSPIRVTypeID(QueryResultType))
2074 .addUse(Call->Arguments[0]);
2075 if (!IsDimBuf)
2076 MIB.addUse(buildConstantIntReg32(0, MIRBuilder, GR)); // Lod id.
2077 if (NumExpectedRetComponents == NumActualRetComponents)
2078 return true;
2079 if (NumExpectedRetComponents == 1) {
2080 // Only 1 component is expected, build OpCompositeExtract instruction.
// Component 3 conventionally selects the last available size component.
2081 unsigned ExtractedComposite =
2082 Component == 3 ? NumActualRetComponents - 1 : Component;
2083 assert(ExtractedComposite < NumActualRetComponents &&
2084 "Invalid composite index!");
2085 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
2086 SPIRVTypeInst NewType = nullptr;
2087 if (QueryResultType->getOpcode() == SPIRV::OpTypeVector) {
2088 Register NewTypeReg = QueryResultType->getOperand(1).getReg();
2089 if (TypeReg != NewTypeReg &&
2090 (NewType = GR->getSPIRVTypeForVReg(NewTypeReg)))
2091 TypeReg = NewTypeReg;
2092 }
2093 MIRBuilder.buildInstr(SPIRV::OpCompositeExtract)
2094 .addDef(Call->ReturnRegister)
2095 .addUse(TypeReg)
2096 .addUse(QueryResult)
2097 .addImm(ExtractedComposite);
2098 if (NewType)
2099 updateRegType(Call->ReturnRegister, nullptr, NewType, GR, MIRBuilder,
2100 MIRBuilder.getMF().getRegInfo());
2101 } else {
2102 // More than 1 component is expected, fill a new vector.
2103 auto MIB = MIRBuilder.buildInstr(SPIRV::OpVectorShuffle)
2104 .addDef(Call->ReturnRegister)
2105 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2106 .addUse(QueryResult)
2107 .addUse(QueryResult)
2108 for (unsigned i = 0; i < NumExpectedRetComponents; ++i)
2109 MIB.addImm(i < NumActualRetComponents ? i : 0xffffffff);
2110 }
2111 return true;
2112}
2113
// Lowers image query builtins that return a scalar int (OpImageQuerySamples /
// OpImageQueryLevels), asserting the image dimensionality allowed for each.
// The dimensionality variable is only read in asserts, hence the (void) cast.
// NOTE(review): the signature line (name — presumably generateImageQueryInst)
// is missing from this copy.
2115 MachineIRBuilder &MIRBuilder,
2116 SPIRVGlobalRegistry *GR) {
2117 assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt &&
2118 "Image samples query result must be of int type!");
2119
2120 // Lookup the instruction opcode in the TableGen records.
2121 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2122 unsigned Opcode =
2123 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2124
2125 Register Image = Call->Arguments[0];
2126 SPIRV::Dim::Dim ImageDimensionality = static_cast<SPIRV::Dim::Dim>(
2127 GR->getSPIRVTypeForVReg(Image)->getOperand(2).getImm());
2128 (void)ImageDimensionality;
2129
2130 switch (Opcode) {
2131 case SPIRV::OpImageQuerySamples:
2132 assert(ImageDimensionality == SPIRV::Dim::DIM_2D &&
2133 "Image must be of 2D dimensionality");
2134 break;
2135 case SPIRV::OpImageQueryLevels:
2136 assert((ImageDimensionality == SPIRV::Dim::DIM_1D ||
2137 ImageDimensionality == SPIRV::Dim::DIM_2D ||
2138 ImageDimensionality == SPIRV::Dim::DIM_3D ||
2139 ImageDimensionality == SPIRV::Dim::DIM_Cube) &&
2140 "Image must be of 1D/2D/3D/Cube dimensionality");
2141 break;
2142 }
2143
2144 MIRBuilder.buildInstr(Opcode)
2145 .addDef(Call->ReturnRegister)
2146 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2147 .addUse(Image);
2148 return true;
2149}
2150
2151 // TODO: Move to TableGen.
// Maps the OpenCL CLK_ADDRESS_* bits of a sampler bitmask to the SPIR-V
// SamplerAddressingMode enum; unknown modes are fatal.
// NOTE(review): the line carrying the function name (presumably
// getSamplerAddressingModeFromBitmask(unsigned Bitmask) {) is missing here.
2152 static SPIRV::SamplerAddressingMode::SamplerAddressingMode
2154 switch (Bitmask & SPIRV::CLK_ADDRESS_MODE_MASK) {
2155 case SPIRV::CLK_ADDRESS_CLAMP:
2156 return SPIRV::SamplerAddressingMode::Clamp;
2157 case SPIRV::CLK_ADDRESS_CLAMP_TO_EDGE:
2158 return SPIRV::SamplerAddressingMode::ClampToEdge;
2159 case SPIRV::CLK_ADDRESS_REPEAT:
2160 return SPIRV::SamplerAddressingMode::Repeat;
2161 case SPIRV::CLK_ADDRESS_MIRRORED_REPEAT:
2162 return SPIRV::SamplerAddressingMode::RepeatMirrored;
2163 case SPIRV::CLK_ADDRESS_NONE:
2164 return SPIRV::SamplerAddressingMode::None;
2165 default:
2166 report_fatal_error("Unknown CL address mode");
2167 }
2168}
2169
2170static unsigned getSamplerParamFromBitmask(unsigned Bitmask) {
2171 return (Bitmask & SPIRV::CLK_NORMALIZED_COORDS_TRUE) ? 1 : 0;
2172}
2173
// Maps CLK_FILTER_* bits of a sampler bitmask to the SPIR-V SamplerFilterMode
// enum, defaulting to Nearest. (The explicit CLK_FILTER_NEAREST branch is
// redundant with the fallthrough return but kept for symmetry/clarity.)
// NOTE(review): the line carrying the function name (presumably
// getSamplerFilterModeFromBitmask(unsigned Bitmask) {) is missing here.
2174 static SPIRV::SamplerFilterMode::SamplerFilterMode
2176 if (Bitmask & SPIRV::CLK_FILTER_LINEAR)
2177 return SPIRV::SamplerFilterMode::Linear;
2178 if (Bitmask & SPIRV::CLK_FILTER_NEAREST)
2179 return SPIRV::SamplerFilterMode::Nearest;
2180 return SPIRV::SamplerFilterMode::Nearest;
2181}
2182
// Lowers read_image builtins. Three paths, chosen from the demangled call
// string: (1) "ocl_sampler" — build OpSampledImage and sample with explicit
// Lod, widening a scalar return through a temporary 4-vector plus
// OpCompositeExtract; (2) "msaa" — OpImageRead with a Sample operand;
// (3) plain OpImageRead.
// NOTE(review): several lines are missing from this copy: the second
// signature line (IncomingCall parameter), the sampler-constant rebuild after
// "uint64_t SamplerMask = ...", and the Lod definition before "MIRBuilder);".
// Restore from upstream before compiling.
2183 static bool generateReadImageInst(const StringRef DemangledCall,
2185 MachineIRBuilder &MIRBuilder,
2186 SPIRVGlobalRegistry *GR) {
2187 if (Call->isSpirvOp())
2188 return buildOpFromWrapper(MIRBuilder, SPIRV::OpImageRead, Call,
2189 GR->getSPIRVTypeID(Call->ReturnType));
2190 Register Image = Call->Arguments[0];
2191 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2192 bool HasOclSampler = DemangledCall.contains_insensitive("ocl_sampler");
2193 bool HasMsaa = DemangledCall.contains_insensitive("msaa");
2194 if (HasOclSampler) {
2195 Register Sampler = Call->Arguments[1];
2196
2197 if (!GR->isScalarOfType(Sampler, SPIRV::OpTypeSampler) &&
2198 getDefInstrMaybeConstant(Sampler, MRI)->getOperand(1).isCImm()) {
2199 uint64_t SamplerMask = getIConstVal(Sampler, MRI);
2202 getSamplerParamFromBitmask(SamplerMask),
2203 getSamplerFilterModeFromBitmask(SamplerMask), MIRBuilder);
2204 }
2205 SPIRVTypeInst ImageType = GR->getSPIRVTypeForVReg(Image);
2206 SPIRVTypeInst SampledImageType =
2207 GR->getOrCreateOpTypeSampledImage(ImageType, MIRBuilder);
2208 Register SampledImage = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2209
2210 MIRBuilder.buildInstr(SPIRV::OpSampledImage)
2211 .addDef(SampledImage)
2212 .addUse(GR->getSPIRVTypeID(SampledImageType))
2213 .addUse(Image)
2214 .addUse(Sampler);
2215
2217 MIRBuilder);
2218
2219 if (Call->ReturnType->getOpcode() != SPIRV::OpTypeVector) {
// Scalar return: sample into a temporary 4-component vector, then extract
// component 0 into the real return register.
2220 SPIRVTypeInst TempType =
2221 GR->getOrCreateSPIRVVectorType(Call->ReturnType, 4, MIRBuilder, true);
2222 Register TempRegister =
2223 MRI->createGenericVirtualRegister(GR->getRegType(TempType));
2224 MRI->setRegClass(TempRegister, GR->getRegClass(TempType));
2225 GR->assignSPIRVTypeToVReg(TempType, TempRegister, MIRBuilder.getMF());
2226 MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
2227 .addDef(TempRegister)
2228 .addUse(GR->getSPIRVTypeID(TempType))
2229 .addUse(SampledImage)
2230 .addUse(Call->Arguments[2]) // Coordinate.
2231 .addImm(SPIRV::ImageOperand::Lod)
2232 .addUse(Lod);
2233 MIRBuilder.buildInstr(SPIRV::OpCompositeExtract)
2234 .addDef(Call->ReturnRegister)
2235 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2236 .addUse(TempRegister)
2237 .addImm(0);
2238 } else {
2239 MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
2240 .addDef(Call->ReturnRegister)
2241 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2242 .addUse(SampledImage)
2243 .addUse(Call->Arguments[2]) // Coordinate.
2244 .addImm(SPIRV::ImageOperand::Lod)
2245 .addUse(Lod);
2246 }
2247 } else if (HasMsaa) {
2248 MIRBuilder.buildInstr(SPIRV::OpImageRead)
2249 .addDef(Call->ReturnRegister)
2250 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2251 .addUse(Image)
2252 .addUse(Call->Arguments[1]) // Coordinate.
2253 .addImm(SPIRV::ImageOperand::Sample)
2254 .addUse(Call->Arguments[2]);
2255 } else {
2256 MIRBuilder.buildInstr(SPIRV::OpImageRead)
2257 .addDef(Call->ReturnRegister)
2258 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2259 .addUse(Image)
2260 .addUse(Call->Arguments[1]); // Coordinate.
2261 }
2262 return true;
2263}
2264
// Lowers write_image builtins to OpImageWrite (image, coordinate, texel) —
// no result register, hence Register(0) on the wrapper path.
// NOTE(review): the signature line (name — presumably generateWriteImageInst)
// is missing from this copy.
2266 MachineIRBuilder &MIRBuilder,
2267 SPIRVGlobalRegistry *GR) {
2268 if (Call->isSpirvOp())
2269 return buildOpFromWrapper(MIRBuilder, SPIRV::OpImageWrite, Call,
2270 Register(0));
2271 MIRBuilder.buildInstr(SPIRV::OpImageWrite)
2272 .addUse(Call->Arguments[0]) // Image.
2273 .addUse(Call->Arguments[1]) // Coordinate.
2274 .addUse(Call->Arguments[2]); // Texel.
2275 return true;
2276}
2277
// Lowers the sampler/sampled-image builtin family, selected by substrings of
// the builtin name: __translate_sampler_initializer (build sampler constant),
// __spirv_SampledImage (emit OpSampledImage), and
// __spirv_ImageSampleExplicitLod (sample with explicit Lod; the result type
// may be parsed out of the "_R<type>" fragment of the demangled call).
// NOTE(review): several lines are missing from this copy — the second
// signature line, the Sampler constant construction, the fallback
// getOrCreateSPIRVTypeByName call, and the result-type .addUse before
// "Call->Arguments[0]". Restore from upstream before compiling.
2278 static bool generateSampleImageInst(const StringRef DemangledCall,
2280 MachineIRBuilder &MIRBuilder,
2281 SPIRVGlobalRegistry *GR) {
2282 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2283 if (Call->Builtin->Name.contains_insensitive(
2284 "__translate_sampler_initializer")) {
2285 // Build sampler literal.
2286 uint64_t Bitmask = getIConstVal(Call->Arguments[0], MRI);
2288 Call->ReturnRegister, getSamplerAddressingModeFromBitmask(Bitmask),
2290 getSamplerFilterModeFromBitmask(Bitmask), MIRBuilder);
2291 return Sampler.isValid();
2292 } else if (Call->Builtin->Name.contains_insensitive("__spirv_SampledImage")) {
2293 // Create OpSampledImage.
2294 Register Image = Call->Arguments[0];
2295 SPIRVTypeInst ImageType = GR->getSPIRVTypeForVReg(Image);
2296 SPIRVTypeInst SampledImageType =
2297 GR->getOrCreateOpTypeSampledImage(ImageType, MIRBuilder);
2298 Register SampledImage =
2299 Call->ReturnRegister.isValid()
2300 ? Call->ReturnRegister
2301 : MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2302 MIRBuilder.buildInstr(SPIRV::OpSampledImage)
2303 .addDef(SampledImage)
2304 .addUse(GR->getSPIRVTypeID(SampledImageType))
2305 .addUse(Image)
2306 .addUse(Call->Arguments[1]); // Sampler.
2307 return true;
2308 } else if (Call->Builtin->Name.contains_insensitive(
2309 "__spirv_ImageSampleExplicitLod")) {
2310 // Sample an image using an explicit level of detail.
2311 std::string ReturnType = DemangledCall.str();
2312 if (DemangledCall.contains("_R")) {
// "_R<type>(" encodes the mangled return type in the demangled name.
2313 ReturnType = ReturnType.substr(ReturnType.find("_R") + 2);
2314 ReturnType = ReturnType.substr(0, ReturnType.find('('));
2315 }
2316 SPIRVTypeInst Type = Call->ReturnType
2317 ? Call->ReturnType
2319 ReturnType, MIRBuilder, true));
2320 if (!Type) {
2321 std::string DiagMsg =
2322 "Unable to recognize SPIRV type name: " + ReturnType;
2323 report_fatal_error(DiagMsg.c_str());
2324 }
2325 MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
2326 .addDef(Call->ReturnRegister)
2328 .addUse(Call->Arguments[0]) // Image.
2329 .addUse(Call->Arguments[1]) // Coordinate.
2330 .addImm(SPIRV::ImageOperand::Lod)
2331 .addUse(Call->Arguments[3]);
2332 return true;
2333 }
2334 return false;
2335}
2336
// Lowers the select builtin to a generic G_SELECT(cond, true_val, false_val).
// NOTE(review): the signature line (name — presumably generateSelectInst) is
// missing from this copy.
2338 MachineIRBuilder &MIRBuilder) {
2339 MIRBuilder.buildSelect(Call->ReturnRegister, Call->Arguments[0],
2340 Call->Arguments[1], Call->Arguments[2]);
2341 return true;
2342}
2343
// Lowers composite construction, splitting long operand lists into
// OpCompositeConstructContinuedINTEL chains (3 fixed operands per chunk).
// NOTE(review): the signature line (name — presumably generateConstructInst)
// is missing from this copy.
2345 MachineIRBuilder &MIRBuilder,
2346 SPIRVGlobalRegistry *GR) {
2347 createContinuedInstructions(MIRBuilder, SPIRV::OpCompositeConstruct, 3,
2348 SPIRV::OpCompositeConstructContinuedINTEL,
2349 Call->Arguments, Call->ReturnRegister,
2350 GR->getSPIRVTypeID(Call->ReturnType));
2351 return true;
2352}
2353
// Lowers cooperative-matrix builtins (KHR + INTEL checked/prefetch variants).
// Computes the index of the optional literal memory-operand argument per
// opcode, special-cases the prefetch instruction (no result) and the Length
// query (takes the matrix *type* register), and otherwise forwards through
// buildOpFromWrapper; store-like opcodes produce no result (Register(0)).
// NOTE(review): two lines are missing from this copy — the signature line
// (name — presumably generateCoopMatrInst) and the ImmArgs SmallVector
// declaration before "MachineRegisterInfo *MRI = ...".
2355 MachineIRBuilder &MIRBuilder,
2356 SPIRVGlobalRegistry *GR) {
2357 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2358 unsigned Opcode =
2359 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2360 bool IsSet = Opcode != SPIRV::OpCooperativeMatrixStoreKHR &&
2361 Opcode != SPIRV::OpCooperativeMatrixStoreCheckedINTEL &&
2362 Opcode != SPIRV::OpCooperativeMatrixPrefetchINTEL;
2363 unsigned ArgSz = Call->Arguments.size();
2364 unsigned LiteralIdx = 0;
2365 switch (Opcode) {
2366 // Memory operand is optional and is literal.
2367 case SPIRV::OpCooperativeMatrixLoadKHR:
2368 LiteralIdx = ArgSz > 3 ? 3 : 0;
2369 break;
2370 case SPIRV::OpCooperativeMatrixStoreKHR:
2371 LiteralIdx = ArgSz > 4 ? 4 : 0;
2372 break;
2373 case SPIRV::OpCooperativeMatrixLoadCheckedINTEL:
2374 LiteralIdx = ArgSz > 7 ? 7 : 0;
2375 break;
2376 case SPIRV::OpCooperativeMatrixStoreCheckedINTEL:
2377 LiteralIdx = ArgSz > 8 ? 8 : 0;
2378 break;
2379 // Cooperative Matrix Operands operand is optional and is literal.
2380 case SPIRV::OpCooperativeMatrixMulAddKHR:
2381 LiteralIdx = ArgSz > 3 ? 3 : 0;
2382 break;
2383 };
2384
2386 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2387 if (Opcode == SPIRV::OpCooperativeMatrixPrefetchINTEL) {
2388 const uint32_t CacheLevel = getConstFromIntrinsic(Call->Arguments[3], MRI);
2389 auto MIB = MIRBuilder.buildInstr(SPIRV::OpCooperativeMatrixPrefetchINTEL)
2390 .addUse(Call->Arguments[0]) // pointer
2391 .addUse(Call->Arguments[1]) // rows
2392 .addUse(Call->Arguments[2]) // columns
2393 .addImm(CacheLevel) // cache level
2394 .addUse(Call->Arguments[4]); // memory layout
2395 if (ArgSz > 5)
2396 MIB.addUse(Call->Arguments[5]); // stride
2397 if (ArgSz > 6) {
2398 const uint32_t MemOp = getConstFromIntrinsic(Call->Arguments[6], MRI);
2399 MIB.addImm(MemOp); // memory operand
2400 }
2401 return true;
2402 }
2403 if (LiteralIdx > 0)
2404 ImmArgs.push_back(getConstFromIntrinsic(Call->Arguments[LiteralIdx], MRI));
2405 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
2406 if (Opcode == SPIRV::OpCooperativeMatrixLengthKHR) {
2407 SPIRVTypeInst CoopMatrType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
2408 if (!CoopMatrType)
2409 report_fatal_error("Can't find a register's type definition");
2410 MIRBuilder.buildInstr(Opcode)
2411 .addDef(Call->ReturnRegister)
2412 .addUse(TypeReg)
2413 .addUse(CoopMatrType->getOperand(0).getReg());
2414 return true;
2415 }
2416 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2417 IsSet ? TypeReg : Register(0), ImmArgs);
2418}
2419
// Lowers specialization-constant builtins. OpSpecConstant: read the constant
// operand (int or float), use OpSpecConstantTrue/False for bool returns, emit
// the value immediate otherwise, and attach a SpecId decoration taken from
// argument 0. OpSpecConstantComposite: emit with INTEL continued chunks.
// NOTE(review): the signature line (name — presumably
// generateSpecConstantInst) is missing from this copy.
2421 MachineIRBuilder &MIRBuilder,
2422 SPIRVGlobalRegistry *GR) {
2423 // Lookup the instruction opcode in the TableGen records.
2424 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2425 unsigned Opcode =
2426 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2427 const MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2428
2429 switch (Opcode) {
2430 case SPIRV::OpSpecConstant: {
2431 // Determine the constant MI.
2432 Register ConstRegister = Call->Arguments[1];
2433 const MachineInstr *Const = getDefInstrMaybeConstant(ConstRegister, MRI);
2434 assert(Const &&
2435 (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
2436 Const->getOpcode() == TargetOpcode::G_FCONSTANT) &&
2437 "Argument should be either an int or floating-point constant");
2438 // Determine the opcode and built the OpSpec MI.
2439 const MachineOperand &ConstOperand = Const->getOperand(1);
2440 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeBool) {
2441 assert(ConstOperand.isCImm() && "Int constant operand is expected");
2442 Opcode = ConstOperand.getCImm()->getValue().getZExtValue()
2443 ? SPIRV::OpSpecConstantTrue
2444 : SPIRV::OpSpecConstantFalse;
2445 }
2446 auto MIB = MIRBuilder.buildInstr(Opcode)
2447 .addDef(Call->ReturnRegister)
2448 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
2449
2450 if (Call->ReturnType->getOpcode() != SPIRV::OpTypeBool) {
2451 if (Const->getOpcode() == TargetOpcode::G_CONSTANT)
2452 addNumImm(ConstOperand.getCImm()->getValue(), MIB);
2453 else
2454 addNumImm(ConstOperand.getFPImm()->getValueAPF().bitcastToAPInt(), MIB);
2455 }
2456 // Build the SpecID decoration.
2457 unsigned SpecId =
2458 static_cast<unsigned>(getIConstVal(Call->Arguments[0], MRI));
2459 buildOpDecorate(Call->ReturnRegister, MIRBuilder, SPIRV::Decoration::SpecId,
2460 {SpecId});
2461 return true;
2462 }
2463 case SPIRV::OpSpecConstantComposite: {
2464 createContinuedInstructions(MIRBuilder, Opcode, 3,
2465 SPIRV::OpSpecConstantCompositeContinuedINTEL,
2466 Call->Arguments, Call->ReturnRegister,
2467 GR->getSPIRVTypeID(Call->ReturnType));
2468 return true;
2469 }
2470 default:
2471 return false;
2472 }
2473}
2474
// Lowers extended bit-op builtins by forwarding the TableGen opcode to
// buildExtendedBitOpsInst.
// NOTE(review): the signature line (name — presumably
// generateExtendedBitOpsInst) is missing from this copy.
2476 MachineIRBuilder &MIRBuilder,
2477 SPIRVGlobalRegistry *GR) {
2478 // Lookup the instruction opcode in the TableGen records.
2479 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2480 unsigned Opcode =
2481 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2482
2483 return buildExtendedBitOpsInst(Call, Opcode, MIRBuilder, GR);
2484}
2485
// Lowers SPV_INTEL_bindless_images builtins by forwarding the TableGen
// opcode to buildBindlessImageINTELInst.
// NOTE(review): the signature line (name — presumably
// generateBindlessImageINTELInst) is missing from this copy.
2487 MachineIRBuilder &MIRBuilder,
2488 SPIRVGlobalRegistry *GR) {
2489 // Lookup the instruction opcode in the TableGen records.
2490 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2491 unsigned Opcode =
2492 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2493
2494 return buildBindlessImageINTELInst(Call, Opcode, MIRBuilder, GR);
2495}
2496
// Forwards the call through buildOpFromWrapper with no result id
// (Register(0)), i.e. the looked-up opcode produces no value.
// NOTE(review): the signature line (function name) is missing from this copy,
// so the builtin family this handles cannot be determined from the visible
// text — identify it against upstream.
2498 MachineIRBuilder &MIRBuilder,
2499 SPIRVGlobalRegistry *GR) {
2500 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2501 unsigned Opcode =
2502 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2503 return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0));
2504}
2505
// Helper for the AP-fixed-point builtins (invoked via buildAPFixedPointInst
// from the generator below). Two shapes: sret form — arg 0 is the result
// pointer, arg 1 the input (loaded through a pointer when it is a typed
// pointer), args 2..6 are literal immediates, and the result is stored back
// through arg 0; value form — args 1..5 are literals and the call forwards
// through buildOpFromWrapper.
// NOTE(review): several lines are missing from this copy — the first
// signature line (name), the ImmArgs SmallVector declaration, and the
// MachinePointerInfo lines of both getMachineMemOperand calls. Restore from
// upstream before compiling.
2507 unsigned Opcode, MachineIRBuilder &MIRBuilder,
2508 SPIRVGlobalRegistry *GR) {
2509 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2511 Register InputReg = Call->Arguments[0];
2512 const Type *RetTy = GR->getTypeForSPIRVType(Call->ReturnType);
2513 bool IsSRet = RetTy->isVoidTy();
2514
2515 if (IsSRet) {
2516 const LLT ValTy = MRI->getType(InputReg);
2517 Register ActualRetValReg = MRI->createGenericVirtualRegister(ValTy);
2518 SPIRVTypeInst InstructionType =
2519 GR->getPointeeType(GR->getSPIRVTypeForVReg(InputReg));
2520 InputReg = Call->Arguments[1];
2521 auto InputType = GR->getTypeForSPIRVType(GR->getSPIRVTypeForVReg(InputReg));
2522 Register PtrInputReg;
2523 if (InputType->getTypeID() == llvm::Type::TypeID::TypedPointerTyID) {
// Typed-pointer input: load the value it points to first.
2524 LLT InputLLT = MRI->getType(InputReg);
2525 PtrInputReg = MRI->createGenericVirtualRegister(InputLLT);
2526 SPIRVTypeInst PtrType =
2527 GR->getPointeeType(GR->getSPIRVTypeForVReg(InputReg));
2528 MachineMemOperand *MMO1 = MIRBuilder.getMF().getMachineMemOperand(
2530 InputLLT.getSizeInBytes(), Align(4));
2531 MIRBuilder.buildLoad(PtrInputReg, InputReg, *MMO1);
2532 MRI->setRegClass(PtrInputReg, &SPIRV::iIDRegClass);
2533 GR->assignSPIRVTypeToVReg(PtrType, PtrInputReg, MIRBuilder.getMF());
2534 }
2535
2536 for (unsigned index = 2; index < 7; index++) {
2537 ImmArgs.push_back(getConstFromIntrinsic(Call->Arguments[index], MRI));
2538 }
2539
2540 // Emit the instruction
2541 auto MIB = MIRBuilder.buildInstr(Opcode)
2542 .addDef(ActualRetValReg)
2543 .addUse(GR->getSPIRVTypeID(InstructionType));
2544 if (PtrInputReg)
2545 MIB.addUse(PtrInputReg);
2546 else
2547 MIB.addUse(InputReg);
2548
2549 for (uint32_t Imm : ImmArgs)
2550 MIB.addImm(Imm);
2551 unsigned Size = ValTy.getSizeInBytes();
2552 // Store result to the pointer passed in Arg[0]
2553 MachineMemOperand *MMO = MIRBuilder.getMF().getMachineMemOperand(
2555 MRI->setRegClass(ActualRetValReg, &SPIRV::pIDRegClass);
2556 MIRBuilder.buildStore(ActualRetValReg, Call->Arguments[0], *MMO);
2557 return true;
2558 } else {
2559 for (unsigned index = 1; index < 6; index++)
2560 ImmArgs.push_back(getConstFromIntrinsic(Call->Arguments[index], MRI));
2561
2562 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2563 GR->getSPIRVTypeID(Call->ReturnType), ImmArgs);
2564 }
2565}
2566
// Lowers AP-fixed-point builtins by forwarding the TableGen opcode to
// buildAPFixedPointInst (defined above).
// NOTE(review): the signature line (name — presumably
// generateAPFixedPointInst) is missing from this copy.
2568 MachineIRBuilder &MIRBuilder,
2569 SPIRVGlobalRegistry *GR) {
2570 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2571 unsigned Opcode =
2572 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2573
2574 return buildAPFixedPointInst(Call, Opcode, MIRBuilder, GR);
2575}
2576
// Lowers SPV_INTEL_ternary_bitwise_function builtins by forwarding the
// TableGen opcode to buildTernaryBitwiseFunctionINTELInst.
// NOTE(review): the line carrying the function name (after "static bool") is
// missing from this copy.
2577 static bool
2579 MachineIRBuilder &MIRBuilder,
2580 SPIRVGlobalRegistry *GR) {
2581 // Lookup the instruction opcode in the TableGen records.
2582 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2583 unsigned Opcode =
2584 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2585
2586 return buildTernaryBitwiseFunctionINTELInst(Call, Opcode, MIRBuilder, GR);
2587}
2588
// Lowers image channel-data-type query builtins by forwarding the TableGen
// opcode to buildImageChannelDataTypeInst.
// NOTE(review): the signature line (function name) is missing from this copy.
2590 MachineIRBuilder &MIRBuilder,
2591 SPIRVGlobalRegistry *GR) {
2592 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2593 unsigned Opcode =
2594 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2595
2596 return buildImageChannelDataTypeInst(Call, Opcode, MIRBuilder, GR);
2597}
2598
// Lowers INTEL 2D block I/O builtins by forwarding the TableGen opcode to
// build2DBlockIOINTELInst.
// NOTE(review): the signature line (function name) is missing from this copy.
2600 MachineIRBuilder &MIRBuilder,
2601 SPIRVGlobalRegistry *GR) {
2602 // Lookup the instruction opcode in the TableGen records.
2603 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2604 unsigned Opcode =
2605 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2606
2607 return build2DBlockIOINTELInst(Call, Opcode, MIRBuilder, GR);
2608}
2609
// Lowers pipe builtins; the execution scope is Subgroup when the builtin name
// contains "sub_group", Workgroup otherwise, and both opcode and scope are
// forwarded to buildPipeInst.
// NOTE(review): the signature line (name — presumably generatePipeInst) is
// missing from this copy.
2611 MachineIRBuilder &MIRBuilder,
2612 SPIRVGlobalRegistry *GR) {
2613 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2614 unsigned Opcode =
2615 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2616
2617 unsigned Scope = SPIRV::Scope::Workgroup;
2618 if (Builtin->Name.contains("sub_group"))
2619 Scope = SPIRV::Scope::Subgroup;
2620
2621 return buildPipeInst(Call, Opcode, Scope, MIRBuilder, GR);
2622}
2623
// Lowers SPV_INTEL predicated load/store builtins: an optional fourth
// argument is a literal memory operand; stores carry no result register.
// NOTE(review): two lines are missing from this copy — the signature line
// (name) and the ImmArgs SmallVector declaration before
// "MachineRegisterInfo *MRI = ...".
2625 MachineIRBuilder &MIRBuilder,
2626 SPIRVGlobalRegistry *GR) {
2627 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2628 unsigned Opcode =
2629 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2630
2631 bool IsSet = Opcode != SPIRV::OpPredicatedStoreINTEL;
2632 unsigned ArgSz = Call->Arguments.size();
2634 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2635 // Memory operand is optional and is literal.
2636 if (ArgSz > 3)
2637 ImmArgs.push_back(
2638 getConstFromIntrinsic(Call->Arguments[/*Literal index*/ 3], MRI));
2639
2640 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
2641 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2642 IsSet ? TypeReg : Register(0), ImmArgs);
2643}
2644
2646 MachineIRBuilder &MIRBuilder,
2647 SPIRVGlobalRegistry *GR) {
2648 // The OpenCL ndrange_*D functions are overloaded and support 1D, 2D, and 3D
2649 // variants, accepting 1 to 3 arguments:
2650 // (global_work_size)
2651 // (global_work_size, local_work_size)
2652 // (global_work_offset, global_work_size, local_work_size)
2653 // Note: When all three arguments are provided, they are reordered compared
2654 // to the one- or two-argument form.
2655 //
2656 // The function may return data through an sret argument at position 0 (with
2657 // a void function return type). When present, all other argument indices are
2658 // adjusted accordingly.
2659 //
2660 // SPIR-V's OpBuildNDRange requires all three arguments (GlobalWorkSize,
2661 // LocalWorkSize, GlobalWorkOffset). For 1D kernels, the values are scalars;
2662 // for 2D/3D kernels, they are arrays of 2 or 3 elements. Missing arguments
2663 // default to zero.
2664 //
2665 // Calculate argument indices based on the number of arguments and presence
2666 // of sret:
2667 const unsigned NumCallArgs = Call->Arguments.size();
2668 const unsigned MaxCallArgs = Call->Builtin->MaxNumArgs;
2669 const unsigned IncorrectArgIdx = MaxCallArgs + 1;
2670
2671 const Type *RetTy = GR->getTypeForSPIRVType(Call->ReturnType);
2672 bool HasSRetArg = RetTy->isVoidTy();
2673
2674 const unsigned SRetArgIdx = HasSRetArg ? 0 : IncorrectArgIdx;
2675 const unsigned ArgBase = HasSRetArg ? 1 : 0;
2676 const unsigned MaxNDRangeArgs = 3;
2677 const unsigned NumNDRangeArgs = NumCallArgs - ArgBase;
2678
2679 const unsigned GlobalWorkSizeArgIdx =
2680 NumNDRangeArgs < MaxNDRangeArgs ? ArgBase : ArgBase + 1;
2681 const unsigned LocalWorkSizeArgIdx =
2682 (NumNDRangeArgs == 1)
2683 ? IncorrectArgIdx
2684 : (NumNDRangeArgs == MaxNDRangeArgs ? ArgBase + 2 : ArgBase + 1);
2685 const unsigned GlobalWorkOffsetArgIdx =
2686 NumNDRangeArgs == MaxNDRangeArgs ? ArgBase : IncorrectArgIdx;
2687
2688 // Each nd_range field is an array of <Dimension> integers matching the
2689 // address model width (32 or 64 bits).
2690 const unsigned AddressModelBits = GR->getPointerSize();
2691 assert(AddressModelBits == 64 || AddressModelBits == 32);
2692
2693 // The dimension is encoded in the function name as "ndrange_XD" where X is
2694 // 1, 2, or 3.
2695 unsigned Dimension = 0;
2696 Call->Builtin->Name.substr(8, 1).getAsInteger(10, Dimension);
2697 assert(Dimension <= 3 && Dimension >= 1);
2698
2699 // Determine the work size type based on the dimension. For missing arguments,
2700 // create a zero constant of the appropriate type.
2701 MachineFunction &MF = MIRBuilder.getMF();
2702 SPIRVTypeInst SpvFieldTy;
2703 Register ConstZero;
2704 if (Dimension == 1) {
2705 SpvFieldTy = GR->getSPIRVTypeForVReg(Call->Arguments[GlobalWorkSizeArgIdx]);
2706 assert(SpvFieldTy && SpvFieldTy->getOpcode() == SPIRV::OpTypeInt &&
2707 "Expected scalar integer type");
2708
2709 if (NumNDRangeArgs < MaxNDRangeArgs)
2710 ConstZero = GR->buildConstantInt(0, MIRBuilder, SpvFieldTy, true);
2711 } else {
2712 Type *BaseTy =
2713 IntegerType::get(MF.getFunction().getContext(), AddressModelBits);
2714 Type *FieldTy = ArrayType::get(BaseTy, Dimension);
2715 SpvFieldTy = GR->getOrCreateSPIRVType(
2716 FieldTy, MIRBuilder, SPIRV::AccessQualifier::ReadOnly, true);
2717
2718 if (NumNDRangeArgs < MaxNDRangeArgs) {
2719 auto InsertIt = MIRBuilder.getInsertPt();
2720 MachineBasicBlock &MBB = MIRBuilder.getMBB();
2721 MachineInstr &InsertMI = (InsertIt != MBB.end()) ? *InsertIt : MBB.back();
2723 ConstZero = GR->getOrCreateConstIntArray(0, Dimension, InsertMI,
2724 SpvFieldTy, *ST.getInstrInfo());
2725 }
2726 }
2727
2728 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2729
2730 auto CreateDataRegister = [&](unsigned Idx) -> Register {
2731 Register Reg = (Idx == IncorrectArgIdx) ? ConstZero : Call->Arguments[Idx];
2732
2733 if (GR->getSPIRVTypeForVReg(Reg) == SpvFieldTy) {
2734 // Already has the correct type.
2735 return Reg;
2736 }
2737
2738 assert(GR->getSPIRVTypeForVReg(Reg)->getOpcode() == SPIRV::OpTypePointer &&
2739 "Only pointer types are supported for loading values");
2740
2741 Register Ptr = Reg;
2742
2743 Reg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2744 GR->assignSPIRVTypeToVReg(SpvFieldTy, Reg, MF);
2745
2746 MIRBuilder.buildInstr(SPIRV::OpLoad)
2747 .addDef(Reg)
2748 .addUse(GR->getSPIRVTypeID(SpvFieldTy))
2749 .addUse(Ptr);
2750 return Reg;
2751 };
2752
2753 Register GlobalWorkSize = CreateDataRegister(GlobalWorkSizeArgIdx);
2754 Register LocalWorkSize = CreateDataRegister(LocalWorkSizeArgIdx);
2755 Register GlobalWorkOffset = CreateDataRegister(GlobalWorkOffsetArgIdx);
2756
2757 if (!HasSRetArg) {
2758 return MIRBuilder.buildInstr(SPIRV::OpBuildNDRange)
2759 .addDef(Call->ReturnRegister)
2760 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2761 .addUse(GlobalWorkSize)
2762 .addUse(LocalWorkSize)
2763 .addUse(GlobalWorkOffset);
2764 }
2765
2766 // When sret is used, store nd_range struct through the pointer in the first
2767 // argument.
2768 Register SRetReg = Call->Arguments[SRetArgIdx];
2769 SPIRVTypeInst SRetPtrType = GR->getSPIRVTypeForVReg(SRetReg);
2770 SPIRVTypeInst SRetType = GR->getPointeeType(SRetPtrType);
2771
2772 Register TmpReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2773 GR->assignSPIRVTypeToVReg(SRetType, TmpReg, MF);
2774
2775 MIRBuilder.buildInstr(SPIRV::OpBuildNDRange)
2776 .addDef(TmpReg)
2777 .addUse(GR->getSPIRVTypeID(SRetType))
2778 .addUse(GlobalWorkSize)
2779 .addUse(LocalWorkSize)
2780 .addUse(GlobalWorkOffset);
2781 return MIRBuilder.buildInstr(SPIRV::OpStore)
2782 .addUse(Call->Arguments[SRetArgIdx])
2783 .addUse(TmpReg);
2784}
2785
2786// TODO: maybe move to the global register.
2787static SPIRVTypeInst
2789 SPIRVGlobalRegistry *GR) {
2790 LLVMContext &Context = MIRBuilder.getMF().getFunction().getContext();
2791 unsigned SC1 = storageClassToAddressSpace(SPIRV::StorageClass::Generic);
2792 Type *PtrType = PointerType::get(Context, SC1);
2793 return GR->getOrCreateSPIRVType(PtrType, MIRBuilder,
2794 SPIRV::AccessQualifier::ReadWrite, true);
2795}
2796
2798 MachineIRBuilder &MIRBuilder,
2799 SPIRVGlobalRegistry *GR) {
2800 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2801 const DataLayout &DL = MIRBuilder.getDataLayout();
2802 bool IsSpirvOp = Call->isSpirvOp();
2803 bool HasEvents = Call->Builtin->Name.contains("events") || IsSpirvOp;
2804 const SPIRVTypeInst Int32Ty = GR->getOrCreateSPIRVIntegerType(32, MIRBuilder);
2805
2806 // Make vararg instructions before OpEnqueueKernel.
2807 // Local sizes arguments: Sizes of block invoke arguments. Clang generates
2808 // local size operands as an array, so we need to unpack them.
2809 SmallVector<Register, 16> LocalSizes;
2810 if (Call->Builtin->Name.contains("_varargs") || IsSpirvOp) {
2811 const unsigned LocalSizeArrayIdx = HasEvents ? 9 : 6;
2812 Register GepReg = Call->Arguments[LocalSizeArrayIdx];
2813 MachineInstr *GepMI = MRI->getUniqueVRegDef(GepReg);
2814 assert(isSpvIntrinsic(*GepMI, Intrinsic::spv_gep) &&
2815 GepMI->getOperand(3).isReg());
2816 Register ArrayReg = GepMI->getOperand(3).getReg();
2817 MachineInstr *ArrayMI = MRI->getUniqueVRegDef(ArrayReg);
2818 const Type *LocalSizeTy = getMachineInstrType(ArrayMI);
2819 assert(LocalSizeTy && "Local size type is expected");
2820 const uint64_t LocalSizeNum =
2821 cast<ArrayType>(LocalSizeTy)->getNumElements();
2822 unsigned SC = storageClassToAddressSpace(SPIRV::StorageClass::Generic);
2823 const LLT LLType = LLT::pointer(SC, GR->getPointerSize());
2824 const SPIRVTypeInst PointerSizeTy = GR->getOrCreateSPIRVPointerType(
2825 Int32Ty, MIRBuilder, SPIRV::StorageClass::Function);
2826 for (unsigned I = 0; I < LocalSizeNum; ++I) {
2827 Register Reg = MRI->createVirtualRegister(&SPIRV::pIDRegClass);
2828 MRI->setType(Reg, LLType);
2829 GR->assignSPIRVTypeToVReg(PointerSizeTy, Reg, MIRBuilder.getMF());
2830 auto GEPInst = MIRBuilder.buildIntrinsic(
2831 Intrinsic::spv_gep, ArrayRef<Register>{Reg}, true, false);
2832 GEPInst
2833 .addImm(GepMI->getOperand(2).getImm()) // In bound.
2834 .addUse(ArrayMI->getOperand(0).getReg()) // Alloca.
2835 .addUse(buildConstantIntReg32(0, MIRBuilder, GR)) // Indices.
2836 .addUse(buildConstantIntReg32(I, MIRBuilder, GR));
2837 LocalSizes.push_back(Reg);
2838 }
2839 }
2840
2841 // SPIRV OpEnqueueKernel instruction has 10+ arguments.
2842 auto MIB = MIRBuilder.buildInstr(SPIRV::OpEnqueueKernel)
2843 .addDef(Call->ReturnRegister)
2845
2846 // Copy all arguments before block invoke function pointer.
2847 const unsigned BlockFIdx = HasEvents ? 6 : 3;
2848 for (unsigned i = 0; i < BlockFIdx; i++)
2849 MIB.addUse(Call->Arguments[i]);
2850
2851 // If there are no event arguments in the original call, add dummy ones.
2852 if (!HasEvents) {
2853 MIB.addUse(buildConstantIntReg32(0, MIRBuilder, GR)); // Dummy num events.
2854 Register NullPtr = GR->getOrCreateConstNullPtr(
2855 MIRBuilder, getOrCreateSPIRVDeviceEventPointer(MIRBuilder, GR));
2856 MIB.addUse(NullPtr); // Dummy wait events.
2857 MIB.addUse(NullPtr); // Dummy ret event.
2858 }
2859
2860 MachineInstr *BlockMI = getBlockStructInstr(Call->Arguments[BlockFIdx], MRI);
2861 assert(BlockMI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
2862 // Invoke: Pointer to invoke function.
2863 MIB.addGlobalAddress(BlockMI->getOperand(1).getGlobal());
2864
2865 Register BlockLiteralReg = Call->Arguments[BlockFIdx + 1];
2866 // Param: Pointer to block literal.
2867 MIB.addUse(BlockLiteralReg);
2868
2869 Type *PType = const_cast<Type *>(getBlockStructType(BlockLiteralReg, MRI));
2870 // TODO: these numbers should be obtained from block literal structure.
2871 // Param Size: Size of block literal structure.
2872 MIB.addUse(buildConstantIntReg32(DL.getTypeStoreSize(PType), MIRBuilder, GR));
2873 // Param Aligment: Aligment of block literal structure.
2874 MIB.addUse(buildConstantIntReg32(DL.getPrefTypeAlign(PType).value(),
2875 MIRBuilder, GR));
2876
2877 for (unsigned i = 0; i < LocalSizes.size(); i++)
2878 MIB.addUse(LocalSizes[i]);
2879 return true;
2880}
2881
2883 MachineIRBuilder &MIRBuilder,
2884 SPIRVGlobalRegistry *GR) {
2885 // Lookup the instruction opcode in the TableGen records.
2886 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2887 unsigned Opcode =
2888 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2889
2890 switch (Opcode) {
2891 case SPIRV::OpRetainEvent:
2892 case SPIRV::OpReleaseEvent:
2893 return MIRBuilder.buildInstr(Opcode).addUse(Call->Arguments[0]);
2894 case SPIRV::OpCreateUserEvent:
2895 case SPIRV::OpGetDefaultQueue:
2896 return MIRBuilder.buildInstr(Opcode)
2897 .addDef(Call->ReturnRegister)
2898 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
2899 case SPIRV::OpIsValidEvent:
2900 return MIRBuilder.buildInstr(Opcode)
2901 .addDef(Call->ReturnRegister)
2902 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2903 .addUse(Call->Arguments[0]);
2904 case SPIRV::OpSetUserEventStatus:
2905 return MIRBuilder.buildInstr(Opcode)
2906 .addUse(Call->Arguments[0])
2907 .addUse(Call->Arguments[1]);
2908 case SPIRV::OpCaptureEventProfilingInfo:
2909 return MIRBuilder.buildInstr(Opcode)
2910 .addUse(Call->Arguments[0])
2911 .addUse(Call->Arguments[1])
2912 .addUse(Call->Arguments[2]);
2913 case SPIRV::OpBuildNDRange:
2914 return buildNDRange(Call, MIRBuilder, GR);
2915 case SPIRV::OpEnqueueKernel:
2916 return buildEnqueueKernel(Call, MIRBuilder, GR);
2917 default:
2918 return false;
2919 }
2920}
2921
2923 MachineIRBuilder &MIRBuilder,
2924 SPIRVGlobalRegistry *GR) {
2925 // Lookup the instruction opcode in the TableGen records.
2926 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2927 unsigned Opcode =
2928 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2929
2930 bool IsSet = Opcode == SPIRV::OpGroupAsyncCopy;
2931 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
2932 if (Call->isSpirvOp())
2933 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2934 IsSet ? TypeReg : Register(0));
2935
2936 auto Scope = buildConstantIntReg32(SPIRV::Scope::Workgroup, MIRBuilder, GR);
2937
2938 switch (Opcode) {
2939 case SPIRV::OpGroupAsyncCopy: {
2940 SPIRVTypeInst NewType =
2941 Call->ReturnType->getOpcode() == SPIRV::OpTypeEvent
2942 ? nullptr
2943 : GR->getOrCreateSPIRVTypeByName("spirv.Event", MIRBuilder, true);
2944 Register TypeReg = GR->getSPIRVTypeID(NewType ? NewType : Call->ReturnType);
2945 unsigned NumArgs = Call->Arguments.size();
2946 Register EventReg = Call->Arguments[NumArgs - 1];
2947 bool Res = MIRBuilder.buildInstr(Opcode)
2948 .addDef(Call->ReturnRegister)
2949 .addUse(TypeReg)
2950 .addUse(Scope)
2951 .addUse(Call->Arguments[0])
2952 .addUse(Call->Arguments[1])
2953 .addUse(Call->Arguments[2])
2954 .addUse(Call->Arguments.size() > 4
2955 ? Call->Arguments[3]
2956 : buildConstantIntReg32(1, MIRBuilder, GR))
2957 .addUse(EventReg);
2958 if (NewType)
2959 updateRegType(Call->ReturnRegister, nullptr, NewType, GR, MIRBuilder,
2960 MIRBuilder.getMF().getRegInfo());
2961 return Res;
2962 }
2963 case SPIRV::OpGroupWaitEvents:
2964 return MIRBuilder.buildInstr(Opcode)
2965 .addUse(Scope)
2966 .addUse(Call->Arguments[0])
2967 .addUse(Call->Arguments[1]);
2968 default:
2969 return false;
2970 }
2971}
2972
2973static bool generateConvertInst(const StringRef DemangledCall,
2975 MachineIRBuilder &MIRBuilder,
2976 SPIRVGlobalRegistry *GR) {
2977 // Lookup the conversion builtin in the TableGen records.
2978 const SPIRV::ConvertBuiltin *Builtin =
2979 SPIRV::lookupConvertBuiltin(Call->Builtin->Name, Call->Builtin->Set);
2980
2981 if (!Builtin && Call->isSpirvOp()) {
2982 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2983 unsigned Opcode =
2984 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2985 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2986 GR->getSPIRVTypeID(Call->ReturnType));
2987 }
2988
2989 assert(Builtin && "Conversion builtin not found.");
2990 if (Builtin->IsSaturated)
2991 buildOpDecorate(Call->ReturnRegister, MIRBuilder,
2992 SPIRV::Decoration::SaturatedConversion, {});
2993
2994 if (Builtin->IsRounded) {
2995 bool AnyTypeIsFloat =
2996 GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeFloat) ||
2997 GR->isScalarOrVectorOfType(Call->Arguments[0], SPIRV::OpTypeFloat);
2998
2999 // Rounding mode decorations are only valid for floating point types.
3000 // Conversion builtins from integer to integer are equivalent to their
3001 // non-rounded counterparts.
3002 if (AnyTypeIsFloat) {
3003 buildOpDecorate(Call->ReturnRegister, MIRBuilder,
3004 SPIRV::Decoration::FPRoundingMode,
3005 {(unsigned)Builtin->RoundingMode});
3006 }
3007 }
3008
3009 std::string NeedExtMsg; // no errors if empty
3010 bool IsRightComponentsNumber = true; // check if input/output accepts vectors
3011 unsigned Opcode = SPIRV::OpNop;
3012 if (GR->isScalarOrVectorOfType(Call->Arguments[0], SPIRV::OpTypeInt)) {
3013 // Int -> ...
3014 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) {
3015 // Int -> Int
3016 if (Builtin->IsSaturated)
3017 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpSatConvertUToS
3018 : SPIRV::OpSatConvertSToU;
3019 else
3020 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpUConvert
3021 : SPIRV::OpSConvert;
3022 } else if (GR->isScalarOrVectorOfType(Call->ReturnRegister,
3023 SPIRV::OpTypeFloat)) {
3024 // Int -> Float
3025 if (Builtin->IsBfloat16) {
3026 const auto *ST = static_cast<const SPIRVSubtarget *>(
3027 &MIRBuilder.getMF().getSubtarget());
3028 if (!ST->canUseExtension(
3029 SPIRV::Extension::SPV_INTEL_bfloat16_conversion))
3030 NeedExtMsg = "SPV_INTEL_bfloat16_conversion";
3031 IsRightComponentsNumber =
3032 GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
3033 GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
3034 Opcode = SPIRV::OpConvertBF16ToFINTEL;
3035 } else {
3036 bool IsSourceSigned =
3037 DemangledCall[DemangledCall.find_first_of('(') + 1] != 'u';
3038 Opcode = IsSourceSigned ? SPIRV::OpConvertSToF : SPIRV::OpConvertUToF;
3039 }
3040 }
3041 } else if (GR->isScalarOrVectorOfType(Call->Arguments[0],
3042 SPIRV::OpTypeFloat)) {
3043 // Float -> ...
3044 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) {
3045 // Float -> Int
3046 if (Builtin->IsBfloat16) {
3047 const auto *ST = static_cast<const SPIRVSubtarget *>(
3048 &MIRBuilder.getMF().getSubtarget());
3049 if (!ST->canUseExtension(
3050 SPIRV::Extension::SPV_INTEL_bfloat16_conversion))
3051 NeedExtMsg = "SPV_INTEL_bfloat16_conversion";
3052 IsRightComponentsNumber =
3053 GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
3054 GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
3055 Opcode = SPIRV::OpConvertFToBF16INTEL;
3056 } else {
3057 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpConvertFToS
3058 : SPIRV::OpConvertFToU;
3059 }
3060 } else if (GR->isScalarOrVectorOfType(Call->ReturnRegister,
3061 SPIRV::OpTypeFloat)) {
3062 if (Builtin->IsTF32) {
3063 const auto *ST = static_cast<const SPIRVSubtarget *>(
3064 &MIRBuilder.getMF().getSubtarget());
3065 if (!ST->canUseExtension(
3066 SPIRV::Extension::SPV_INTEL_tensor_float32_conversion))
3067 NeedExtMsg = "SPV_INTEL_tensor_float32_conversion";
3068 IsRightComponentsNumber =
3069 GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
3070 GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
3071 Opcode = SPIRV::OpRoundFToTF32INTEL;
3072 } else {
3073 // Float -> Float
3074 Opcode = SPIRV::OpFConvert;
3075 }
3076 }
3077 }
3078
3079 if (!NeedExtMsg.empty()) {
3080 std::string DiagMsg = std::string(Builtin->Name) +
3081 ": the builtin requires the following SPIR-V "
3082 "extension: " +
3083 NeedExtMsg;
3084 report_fatal_error(DiagMsg.c_str(), false);
3085 }
3086 if (!IsRightComponentsNumber) {
3087 std::string DiagMsg =
3088 std::string(Builtin->Name) +
3089 ": result and argument must have the same number of components";
3090 report_fatal_error(DiagMsg.c_str(), false);
3091 }
3092 assert(Opcode != SPIRV::OpNop &&
3093 "Conversion between the types not implemented!");
3094
3095 MIRBuilder.buildInstr(Opcode)
3096 .addDef(Call->ReturnRegister)
3097 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
3098 .addUse(Call->Arguments[0]);
3099 return true;
3100}
3101
3103 MachineIRBuilder &MIRBuilder,
3104 SPIRVGlobalRegistry *GR) {
3105 // Lookup the vector load/store builtin in the TableGen records.
3106 const SPIRV::VectorLoadStoreBuiltin *Builtin =
3107 SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name,
3108 Call->Builtin->Set);
3109 // Build extended instruction.
3110 auto MIB =
3111 MIRBuilder.buildInstr(SPIRV::OpExtInst)
3112 .addDef(Call->ReturnRegister)
3113 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
3114 .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
3115 .addImm(Builtin->Number);
3116 for (auto Argument : Call->Arguments)
3117 MIB.addUse(Argument);
3118 if (Builtin->Name.contains("load") && Builtin->ElementCount > 1)
3119 MIB.addImm(Builtin->ElementCount);
3120
3121 // Rounding mode should be passed as a last argument in the MI for builtins
3122 // like "vstorea_halfn_r".
3123 if (Builtin->IsRounded)
3124 MIB.addImm(static_cast<uint32_t>(Builtin->RoundingMode));
3125 return true;
3126}
3127
3129 MachineIRBuilder &MIRBuilder,
3130 SPIRVGlobalRegistry *GR) {
// NOTE(review): the signature line of this generator was lost in this scrape;
// name and exact builtin group cannot be confirmed from here — verify upstream.
// Emits the builtin's native opcode; if the call returns void, the result is
// instead produced into a fresh register of the first argument's pointee type
// and stored back through that pointer (sret-style).
3131 const auto *Builtin = Call->Builtin;
3132 auto *MRI = MIRBuilder.getMRI();
3133 unsigned Opcode =
3134 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
3135 const Type *RetTy = GR->getTypeForSPIRVType(Call->ReturnType);
3136 bool IsVoid = RetTy->isVoidTy();
3137 auto MIB = MIRBuilder.buildInstr(Opcode);
3138 Register DestReg;
3139 if (IsVoid) {
// Void return: synthesize a destination register typed as the pointee of
// Arguments[0]; the computed value will be stored through that pointer below.
3140 LLT PtrTy = MRI->getType(Call->Arguments[0]);
3141 DestReg = MRI->createGenericVirtualRegister(PtrTy);
3142 MRI->setRegClass(DestReg, &SPIRV::pIDRegClass);
3143 SPIRVTypeInst PointeeTy =
3144 GR->getPointeeType(GR->getSPIRVTypeForVReg(Call->Arguments[0]));
3145 MIB.addDef(DestReg);
3146 MIB.addUse(GR->getSPIRVTypeID(PointeeTy));
3147 } else {
// Non-void: the call's own return register/type carry the result.
3148 MIB.addDef(Call->ReturnRegister);
3149 MIB.addUse(GR->getSPIRVTypeID(Call->ReturnType));
3150 }
// Forward the remaining arguments (skipping the sret pointer in the void
// case); G_CONSTANT-defined arguments become literal immediates.
3151 for (unsigned i = IsVoid ? 1 : 0; i < Call->Arguments.size(); ++i) {
3152 Register Arg = Call->Arguments[i];
3153 MachineInstr *DefMI = MRI->getUniqueVRegDef(Arg);
3154 if (DefMI->getOpcode() == TargetOpcode::G_CONSTANT &&
3155 DefMI->getOperand(1).isCImm()) {
3156 MIB.addImm(getConstFromIntrinsic(Arg, MRI));
3157 } else {
3158 MIB.addUse(Arg);
3159 }
3160 }
3161 if (IsVoid) {
// Store the synthesized result back through the sret pointer. NOTE(review):
// a line of the getMachineMemOperand argument list is missing from this
// scrape (embedded number 3164) — restore it from upstream before building.
3162 LLT PtrTy = MRI->getType(Call->Arguments[0]);
3163 MachineMemOperand *MMO = MIRBuilder.getMF().getMachineMemOperand(
3165 PtrTy.getSizeInBytes(), Align(4));
3166 MIRBuilder.buildStore(DestReg, Call->Arguments[0], *MMO);
3167 }
3168 return true;
3169 }
3170
3172 MachineIRBuilder &MIRBuilder,
3173 SPIRVGlobalRegistry *GR) {
3174 // Lookup the instruction opcode in the TableGen records.
3175 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
3176 unsigned Opcode =
3177 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
3178 bool IsLoad = Opcode == SPIRV::OpLoad;
3179 // Build the instruction.
3180 auto MIB = MIRBuilder.buildInstr(Opcode);
3181 if (IsLoad) {
3182 MIB.addDef(Call->ReturnRegister);
3183 MIB.addUse(GR->getSPIRVTypeID(Call->ReturnType));
3184 }
3185 // Add a pointer to the value to load/store.
3186 MIB.addUse(Call->Arguments[0]);
3187 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
3188 // Add a value to store.
3189 if (!IsLoad)
3190 MIB.addUse(Call->Arguments[1]);
3191 // Add optional memory attributes and an alignment.
3192 unsigned NumArgs = Call->Arguments.size();
3193 if ((IsLoad && NumArgs >= 2) || NumArgs >= 3)
3194 MIB.addImm(getConstFromIntrinsic(Call->Arguments[IsLoad ? 1 : 2], MRI));
3195 if ((IsLoad && NumArgs >= 3) || NumArgs >= 4)
3196 MIB.addImm(getConstFromIntrinsic(Call->Arguments[IsLoad ? 2 : 3], MRI));
3197 return true;
3198}
3199
3200namespace SPIRV {
3201// Try to find a builtin function attributes by a demangled function name and
3202// return a tuple <builtin group, op code, ext instruction number>, or a special
3203// tuple value <-1, 0, 0> if the builtin function is not found.
3204// Not all builtin functions are supported, only those with a ready-to-use op
3205// code or instruction number defined in TableGen.
3206// TODO: consider a major rework of mapping demangled calls into a builtin
3207// functions to unify search and decrease number of individual cases.
3208std::tuple<int, unsigned, unsigned>
3209mapBuiltinToOpcode(const StringRef DemangledCall,
3210 SPIRV::InstructionSet::InstructionSet Set) {
3211 Register Reg;
3213 std::unique_ptr<const IncomingCall> Call =
3214 lookupBuiltin(DemangledCall, Set, Reg, nullptr, Args);
3215 if (!Call)
3216 return std::make_tuple(-1, 0, 0);
3217
3218 switch (Call->Builtin->Group) {
3219 case SPIRV::Relational:
3220 case SPIRV::Atomic:
3221 case SPIRV::Barrier:
3222 case SPIRV::CastToPtr:
3223 case SPIRV::ImageMiscQuery:
3224 case SPIRV::SpecConstant:
3225 case SPIRV::Enqueue:
3226 case SPIRV::AsyncCopy:
3227 case SPIRV::LoadStore:
3228 case SPIRV::CoopMatr:
3229 if (const auto *R =
3230 SPIRV::lookupNativeBuiltin(Call->Builtin->Name, Call->Builtin->Set))
3231 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
3232 break;
3233 case SPIRV::Extended:
3234 if (const auto *R = SPIRV::lookupExtendedBuiltin(Call->Builtin->Name,
3235 Call->Builtin->Set))
3236 return std::make_tuple(Call->Builtin->Group, 0, R->Number);
3237 break;
3238 case SPIRV::VectorLoadStore:
3239 if (const auto *R = SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name,
3240 Call->Builtin->Set))
3241 return std::make_tuple(SPIRV::Extended, 0, R->Number);
3242 break;
3243 case SPIRV::Group:
3244 if (const auto *R = SPIRV::lookupGroupBuiltin(Call->Builtin->Name))
3245 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
3246 break;
3247 case SPIRV::AtomicFloating:
3248 if (const auto *R = SPIRV::lookupAtomicFloatingBuiltin(Call->Builtin->Name))
3249 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
3250 break;
3251 case SPIRV::IntelSubgroups:
3252 if (const auto *R = SPIRV::lookupIntelSubgroupsBuiltin(Call->Builtin->Name))
3253 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
3254 break;
3255 case SPIRV::GroupUniform:
3256 if (const auto *R = SPIRV::lookupGroupUniformBuiltin(Call->Builtin->Name))
3257 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
3258 break;
3259 case SPIRV::IntegerDot:
3260 if (const auto *R =
3261 SPIRV::lookupIntegerDotProductBuiltin(Call->Builtin->Name))
3262 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
3263 break;
3264 case SPIRV::WriteImage:
3265 return std::make_tuple(Call->Builtin->Group, SPIRV::OpImageWrite, 0);
3266 case SPIRV::Select:
3267 return std::make_tuple(Call->Builtin->Group, TargetOpcode::G_SELECT, 0);
3268 case SPIRV::Construct:
3269 return std::make_tuple(Call->Builtin->Group, SPIRV::OpCompositeConstruct,
3270 0);
3271 case SPIRV::KernelClock:
3272 return std::make_tuple(Call->Builtin->Group, SPIRV::OpReadClockKHR, 0);
3273 default:
3274 return std::make_tuple(-1, 0, 0);
3275 }
3276 return std::make_tuple(-1, 0, 0);
3277}
3278
3279std::optional<bool> lowerBuiltin(const StringRef DemangledCall,
3280 SPIRV::InstructionSet::InstructionSet Set,
3281 MachineIRBuilder &MIRBuilder,
3282 const Register OrigRet, const Type *OrigRetTy,
3283 const SmallVectorImpl<Register> &Args,
3284 SPIRVGlobalRegistry *GR, const CallBase &CB) {
3285 LLVM_DEBUG(dbgs() << "Lowering builtin call: " << DemangledCall << "\n");
3286
3287 // Lookup the builtin in the TableGen records.
3288 SPIRVTypeInst SpvType = GR->getSPIRVTypeForVReg(OrigRet);
3289 assert(SpvType && "Inconsistent return register: expected valid type info");
3290 std::unique_ptr<const IncomingCall> Call =
3291 lookupBuiltin(DemangledCall, Set, OrigRet, SpvType, Args);
3292
3293 if (!Call) {
3294 LLVM_DEBUG(dbgs() << "Builtin record was not found!\n");
3295 return std::nullopt;
3296 }
3297
3298 // Check if the provided args meet the builtin requirements. If not, treat
3299 // the call as a regular function call rather than crashing.
3300 if (Args.size() < Call->Builtin->MinNumArgs) {
3301 LLVM_DEBUG(dbgs() << "Too few arguments for builtin " << DemangledCall
3302 << ": expected at least " << Call->Builtin->MinNumArgs
3303 << ", got " << Args.size()
3304 << "; treating as a normal function\n");
3305 return std::nullopt;
3306 }
3307 if (Call->Builtin->MaxNumArgs && Args.size() > Call->Builtin->MaxNumArgs) {
3308 LLVM_DEBUG(dbgs() << "Too many arguments for builtin " << DemangledCall
3309 << ": expected at most " << Call->Builtin->MaxNumArgs
3310 << ", got " << Args.size()
3311 << "; treating as a normal function\n");
3312 return std::nullopt;
3313 }
3314
3315 // Match the builtin with implementation based on the grouping.
3316 switch (Call->Builtin->Group) {
3317 case SPIRV::Extended:
3318 return generateExtInst(Call.get(), MIRBuilder, GR, CB);
3319 case SPIRV::Relational:
3320 return generateRelationalInst(Call.get(), MIRBuilder, GR);
3321 case SPIRV::Group:
3322 return generateGroupInst(Call.get(), MIRBuilder, GR);
3323 case SPIRV::Variable:
3324 return generateBuiltinVar(Call.get(), MIRBuilder, GR);
3325 case SPIRV::Atomic:
3326 return generateAtomicInst(Call.get(), MIRBuilder, GR);
3327 case SPIRV::AtomicFloating:
3328 return generateAtomicFloatingInst(Call.get(), MIRBuilder, GR);
3329 case SPIRV::Barrier:
3330 return generateBarrierInst(Call.get(), MIRBuilder, GR);
3331 case SPIRV::CastToPtr:
3332 return generateCastToPtrInst(Call.get(), MIRBuilder, GR);
3333 case SPIRV::Dot:
3334 case SPIRV::IntegerDot:
3335 return generateDotOrFMulInst(DemangledCall, Call.get(), MIRBuilder, GR);
3336 case SPIRV::Wave:
3337 return generateWaveInst(Call.get(), MIRBuilder, GR);
3338 case SPIRV::ICarryBorrow:
3339 return generateICarryBorrowInst(Call.get(), MIRBuilder, GR);
3340 case SPIRV::GetQuery:
3341 return generateGetQueryInst(Call.get(), MIRBuilder, GR);
3342 case SPIRV::ImageSizeQuery:
3343 return generateImageSizeQueryInst(Call.get(), MIRBuilder, GR);
3344 case SPIRV::ImageMiscQuery:
3345 return generateImageMiscQueryInst(Call.get(), MIRBuilder, GR);
3346 case SPIRV::ReadImage:
3347 return generateReadImageInst(DemangledCall, Call.get(), MIRBuilder, GR);
3348 case SPIRV::WriteImage:
3349 return generateWriteImageInst(Call.get(), MIRBuilder, GR);
3350 case SPIRV::SampleImage:
3351 return generateSampleImageInst(DemangledCall, Call.get(), MIRBuilder, GR);
3352 case SPIRV::Select:
3353 return generateSelectInst(Call.get(), MIRBuilder);
3354 case SPIRV::Construct:
3355 return generateConstructInst(Call.get(), MIRBuilder, GR);
3356 case SPIRV::SpecConstant:
3357 return generateSpecConstantInst(Call.get(), MIRBuilder, GR);
3358 case SPIRV::Enqueue:
3359 return generateEnqueueInst(Call.get(), MIRBuilder, GR);
3360 case SPIRV::AsyncCopy:
3361 return generateAsyncCopy(Call.get(), MIRBuilder, GR);
3362 case SPIRV::Convert:
3363 return generateConvertInst(DemangledCall, Call.get(), MIRBuilder, GR);
3364 case SPIRV::VectorLoadStore:
3365 return generateVectorLoadStoreInst(Call.get(), MIRBuilder, GR);
3366 case SPIRV::LoadStore:
3367 return generateLoadStoreInst(Call.get(), MIRBuilder, GR);
3368 case SPIRV::IntelSubgroups:
3369 return generateIntelSubgroupsInst(Call.get(), MIRBuilder, GR);
3370 case SPIRV::GroupUniform:
3371 return generateGroupUniformInst(Call.get(), MIRBuilder, GR);
3372 case SPIRV::KernelClock:
3373 return generateKernelClockInst(Call.get(), MIRBuilder, GR);
3374 case SPIRV::CoopMatr:
3375 return generateCoopMatrInst(Call.get(), MIRBuilder, GR);
3376 case SPIRV::ExtendedBitOps:
3377 return generateExtendedBitOpsInst(Call.get(), MIRBuilder, GR);
3378 case SPIRV::BindlessINTEL:
3379 return generateBindlessImageINTELInst(Call.get(), MIRBuilder, GR);
3380 case SPIRV::TernaryBitwiseINTEL:
3381 return generateTernaryBitwiseFunctionINTELInst(Call.get(), MIRBuilder, GR);
3382 case SPIRV::Block2DLoadStore:
3383 return generate2DBlockIOINTELInst(Call.get(), MIRBuilder, GR);
3384 case SPIRV::Pipe:
3385 return generatePipeInst(Call.get(), MIRBuilder, GR);
3386 case SPIRV::PredicatedLoadStore:
3387 return generatePredicatedLoadStoreInst(Call.get(), MIRBuilder, GR);
3388 case SPIRV::BlockingPipes:
3389 return generateBlockingPipesInst(Call.get(), MIRBuilder, GR);
3390 case SPIRV::ArbitraryPrecisionFixedPoint:
3391 return generateAPFixedPointInst(Call.get(), MIRBuilder, GR);
3392 case SPIRV::ImageChannelDataTypes:
3393 return generateImageChannelDataTypeInst(Call.get(), MIRBuilder, GR);
3394 case SPIRV::ArbitraryFloatingPoint:
3395 return generateAFPInst(Call.get(), MIRBuilder, GR);
3396 }
3397 return false;
3398}
3399
3401 // Parse strings representing OpenCL builtin types.
3402 if (hasBuiltinTypePrefix(TypeStr)) {
3403 // OpenCL builtin types in demangled call strings have the following format:
3404 // e.g. ocl_image2d_ro
3405 [[maybe_unused]] bool IsOCLBuiltinType = TypeStr.consume_front("ocl_");
3406 assert(IsOCLBuiltinType && "Invalid OpenCL builtin prefix");
3407
3408 // Check if this is pointer to a builtin type and not just pointer
3409 // representing a builtin type. In case it is a pointer to builtin type,
3410 // this will require additional handling in the method calling
3411 // parseBuiltinCallArgumentBaseType(...) as this function only retrieves the
3412 // base types.
3413 if (TypeStr.ends_with("*"))
3414 TypeStr = TypeStr.slice(0, TypeStr.find_first_of(" *"));
3415
3416 return parseBuiltinTypeNameToTargetExtType("opencl." + TypeStr.str() + "_t",
3417 Ctx);
3418 }
3419
3420 // Parse type name in either "typeN" or "type vector[N]" format, where
3421 // N is the number of elements of the vector.
3422 Type *BaseType;
3423 unsigned VecElts = 0;
3424
3425 BaseType = parseBasicTypeName(TypeStr, Ctx);
3426 if (!BaseType)
3427 // Unable to recognize SPIRV type name.
3428 return nullptr;
3429
3430 // Handle "typeN*" or "type vector[N]*".
3431 TypeStr.consume_back("*");
3432
3433 if (TypeStr.consume_front(" vector["))
3434 TypeStr = TypeStr.substr(0, TypeStr.find(']'));
3435
3436 TypeStr.getAsInteger(10, VecElts);
3437 if (VecElts > 0)
3439 BaseType->isVoidTy() ? Type::getInt8Ty(Ctx) : BaseType, VecElts, false);
3440
3441 return BaseType;
3442}
3443
3445 const StringRef DemangledCall, LLVMContext &Ctx) {
3446 auto Pos1 = DemangledCall.find('(');
3447 if (Pos1 == StringRef::npos)
3448 return false;
3449 auto Pos2 = DemangledCall.find(')');
3450 if (Pos2 == StringRef::npos || Pos1 > Pos2)
3451 return false;
3452 DemangledCall.slice(Pos1 + 1, Pos2)
3453 .split(BuiltinArgsTypeStrs, ',', -1, false);
3454 return true;
3455}
3456
3458 unsigned ArgIdx, LLVMContext &Ctx) {
3459 SmallVector<StringRef, 10> BuiltinArgsTypeStrs;
3460 parseBuiltinTypeStr(BuiltinArgsTypeStrs, DemangledCall, Ctx);
3461 if (ArgIdx >= BuiltinArgsTypeStrs.size())
3462 return nullptr;
3463 StringRef TypeStr = BuiltinArgsTypeStrs[ArgIdx].trim();
3464 return parseBuiltinCallArgumentType(TypeStr, Ctx);
3465}
3466
3471
3472#define GET_BuiltinTypes_DECL
3473#define GET_BuiltinTypes_IMPL
3474
3479
3480#define GET_OpenCLTypes_DECL
3481#define GET_OpenCLTypes_IMPL
3482
3483#include "SPIRVGenTables.inc"
3484} // namespace SPIRV
3485
3486//===----------------------------------------------------------------------===//
3487// Misc functions for parsing builtin types.
3488//===----------------------------------------------------------------------===//
3489
3490static Type *parseTypeString(const StringRef Name, LLVMContext &Context) {
3491 if (Name.starts_with("void"))
3492 return Type::getVoidTy(Context);
3493 else if (Name.starts_with("int") || Name.starts_with("uint"))
3494 return Type::getInt32Ty(Context);
3495 else if (Name.starts_with("float"))
3496 return Type::getFloatTy(Context);
3497 else if (Name.starts_with("half"))
3498 return Type::getHalfTy(Context);
3499 report_fatal_error("Unable to recognize type!");
3500}
3501
3502//===----------------------------------------------------------------------===//
3503// Implementation functions for builtin types.
3504//===----------------------------------------------------------------------===//
3505
3506static SPIRVTypeInst
3508 const SPIRV::BuiltinType *TypeRecord,
3509 MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR) {
3510 unsigned Opcode = TypeRecord->Opcode;
3511 // Create or get an existing type from GlobalRegistry.
3512 return GR->getOrCreateOpTypeByOpcode(ExtensionType, MIRBuilder, Opcode);
3513}
3514
3516 SPIRVGlobalRegistry *GR) {
3517 // Create or get an existing type from GlobalRegistry.
3518 return GR->getOrCreateOpTypeSampler(MIRBuilder);
3519}
3520
3521static SPIRVTypeInst getPipeType(const TargetExtType *ExtensionType,
3522 MachineIRBuilder &MIRBuilder,
3523 SPIRVGlobalRegistry *GR) {
3524 assert(ExtensionType->getNumIntParameters() == 1 &&
3525 "Invalid number of parameters for SPIR-V pipe builtin!");
3526 // Create or get an existing type from GlobalRegistry.
3527 return GR->getOrCreateOpTypePipe(MIRBuilder,
3528 SPIRV::AccessQualifier::AccessQualifier(
3529 ExtensionType->getIntParameter(0)));
3530}
3531
3532static SPIRVTypeInst getCoopMatrType(const TargetExtType *ExtensionType,
3533 MachineIRBuilder &MIRBuilder,
3534 SPIRVGlobalRegistry *GR) {
3535 assert(ExtensionType->getNumIntParameters() == 4 &&
3536 "Invalid number of parameters for SPIR-V coop matrices builtin!");
3537 assert(ExtensionType->getNumTypeParameters() == 1 &&
3538 "SPIR-V coop matrices builtin type must have a type parameter!");
3539 SPIRVTypeInst ElemType =
3540 GR->getOrCreateSPIRVType(ExtensionType->getTypeParameter(0), MIRBuilder,
3541 SPIRV::AccessQualifier::ReadWrite, true);
3542 // Create or get an existing type from GlobalRegistry.
3543 return GR->getOrCreateOpTypeCoopMatr(
3544 MIRBuilder, ExtensionType, ElemType, ExtensionType->getIntParameter(0),
3545 ExtensionType->getIntParameter(1), ExtensionType->getIntParameter(2),
3546 ExtensionType->getIntParameter(3), true);
3547}
3548
3550 MachineIRBuilder &MIRBuilder,
3551 SPIRVGlobalRegistry *GR) {
3552 SPIRVTypeInst OpaqueImageType = GR->getImageType(
3553 OpaqueType, SPIRV::AccessQualifier::ReadOnly, MIRBuilder);
3554 // Create or get an existing type from GlobalRegistry.
3555 return GR->getOrCreateOpTypeSampledImage(OpaqueImageType, MIRBuilder);
3556}
3557
3559 MachineIRBuilder &MIRBuilder,
3560 SPIRVGlobalRegistry *GR) {
3561 assert(ExtensionType->getNumIntParameters() == 3 &&
3562 "Inline SPIR-V type builtin takes an opcode, size, and alignment "
3563 "parameter");
3564 auto Opcode = ExtensionType->getIntParameter(0);
3565
3566 SmallVector<MCOperand> Operands;
3567 for (Type *Param : ExtensionType->type_params()) {
3568 if (const TargetExtType *ParamEType = dyn_cast<TargetExtType>(Param)) {
3569 if (ParamEType->getName() == "spirv.IntegralConstant") {
3570 assert(ParamEType->getNumTypeParameters() == 1 &&
3571 "Inline SPIR-V integral constant builtin must have a type "
3572 "parameter");
3573 assert(ParamEType->getNumIntParameters() == 1 &&
3574 "Inline SPIR-V integral constant builtin must have a "
3575 "value parameter");
3576
3577 auto OperandValue = ParamEType->getIntParameter(0);
3578 auto *OperandType = ParamEType->getTypeParameter(0);
3579
3580 SPIRVTypeInst OperandSPIRVType = GR->getOrCreateSPIRVType(
3581 OperandType, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, true);
3582
3584 OperandValue, MIRBuilder, OperandSPIRVType, true)));
3585 continue;
3586 } else if (ParamEType->getName() == "spirv.Literal") {
3587 assert(ParamEType->getNumTypeParameters() == 0 &&
3588 "Inline SPIR-V literal builtin does not take type "
3589 "parameters");
3590 assert(ParamEType->getNumIntParameters() == 1 &&
3591 "Inline SPIR-V literal builtin must have an integer "
3592 "parameter");
3593
3594 auto OperandValue = ParamEType->getIntParameter(0);
3595
3596 Operands.push_back(MCOperand::createImm(OperandValue));
3597 continue;
3598 }
3599 }
3600 SPIRVTypeInst TypeOperand = GR->getOrCreateSPIRVType(
3601 Param, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, true);
3602 Operands.push_back(MCOperand::createReg(GR->getSPIRVTypeID(TypeOperand)));
3603 }
3604
3605 return GR->getOrCreateUnknownType(ExtensionType, MIRBuilder, Opcode,
3606 Operands);
3607}
3608
3610 MachineIRBuilder &MIRBuilder,
3611 SPIRVGlobalRegistry *GR) {
3612 assert(ExtensionType->getNumTypeParameters() == 1 &&
3613 "Vulkan buffers have exactly one type for the type of the buffer.");
3614 assert(ExtensionType->getNumIntParameters() == 2 &&
3615 "Vulkan buffer have 2 integer parameters: storage class and is "
3616 "writable.");
3617
3618 auto *T = ExtensionType->getTypeParameter(0);
3619 auto SC = static_cast<SPIRV::StorageClass::StorageClass>(
3620 ExtensionType->getIntParameter(0));
3621 bool IsWritable = ExtensionType->getIntParameter(1);
3622 return GR->getOrCreateVulkanBufferType(MIRBuilder, T, SC, IsWritable);
3623}
3624
3625static SPIRVTypeInst
3627 MachineIRBuilder &MIRBuilder,
3628 SPIRVGlobalRegistry *GR) {
3629 assert(ExtensionType->getNumTypeParameters() == 1 &&
3630 "Vulkan push constants have exactly one type as argument.");
3631 auto *T = ExtensionType->getTypeParameter(0);
3632 return GR->getOrCreateVulkanPushConstantType(MIRBuilder, T);
3633}
3634
3635static SPIRVTypeInst getLayoutType(const TargetExtType *ExtensionType,
3636 MachineIRBuilder &MIRBuilder,
3637 SPIRVGlobalRegistry *GR) {
3638 return GR->getOrCreateLayoutType(MIRBuilder, ExtensionType);
3639}
3640
3641namespace SPIRV {
3643 LLVMContext &Context) {
3644 StringRef NameWithParameters = TypeName;
3645
3646 // Pointers-to-opaque-structs representing OpenCL types are first translated
3647 // to equivalent SPIR-V types. OpenCL builtin type names should have the
3648 // following format: e.g. %opencl.event_t
3649 if (NameWithParameters.starts_with("opencl.")) {
3650 const SPIRV::OpenCLType *OCLTypeRecord =
3651 SPIRV::lookupOpenCLType(NameWithParameters);
3652 if (!OCLTypeRecord)
3653 report_fatal_error("Missing TableGen record for OpenCL type: " +
3654 NameWithParameters);
3655 NameWithParameters = OCLTypeRecord->SpirvTypeLiteral;
3656 // Continue with the SPIR-V builtin type...
3657 }
3658
3659 // Names of the opaque structs representing a SPIR-V builtins without
3660 // parameters should have the following format: e.g. %spirv.Event
3661 assert(NameWithParameters.starts_with("spirv.") &&
3662 "Unknown builtin opaque type!");
3663
3664 // Parameterized SPIR-V builtins names follow this format:
3665 // e.g. %spirv.Image._void_1_0_0_0_0_0_0, %spirv.Pipe._0
3666 if (!NameWithParameters.contains('_'))
3667 return TargetExtType::get(Context, NameWithParameters);
3668
3669 SmallVector<StringRef> Parameters;
3670 unsigned BaseNameLength = NameWithParameters.find('_') - 1;
3671 SplitString(NameWithParameters.substr(BaseNameLength + 1), Parameters, "_");
3672
3673 SmallVector<Type *, 1> TypeParameters;
3674 bool HasTypeParameter = !isDigit(Parameters[0][0]);
3675 if (HasTypeParameter)
3676 TypeParameters.push_back(parseTypeString(Parameters[0], Context));
3677 SmallVector<unsigned> IntParameters;
3678 for (unsigned i = HasTypeParameter ? 1 : 0; i < Parameters.size(); i++) {
3679 unsigned IntParameter = 0;
3680 bool ValidLiteral = !Parameters[i].getAsInteger(10, IntParameter);
3681 (void)ValidLiteral;
3682 assert(ValidLiteral &&
3683 "Invalid format of SPIR-V builtin parameter literal!");
3684 IntParameters.push_back(IntParameter);
3685 }
3686 return TargetExtType::get(Context,
3687 NameWithParameters.substr(0, BaseNameLength),
3688 TypeParameters, IntParameters);
3689}
3690
3692lowerBuiltinType(const Type *OpaqueType,
3693 SPIRV::AccessQualifier::AccessQualifier AccessQual,
3694 MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR) {
3695 // In LLVM IR, SPIR-V and OpenCL builtin types are represented as either
3696 // target(...) target extension types or pointers-to-opaque-structs. The
3697 // approach relying on structs is deprecated and works only in the non-opaque
3698 // pointer mode (-opaque-pointers=0).
3699 // In order to maintain compatibility with LLVM IR generated by older versions
3700 // of Clang and LLVM/SPIR-V Translator, the pointers-to-opaque-structs are
3701 // "translated" to target extension types. This translation is temporary and
3702 // will be removed in the future release of LLVM.
3704 if (!BuiltinType)
3706 OpaqueType->getStructName().str(), MIRBuilder.getContext());
3707
3708 unsigned NumStartingVRegs = MIRBuilder.getMRI()->getNumVirtRegs();
3709
3710 const StringRef Name = BuiltinType->getName();
3711 LLVM_DEBUG(dbgs() << "Lowering builtin type: " << Name << "\n");
3712
3713 SPIRVTypeInst TargetType = nullptr;
3714 if (Name == "spirv.Type") {
3715 TargetType = getInlineSpirvType(BuiltinType, MIRBuilder, GR);
3716 } else if (Name == "spirv.VulkanBuffer") {
3717 TargetType = getVulkanBufferType(BuiltinType, MIRBuilder, GR);
3718 } else if (Name == "spirv.Padding") {
3719 TargetType = GR->getOrCreatePaddingType(MIRBuilder);
3720 } else if (Name == "spirv.PushConstant") {
3721 TargetType = getVulkanPushConstantType(BuiltinType, MIRBuilder, GR);
3722 } else if (Name == "spirv.Layout") {
3723 TargetType = getLayoutType(BuiltinType, MIRBuilder, GR);
3724 } else {
3725 // Lookup the demangled builtin type in the TableGen records.
3726 const SPIRV::BuiltinType *TypeRecord = SPIRV::lookupBuiltinType(Name);
3727 if (!TypeRecord)
3728 report_fatal_error("Missing TableGen record for builtin type: " + Name);
3729
3730 // "Lower" the BuiltinType into TargetType. The following get<...>Type
3731 // methods use the implementation details from TableGen records or
3732 // TargetExtType parameters to either create a new OpType<...> machine
3733 // instruction or get an existing equivalent SPIRV type from
3734 // GlobalRegistry.
3735
3736 switch (TypeRecord->Opcode) {
3737 case SPIRV::OpTypeImage:
3738 TargetType = GR->getImageType(BuiltinType, AccessQual, MIRBuilder);
3739 break;
3740 case SPIRV::OpTypePipe:
3741 TargetType = getPipeType(BuiltinType, MIRBuilder, GR);
3742 break;
3743 case SPIRV::OpTypeDeviceEvent:
3744 TargetType = GR->getOrCreateOpTypeDeviceEvent(MIRBuilder);
3745 break;
3746 case SPIRV::OpTypeSampler:
3747 TargetType = getSamplerType(MIRBuilder, GR);
3748 break;
3749 case SPIRV::OpTypeSampledImage:
3750 TargetType = getSampledImageType(BuiltinType, MIRBuilder, GR);
3751 break;
3752 case SPIRV::OpTypeCooperativeMatrixKHR:
3753 TargetType = getCoopMatrType(BuiltinType, MIRBuilder, GR);
3754 break;
3755 default:
3756 TargetType =
3757 getNonParameterizedType(BuiltinType, TypeRecord, MIRBuilder, GR);
3758 break;
3759 }
3760 }
3761
3762 // Emit OpName instruction if a new OpType<...> instruction was added
3763 // (equivalent type was not found in GlobalRegistry).
3764 if (NumStartingVRegs < MIRBuilder.getMRI()->getNumVirtRegs())
3765 buildOpName(GR->getSPIRVTypeID(TargetType), Name, MIRBuilder);
3766
3767 return TargetType;
3768}
3769} // namespace SPIRV
3770} // namespace llvm
MachineInstrBuilder MachineInstrBuilder & DefMI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU Lower Kernel Arguments
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
IRTranslator LLVM IR MI
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Register Reg
Promote Memory to Register
Definition Mem2Reg.cpp:110
#define T
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
This file contains some functions that are useful when dealing with strings.
#define LLVM_DEBUG(...)
Definition Debug.h:114
static const fltSemantics & IEEEsingle()
Definition APFloat.h:296
APInt bitcastToAPInt() const
Definition APFloat.h:1408
static APFloat getZero(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Zero.
Definition APFloat.h:1134
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition APInt.h:235
uint64_t getZExtValue() const
Get zero extended value.
Definition APInt.h:1555
This class represents an incoming formal argument to a Function.
Definition Argument.h:32
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
static LLVM_ABI ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
LLVM_ABI FPClassTest getParamNoFPClass(unsigned i) const
Extract a test mask for disallowed floating-point value classes for the parameter.
LLVM_ABI FPClassTest getRetNoFPClass() const
Extract a test mask for disallowed floating-point value classes for the return value.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
@ ICMP_NE
not equal
Definition InstrTypes.h:698
const APFloat & getValueAPF() const
Definition Constants.h:463
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition Constants.h:159
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
Tagged union holding either a T or a Error.
Definition Error.h:485
Class to represent fixed width SIMD vectors.
Class to represent function types.
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Type * getParamType(unsigned i) const
Parameter type accessors.
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition Function.cpp:358
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:354
static constexpr LLT vector(ElementCount EC, unsigned ScalarSizeInBits)
Get a low-level vector of some number of elements and element width.
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
constexpr TypeSize getSizeInBytes() const
Returns the total size of the type in bytes, i.e.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
static MCOperand createReg(MCRegister Reg)
Definition MCInst.h:138
static MCOperand createImm(int64_t Val)
Definition MCInst.h:145
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Helper class to build MachineInstr.
LLVMContext & getContext() const
MachineInstrBuilder buildSelect(const DstOp &Res, const SrcOp &Tst, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_SELECT Tst, Op0, Op1.
MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_ICMP Pred, Op0, Op1.
MachineBasicBlock::iterator getInsertPt()
Current insertion point for new instructions.
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef< Register > Res, bool HasSideEffects, bool isConvergent)
Build and insert a G_INTRINSIC instruction.
MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
MachineInstrBuilder buildZExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert G_STORE Val, Addr, MMO.
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineFunction & getMF()
Getter for the function we currently build.
const MachineBasicBlock & getMBB() const
Getter for the basic block we currently build.
MachineRegisterInfo * getMRI()
Getter for MRI.
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
const DataLayout & getDataLayout() const
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addUse(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addDef(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a virtual register definition operand.
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
LLVM_ABI void copyIRFlags(const Instruction &I)
Copy all flags to MachineInst MIFlags.
void setFlag(MIFlag Flag)
Set a MI flag.
const MachineOperand & getOperand(unsigned i) const
A description of a memory reference used in the backend.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
MachineOperand class - Representation of each machine instruction operand.
const GlobalValue * getGlobal() const
const ConstantInt * getCImm() const
bool isCImm() const
isCImm - Test if this is a MO_CImmediate operand.
int64_t getImm() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
const MDNode * getMetadata() const
Register getReg() const
getReg - Returns the register number.
const ConstantFP * getFPImm() const
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
LLT getType(Register Reg) const
Get the low-level type of Reg or LLT{} if Reg is not a generic (target independent) virtual register.
LLVM_ABI void setType(Register VReg, LLT Ty)
Set the low-level type of VReg to Ty.
LLVM_ABI void setRegClass(Register Reg, const TargetRegisterClass *RC)
setRegClass - Set the register class of the specified virtual register.
LLVM_ABI Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
const TargetRegisterClass * getRegClassOrNull(Register Reg) const
Return the register class of Reg, or null if Reg has not been assigned a register class yet.
unsigned getNumVirtRegs() const
getNumVirtRegs - Return the number of virtual registers created.
LLVM_ABI MachineInstr * getUniqueVRegDef(Register Reg) const
getUniqueVRegDef - Return the unique machine instr that defines the specified virtual register or nul...
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Wrapper class representing virtual and physical registers.
Definition Register.h:20
constexpr bool isValid() const
Definition Register.h:112
SPIRVTypeInst getImageType(const TargetExtType *ExtensionType, const SPIRV::AccessQualifier::AccessQualifier Qualifier, MachineIRBuilder &MIRBuilder)
SPIRVTypeInst getOrCreateOpTypeSampledImage(SPIRVTypeInst ImageType, MachineIRBuilder &MIRBuilder)
void assignSPIRVTypeToVReg(SPIRVTypeInst Type, Register VReg, const MachineFunction &MF)
const TargetRegisterClass * getRegClass(SPIRVTypeInst SpvType) const
unsigned getScalarOrVectorBitWidth(SPIRVTypeInst Type) const
SPIRVTypeInst getOrCreateSPIRVIntegerType(unsigned BitWidth, MachineIRBuilder &MIRBuilder)
SPIRVTypeInst getOrCreateSPIRVVectorType(SPIRVTypeInst BaseType, unsigned NumElements, MachineIRBuilder &MIRBuilder, bool EmitIR)
SPIRVTypeInst getOrCreateSPIRVTypeByName(StringRef TypeStr, MachineIRBuilder &MIRBuilder, bool EmitIR, SPIRV::StorageClass::StorageClass SC=SPIRV::StorageClass::Function, SPIRV::AccessQualifier::AccessQualifier AQ=SPIRV::AccessQualifier::ReadWrite)
Register buildGlobalVariable(Register Reg, SPIRVTypeInst BaseType, StringRef Name, const GlobalValue *GV, SPIRV::StorageClass::StorageClass Storage, const MachineInstr *Init, bool IsConst, const std::optional< SPIRV::LinkageType::LinkageType > &LinkageType, MachineIRBuilder &MIRBuilder, bool IsInstSelector)
SPIRVTypeInst getOrCreateOpTypeByOpcode(const Type *Ty, MachineIRBuilder &MIRBuilder, unsigned Opcode)
unsigned getScalarOrVectorComponentCount(Register VReg) const
const Type * getTypeForSPIRVType(SPIRVTypeInst Ty) const
SPIRVTypeInst getOrCreatePaddingType(MachineIRBuilder &MIRBuilder)
LLT getRegType(SPIRVTypeInst SpvType) const
SPIRVTypeInst getOrCreateSPIRVBoolType(MachineIRBuilder &MIRBuilder, bool EmitIR)
SPIRVTypeInst getOrCreateSPIRVPointerType(const Type *BaseType, MachineIRBuilder &MIRBuilder, SPIRV::StorageClass::StorageClass SC)
bool isScalarOfType(Register VReg, unsigned TypeOpcode) const
Register getSPIRVTypeID(SPIRVTypeInst SpirvType) const
Register getOrCreateConstIntArray(uint64_t Val, size_t Num, MachineInstr &I, SPIRVTypeInst SpvType, const SPIRVInstrInfo &TII)
SPIRVTypeInst getOrCreateOpTypeCoopMatr(MachineIRBuilder &MIRBuilder, const TargetExtType *ExtensionType, SPIRVTypeInst ElemType, uint32_t Scope, uint32_t Rows, uint32_t Columns, uint32_t Use, bool EmitIR)
SPIRVTypeInst getOrCreateUnknownType(const Type *Ty, MachineIRBuilder &MIRBuilder, unsigned Opcode, const ArrayRef< MCOperand > Operands)
Register buildConstantFP(APFloat Val, MachineIRBuilder &MIRBuilder, SPIRVTypeInst SpvType=nullptr)
SPIRVTypeInst getOrCreateOpTypePipe(MachineIRBuilder &MIRBuilder, SPIRV::AccessQualifier::AccessQualifier AccQual)
SPIRVTypeInst getOrCreateVulkanBufferType(MachineIRBuilder &MIRBuilder, Type *ElemType, SPIRV::StorageClass::StorageClass SC, bool IsWritable, bool EmitIr=false)
SPIRVTypeInst getPointeeType(SPIRVTypeInst PtrType)
SPIRVTypeInst getOrCreateSPIRVType(const Type *Type, MachineInstr &I, SPIRV::AccessQualifier::AccessQualifier AQ, bool EmitIR)
Register getOrCreateConsIntVector(uint64_t Val, MachineIRBuilder &MIRBuilder, SPIRVTypeInst SpvType, bool EmitIR)
bool isScalarOrVectorOfType(Register VReg, unsigned TypeOpcode) const
SPIRVTypeInst getOrCreateLayoutType(MachineIRBuilder &MIRBuilder, const TargetExtType *T, bool EmitIr=false)
Register getOrCreateConstNullPtr(MachineIRBuilder &MIRBuilder, SPIRVTypeInst SpvType)
SPIRVTypeInst getSPIRVTypeForVReg(Register VReg, const MachineFunction *MF=nullptr) const
SPIRVTypeInst getOrCreateOpTypeSampler(MachineIRBuilder &MIRBuilder)
SPIRV::StorageClass::StorageClass getPointerStorageClass(Register VReg) const
Register buildConstantSampler(Register Res, unsigned AddrMode, unsigned Param, unsigned FilerMode, MachineIRBuilder &MIRBuilder)
Register buildConstantInt(uint64_t Val, MachineIRBuilder &MIRBuilder, SPIRVTypeInst SpvType, bool EmitIR, bool ZeroAsNull=true)
SPIRVTypeInst getOrCreateVulkanPushConstantType(MachineIRBuilder &MIRBuilder, Type *ElemType)
SPIRVTypeInst getOrCreateOpTypeDeviceEvent(MachineIRBuilder &MIRBuilder)
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition StringRef.h:730
static constexpr size_t npos
Definition StringRef.h:57
bool consume_back(StringRef Suffix)
Returns true if this StringRef has the given suffix and removes that suffix.
Definition StringRef.h:685
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition StringRef.h:490
std::string str() const
str - Get the contents as an std::string.
Definition StringRef.h:222
constexpr StringRef substr(size_t Start, size_t N=npos) const
Return a reference to the substring from [Start, Start + N).
Definition StringRef.h:591
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:258
bool contains_insensitive(StringRef Other) const
Return true if the given string is a substring of *this, and false otherwise.
Definition StringRef.h:456
StringRef slice(size_t Start, size_t End) const
Return a reference to the substring from [Start, End).
Definition StringRef.h:714
constexpr size_t size() const
size - Get the string size.
Definition StringRef.h:143
bool contains(StringRef Other) const
Return true if the given string is a substring of *this, and false otherwise.
Definition StringRef.h:446
size_t find_first_of(char C, size_t From=0) const
Find the first character in the string that is C, or npos if not found.
Definition StringRef.h:396
size_t find(char C, size_t From=0) const
Search for the first character C in the string.
Definition StringRef.h:290
bool ends_with(StringRef Suffix) const
Check if this string ends with the given Suffix.
Definition StringRef.h:270
bool consume_front(char Prefix)
Returns true if this StringRef has the given prefix and removes that prefix.
Definition StringRef.h:655
A switch()-like statement whose cases are string literals.
StringSwitch & EndsWith(StringLiteral S, T Value)
Class to represent target extensions types, which are generally unintrospectable from target-independ...
ArrayRef< Type * > type_params() const
Return the type parameters for this particular target extension type.
unsigned getNumIntParameters() const
static LLVM_ABI TargetExtType * get(LLVMContext &Context, StringRef Name, ArrayRef< Type * > Types={}, ArrayRef< unsigned > Ints={})
Return a target extension type having the specified name and optional type and integer parameters.
Definition Type.cpp:978
Type * getTypeParameter(unsigned i) const
unsigned getNumTypeParameters() const
unsigned getIntParameter(unsigned i) const
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:313
LLVM_ABI StringRef getStructName() const
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Definition Type.cpp:286
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
Definition Type.cpp:311
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:186
static LLVM_ABI Type * getFloatTy(LLVMContext &C)
Definition Type.cpp:290
static LLVM_ABI Type * getHalfTy(LLVMContext &C)
Definition Type.cpp:288
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:141
LLVM Value Representation.
Definition Value.h:75
LLVM_ABI Value(Type *Ty, unsigned scid)
Definition Value.cpp:53
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
Represents a version number in the form major[.minor[.subminor[.build]]].
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
CallInst * Call
LLVM_C_ABI LLVMTypeRef LLVMVectorType(LLVMTypeRef ElementType, unsigned ElementCount)
Create a vector type that contains a defined type and has a specific number of elements.
Definition Core.cpp:911
std::string lookupBuiltinNameHelper(StringRef DemangledCall, FPDecorationId *DecorationId)
Parses the name part of the demangled builtin call.
Type * parseBuiltinCallArgumentType(StringRef TypeStr, LLVMContext &Ctx)
bool parseBuiltinTypeStr(SmallVector< StringRef, 10 > &BuiltinArgsTypeStrs, const StringRef DemangledCall, LLVMContext &Ctx)
std::optional< bool > lowerBuiltin(const StringRef DemangledCall, SPIRV::InstructionSet::InstructionSet Set, MachineIRBuilder &MIRBuilder, const Register OrigRet, const Type *OrigRetTy, const SmallVectorImpl< Register > &Args, SPIRVGlobalRegistry *GR, const CallBase &CB)
std::tuple< int, unsigned, unsigned > mapBuiltinToOpcode(const StringRef DemangledCall, SPIRV::InstructionSet::InstructionSet Set)
Helper function for finding a builtin function attributes by a demangled function name.
Type * parseBuiltinCallArgumentBaseType(const StringRef DemangledCall, unsigned ArgIdx, LLVMContext &Ctx)
Parses the provided ArgIdx argument base type in the DemangledCall skeleton.
TargetExtType * parseBuiltinTypeNameToTargetExtType(std::string TypeName, LLVMContext &Context)
Translates a string representing a SPIR-V or OpenCL builtin type to a TargetExtType that can be furth...
SPIRVTypeInst lowerBuiltinType(const Type *OpaqueType, SPIRV::AccessQualifier::AccessQualifier AccessQual, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
This is an optimization pass for GlobalISel generic memory operations.
void buildOpName(Register Target, const StringRef &Name, MachineIRBuilder &MIRBuilder)
static bool build2DBlockIOINTELInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building Intel's 2d block io instructions.
static bool generateExtInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR, const CallBase &CB)
static bool generateBindlessImageINTELInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateGetQueryInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateLoadStoreInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateConstructInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildAtomicFlagInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building atomic flag instructions (e.g.
static bool generateImageSizeQueryInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRV::SamplerFilterMode::SamplerFilterMode getSamplerFilterModeFromBitmask(unsigned Bitmask)
static bool buildAtomicStoreInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic store instruction.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
FunctionAddr VTableAddr uintptr_t uintptr_t Int32Ty
Definition InstrProf.h:328
void addNumImm(const APInt &Imm, MachineInstrBuilder &MIB)
static bool buildExtendedBitOpsInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building extended bit operations.
static const Type * getBlockStructType(Register ParamReg, MachineRegisterInfo *MRI)
static bool generateGroupInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
FPDecorationId demangledPostfixToDecorationId(const std::string &S)
Definition SPIRVUtils.h:556
static SPIRVTypeInst getSamplerType(MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static unsigned getNumComponentsForDim(SPIRV::Dim::Dim dim)
static bool generateImageChannelDataTypeInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool builtinMayNeedPromotionToVec(uint32_t BuiltinNumber)
static std::tuple< Register, SPIRVTypeInst > buildBoolRegister(MachineIRBuilder &MIRBuilder, SPIRVTypeInst ResultType, SPIRVGlobalRegistry *GR)
Helper function building either a resulting scalar or vector bool register depending on the expected ...
Register createVirtualRegister(SPIRVTypeInst SpvType, SPIRVGlobalRegistry *GR, MachineRegisterInfo *MRI, const MachineFunction &MF)
static bool generateICarryBorrowInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static Register buildScopeReg(Register CLScopeRegister, SPIRV::Scope::Scope Scope, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR, MachineRegisterInfo *MRI)
FPDecorationId
Definition SPIRVUtils.h:554
void updateRegType(Register Reg, Type *Ty, SPIRVTypeInst SpirvTy, SPIRVGlobalRegistry *GR, MachineIRBuilder &MIB, MachineRegisterInfo &MRI)
Helper external function for assigning a SPIRV type to a register, ensuring the register class and ty...
static SPIRVTypeInst getInlineSpirvType(const TargetExtType *ExtensionType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
uint64_t getIConstVal(Register ConstReg, const MachineRegisterInfo *MRI)
static Register buildConstantIntReg32(uint64_t Val, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
SmallVector< MachineInstr *, 4 > createContinuedInstructions(MachineIRBuilder &MIRBuilder, unsigned Opcode, unsigned MinWC, unsigned ContinuedOpcode, ArrayRef< Register > Args, Register ReturnRegister, Register TypeID)
static unsigned getNumSizeComponents(SPIRVTypeInst imgType)
Helper function for obtaining the number of size components.
SPIRV::MemorySemantics::MemorySemantics getMemSemanticsForStorageClass(SPIRV::StorageClass::StorageClass SC)
constexpr unsigned storageClassToAddressSpace(SPIRV::StorageClass::StorageClass SC)
Definition SPIRVUtils.h:247
static bool generateSampleImageInst(const StringRef DemangledCall, const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateBarrierInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVTypeInst getLayoutType(const TargetExtType *ExtensionType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
LLVM_ABI void SplitString(StringRef Source, SmallVectorImpl< StringRef > &OutFragments, StringRef Delimiters=" \t\n\v\f\r")
SplitString - Split up the specified string according to the specified delimiters,...
static SPIRVTypeInst getVulkanPushConstantType(const TargetExtType *ExtensionType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildImageChannelDataTypeInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateKernelClockInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static void setRegClassIfNull(Register Reg, MachineRegisterInfo *MRI, SPIRVGlobalRegistry *GR)
static bool generateGroupUniformInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateWaveInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
void buildOpDecorate(Register Reg, MachineIRBuilder &MIRBuilder, SPIRV::Decoration::Decoration Dec, const std::vector< uint32_t > &DecArgs, StringRef StrImm)
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
static bool buildBarrierInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building barriers, i.e., memory/control ordering operations.
static bool generateAsyncCopy(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static Register buildBuiltinVariableLoad(MachineIRBuilder &MIRBuilder, SPIRVTypeInst VariableType, SPIRVGlobalRegistry *GR, SPIRV::BuiltIn::BuiltIn BuiltinValue, LLT LLType, Register Reg=Register(0), bool isConst=true, const std::optional< SPIRV::LinkageType::LinkageType > &LinkageTy={ SPIRV::LinkageType::Import})
Helper function for building a load instruction for loading a builtin global variable of BuiltinValue...
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
static SPIRV::Scope::Scope getSPIRVScope(SPIRV::CLMemoryScope ClScope)
static bool buildAPFixedPointInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateBlockingPipesInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
static bool generateEnqueueInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
static const Type * getMachineInstrType(MachineInstr *MI)
bool isDigit(char C)
Checks if character C is one of the 10 decimal digits.
static SPIRV::SamplerAddressingMode::SamplerAddressingMode getSamplerAddressingModeFromBitmask(unsigned Bitmask)
static bool generateAtomicInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static Register buildLoadInst(SPIRVTypeInst BaseType, Register PtrRegister, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR, LLT LowLevelType, Register DestinationReg=Register(0))
Helper function for building a load instruction loading into the DestinationReg.
static bool generateDotOrFMulInst(const StringRef DemangledCall, const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateConvertInst(const StringRef DemangledCall, const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateTernaryBitwiseFunctionINTELInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static std::unique_ptr< const SPIRV::IncomingCall > lookupBuiltin(StringRef DemangledCall, SPIRV::InstructionSet::InstructionSet Set, Register ReturnRegister, SPIRVTypeInst ReturnType, const SmallVectorImpl< Register > &Arguments)
Looks up the demangled builtin call in the SPIRVBuiltins.td records using the provided DemangledCall ...
static bool generateCastToPtrInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
constexpr bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC)
Definition SPIRVUtils.h:232
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
static bool buildSelectInst(MachineIRBuilder &MIRBuilder, Register ReturnRegister, Register SourceRegister, SPIRVTypeInst ReturnType, SPIRVGlobalRegistry *GR)
Helper function for building either a vector or scalar select instruction depending on the expected R...
static Register buildMemSemanticsReg(Register SemanticsRegister, Register PtrRegister, unsigned &Semantics, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static unsigned getConstFromIntrinsic(Register Reg, MachineRegisterInfo *MRI)
static SPIRVTypeInst getOrCreateSPIRVDeviceEventPointer(MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateImageMiscQueryInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateSelectInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder)
static bool buildAtomicLoadInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic load instruction.
static bool generateIntelSubgroupsInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVTypeInst getCoopMatrType(const TargetExtType *ExtensionType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateExtendedBitOpsInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildPipeInst(const SPIRV::IncomingCall *Call, unsigned Opcode, unsigned Scope, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateSpecConstantInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Type * parseBasicTypeName(StringRef &TypeName, LLVMContext &Ctx)
static bool generateVectorLoadStoreInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool genWorkgroupQuery(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR, SPIRV::BuiltIn::BuiltIn BuiltinValue, uint64_t DefaultValue)
static bool generateCoopMatrInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SmallVector< Register > getBuiltinCallArguments(const SPIRV::IncomingCall *Call, uint32_t BuiltinNumber, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVTypeInst getNonParameterizedType(const TargetExtType *ExtensionType, const SPIRV::BuiltinType *TypeRecord, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildBindlessImageINTELInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building Intel's bindless image instructions.
static bool buildAtomicFloatingRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic floating-type instruction.
MachineInstr * getDefInstrMaybeConstant(Register &ConstReg, const MachineRegisterInfo *MRI)
constexpr unsigned BitWidth
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1917
static bool generate2DBlockIOINTELInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateReadImageInst(const StringRef DemangledCall, const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
bool hasBuiltinTypePrefix(StringRef Name)
static bool buildEnqueueKernel(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Type * getMDOperandAsType(const MDNode *N, unsigned I)
static bool generatePipeInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildTernaryBitwiseFunctionINTELInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building Intel's OpBitwiseFunctionINTEL instruction.
static bool generateAPFixedPointInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildAtomicRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building atomic instructions.
static SPIRV::MemorySemantics::MemorySemantics getSPIRVMemSemantics(std::memory_order MemOrder)
static bool generateRelationalInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVTypeInst getPipeType(const TargetExtType *ExtensionType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildAtomicInitInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder)
Helper function for translating atomic init to OpStore.
static bool generateWriteImageInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static Type * parseTypeString(const StringRef Name, LLVMContext &Context)
bool isSpvIntrinsic(const MachineInstr &MI, Intrinsic::ID IntrinsicID)
static bool generatePredicatedLoadStoreInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateAtomicFloatingInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateAFPInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildNDRange(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static MachineInstr * getBlockStructInstr(Register ParamReg, MachineRegisterInfo *MRI)
static SPIRVTypeInst getSampledImageType(const TargetExtType *OpaqueType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildOpFromWrapper(MachineIRBuilder &MIRBuilder, unsigned Opcode, const SPIRV::IncomingCall *Call, Register TypeReg, ArrayRef< uint32_t > ImmArgs={})
static unsigned getSamplerParamFromBitmask(unsigned Bitmask)
static SPIRVTypeInst getVulkanBufferType(const TargetExtType *ExtensionType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildAtomicCompareExchangeInst(const SPIRV::IncomingCall *Call, const SPIRV::DemangledBuiltin *Builtin, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic compare-exchange instruction.
std::string getLinkStringForBuiltIn(SPIRV::BuiltIn::BuiltIn BuiltInValue)
static bool generateBuiltinVar(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Implement std::hash so that hash_code can be used in STL containers.
Definition BitVector.h:870
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
This class contains a discriminated union of information about pointers in memory operands,...
FPRoundingMode::FPRoundingMode RoundingMode
InstructionSet::InstructionSet Set
InstructionSet::InstructionSet Set
InstructionSet::InstructionSet Set
InstructionSet::InstructionSet Set
const SmallVectorImpl< Register > & Arguments
const SPIRVTypeInst ReturnType
IncomingCall(const std::string BuiltinName, const DemangledBuiltin *Builtin, const Register ReturnRegister, SPIRVTypeInst ReturnType, const SmallVectorImpl< Register > &Arguments)
const std::string BuiltinName
const DemangledBuiltin * Builtin
InstructionSet::InstructionSet Set
InstructionSet::InstructionSet Set
FPRoundingMode::FPRoundingMode RoundingMode