//===- SPIRVBuiltins.cpp - SPIR-V Built-in Functions ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements lowering builtin function calls and types using their
// demangled names and TableGen records.
//
//===----------------------------------------------------------------------===//

#include "SPIRVBuiltins.h"
#include "SPIRV.h"
#include "SPIRVSubtarget.h"
#include "SPIRVUtils.h"
#include "llvm/IR/IntrinsicsSPIRV.h"
#include <regex>
#include <string>
#include <tuple>

#define DEBUG_TYPE "spirv-builtins"

namespace llvm {
namespace SPIRV {
#define GET_BuiltinGroup_DECL
#include "SPIRVGenTables.inc"

struct DemangledBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  BuiltinGroup Group;
  uint8_t MinNumArgs;
  uint8_t MaxNumArgs;
};

#define GET_DemangledBuiltins_DECL
#define GET_DemangledBuiltins_IMPL

struct NativeBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  uint32_t Opcode;
};

#define GET_NativeBuiltins_DECL
#define GET_NativeBuiltins_IMPL

#define GET_GroupBuiltins_DECL
#define GET_GroupBuiltins_IMPL

#define GET_IntelSubgroupsBuiltins_DECL
#define GET_IntelSubgroupsBuiltins_IMPL

#define GET_AtomicFloatingBuiltins_DECL
#define GET_AtomicFloatingBuiltins_IMPL

#define GET_GroupUniformBuiltins_DECL
#define GET_GroupUniformBuiltins_IMPL

struct GetBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  BuiltIn::BuiltIn Value;
};

using namespace BuiltIn;
#define GET_GetBuiltins_DECL
#define GET_GetBuiltins_IMPL

struct ImageQueryBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  uint32_t Component;
};

#define GET_ImageQueryBuiltins_DECL
#define GET_ImageQueryBuiltins_IMPL

#define GET_IntegerDotProductBuiltins_DECL
#define GET_IntegerDotProductBuiltins_IMPL

struct ConvertBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  bool IsDestinationSigned;
  bool IsSaturated;
  bool IsRounded;
  bool IsBfloat16;
  bool IsTF32;
  FPRoundingMode::FPRoundingMode RoundingMode;
};

struct VectorLoadStoreBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  uint32_t Number;
  uint32_t ElementCount;
  bool IsRounded;
  FPRoundingMode::FPRoundingMode RoundingMode;
};

using namespace FPRoundingMode;
#define GET_ConvertBuiltins_DECL
#define GET_ConvertBuiltins_IMPL

using namespace InstructionSet;
#define GET_VectorLoadStoreBuiltins_DECL
#define GET_VectorLoadStoreBuiltins_IMPL

#define GET_CLMemoryScope_DECL
#define GET_CLSamplerAddressingMode_DECL
#define GET_CLMemoryFenceFlags_DECL
#define GET_ExtendedBuiltins_DECL
#include "SPIRVGenTables.inc"
} // namespace SPIRV

//===----------------------------------------------------------------------===//
// Misc functions for looking up builtins and verifying requirements using
// TableGen records
//===----------------------------------------------------------------------===//

namespace SPIRV {
/// Parses the name part of the demangled builtin call.
std::string lookupBuiltinNameHelper(StringRef DemangledCall,
                                    FPDecorationId *DecorationId) {
  StringRef PassPrefix = "(anonymous namespace)::";
  std::string BuiltinName;
  // The Itanium demangler result may have an "(anonymous namespace)::" prefix.
  if (DemangledCall.starts_with(PassPrefix))
    BuiltinName = DemangledCall.substr(PassPrefix.size());
  else
    BuiltinName = DemangledCall;
  // Extract the builtin function name and types of arguments from the call
  // skeleton.
  BuiltinName = BuiltinName.substr(0, BuiltinName.find('('));

  // Account for a possible "__spirv_ocl_" prefix in SPIR-V friendly LLVM IR.
  if (BuiltinName.rfind("__spirv_ocl_", 0) == 0)
    BuiltinName = BuiltinName.substr(12);

  // Check if the extracted name contains type information between angle
  // brackets. If so, the builtin is an instantiated template, and the
  // information after the angle brackets and the return type must be removed.
  std::size_t Pos1 = BuiltinName.rfind('<');
  if (Pos1 != std::string::npos && BuiltinName.back() == '>') {
    std::size_t Pos2 = BuiltinName.rfind(' ', Pos1);
    if (Pos2 == std::string::npos)
      Pos2 = 0;
    else
      ++Pos2;
    BuiltinName = BuiltinName.substr(Pos2, Pos1 - Pos2);
    BuiltinName = BuiltinName.substr(BuiltinName.find_last_of(' ') + 1);
  }

  // Check if the extracted name begins with:
  // - "__spirv_ImageSampleExplicitLod"
  // - "__spirv_ImageRead"
  // - "__spirv_ImageWrite"
  // - "__spirv_ImageQuerySizeLod"
  // - "__spirv_UDotKHR"
  // - "__spirv_SDotKHR"
  // - "__spirv_SUDotKHR"
  // - "__spirv_SDotAccSatKHR"
  // - "__spirv_UDotAccSatKHR"
  // - "__spirv_SUDotAccSatKHR"
  // - "__spirv_ReadClockKHR"
  // - "__spirv_SubgroupBlockReadINTEL"
  // - "__spirv_SubgroupImageBlockReadINTEL"
  // - "__spirv_SubgroupImageMediaBlockReadINTEL"
  // - "__spirv_SubgroupImageMediaBlockWriteINTEL"
  // - "__spirv_Convert"
  // - "__spirv_Round"
  // - "__spirv_UConvert"
  // - "__spirv_SConvert"
  // - "__spirv_FConvert"
  // - "__spirv_SatConvert"
  // and may contain return type information at the end as "_R<type>".
  // If so, extract the plain builtin name without the type information.
  static const std::regex SpvWithR(
      "(__spirv_(ImageSampleExplicitLod|ImageRead|ImageWrite|ImageQuerySizeLod|"
      "UDotKHR|"
      "SDotKHR|SUDotKHR|SDotAccSatKHR|UDotAccSatKHR|SUDotAccSatKHR|"
      "ReadClockKHR|SubgroupBlockReadINTEL|SubgroupImageBlockReadINTEL|"
      "SubgroupImageMediaBlockReadINTEL|SubgroupImageMediaBlockWriteINTEL|"
      "Convert|Round|"
      "UConvert|SConvert|FConvert|SatConvert)[^_]*)(_R[^_]*_?(\\w+)?.*)?");
  std::smatch Match;
  if (std::regex_match(BuiltinName, Match, SpvWithR) && Match.size() > 1) {
    std::ssub_match SubMatch;
    if (DecorationId && Match.size() > 3) {
      SubMatch = Match[4];
      *DecorationId = demangledPostfixToDecorationId(SubMatch.str());
    }
    SubMatch = Match[1];
    BuiltinName = SubMatch.str();
  }

  return BuiltinName;
}
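
// For illustration (hypothetical demangled calls, following the rules above):
//   "fclamp(float, float, float)"        yields "fclamp";
//   "__spirv_ocl_s_max(int, int)"        yields "s_max";
//   "__spirv_ConvertFToU_Rulong(double)" yields "__spirv_ConvertFToU",
// where in the last case the "_R<type>" return-type suffix is matched and
// stripped by the SpvWithR regex.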
} // namespace SPIRV

/// Looks up the demangled builtin call in the SPIRVBuiltins.td records using
/// the provided \p DemangledCall and specified \p Set.
///
/// The lookup follows this algorithm, returning the first successful match:
/// 1. Search with the plain demangled name (expecting a 1:1 match).
/// 2. Search with the prefix before or suffix after the demangled name
///    signifying the type of the first argument.
///
/// \returns Wrapper around the demangled call and found builtin definition.
static std::unique_ptr<const SPIRV::IncomingCall>
lookupBuiltin(StringRef DemangledCall,
              SPIRV::InstructionSet::InstructionSet Set,
              Register ReturnRegister, const SPIRVType *ReturnType,
              const SmallVectorImpl<Register> &Arguments) {
  std::string BuiltinName = SPIRV::lookupBuiltinNameHelper(DemangledCall);

  SmallVector<StringRef, 10> BuiltinArgumentTypes;
  StringRef BuiltinArgs =
      DemangledCall.slice(DemangledCall.find('(') + 1, DemangledCall.find(')'));
  BuiltinArgs.split(BuiltinArgumentTypes, ',', -1, false);

  // Look up the builtin in the defined set. Start with the plain demangled
  // name, expecting a 1:1 match in the defined builtin set.
  const SPIRV::DemangledBuiltin *Builtin;
  if ((Builtin = SPIRV::lookupBuiltin(BuiltinName, Set)))
    return std::make_unique<SPIRV::IncomingCall>(
        BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);

  // If the initial lookup was unsuccessful and the demangled call takes at
  // least 1 argument, add a prefix or suffix signifying the type of the first
  // argument and repeat the search.
  if (BuiltinArgumentTypes.size() >= 1) {
    char FirstArgumentType = BuiltinArgumentTypes[0][0];
    // Prefix to be added to the builtin's name for lookup.
    // For example, OpenCL "abs" taking an unsigned value has a prefix "u_".
    std::string Prefix;

    switch (FirstArgumentType) {
    // Unsigned:
    case 'u':
      if (Set == SPIRV::InstructionSet::OpenCL_std)
        Prefix = "u_";
      else if (Set == SPIRV::InstructionSet::GLSL_std_450)
        Prefix = "u";
      break;
    // Signed:
    case 'c':
    case 's':
    case 'i':
    case 'l':
      if (Set == SPIRV::InstructionSet::OpenCL_std)
        Prefix = "s_";
      else if (Set == SPIRV::InstructionSet::GLSL_std_450)
        Prefix = "s";
      break;
    // Floating-point:
    case 'f':
    case 'd':
    case 'h':
      if (Set == SPIRV::InstructionSet::OpenCL_std ||
          Set == SPIRV::InstructionSet::GLSL_std_450)
        Prefix = "f";
      break;
    }

    // If an argument-type name prefix was added, look up the builtin again.
    if (!Prefix.empty() &&
        (Builtin = SPIRV::lookupBuiltin(Prefix + BuiltinName, Set)))
      return std::make_unique<SPIRV::IncomingCall>(
          BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);

    // If lookup with a prefix failed, find a suffix to be added to the
    // builtin's name for lookup. For example, OpenCL "group_reduce_max" taking
    // an unsigned value has a suffix "u".
    std::string Suffix;

    switch (FirstArgumentType) {
    // Unsigned:
    case 'u':
      Suffix = "u";
      break;
    // Signed:
    case 'c':
    case 's':
    case 'i':
    case 'l':
      Suffix = "s";
      break;
    // Floating-point:
    case 'f':
    case 'd':
    case 'h':
      Suffix = "f";
      break;
    }

    // If an argument-type name suffix was added, look up the builtin again.
    if (!Suffix.empty() &&
        (Builtin = SPIRV::lookupBuiltin(BuiltinName + Suffix, Set)))
      return std::make_unique<SPIRV::IncomingCall>(
          BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);
  }

  // No builtin with such a name was found in the set.
  return nullptr;
}
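
// For illustration (hypothetical call): for "abs(unsigned int)" in the
// OpenCL_std set, the plain lookup of "abs" fails; the leading 'u' of the
// first argument type then selects the "u_" prefix, and the builtin is
// found on the second lookup under "u_abs".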

static MachineInstr *getBlockStructInstr(Register ParamReg,
                                         MachineRegisterInfo *MRI) {
  // We expect the following sequence of instructions:
  //   %0:_(pN) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.spv.alloca)
  //   or       = G_GLOBAL_VALUE @block_literal_global
  //   %1:_(pN) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.spv.bitcast), %0
  //   %2:_(p4) = G_ADDRSPACE_CAST %1:_(pN)
  MachineInstr *MI = MRI->getUniqueVRegDef(ParamReg);
  assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST &&
         MI->getOperand(1).isReg());
  Register BitcastReg = MI->getOperand(1).getReg();
  MachineInstr *BitcastMI = MRI->getUniqueVRegDef(BitcastReg);
  assert(isSpvIntrinsic(*BitcastMI, Intrinsic::spv_bitcast) &&
         BitcastMI->getOperand(2).isReg());
  Register ValueReg = BitcastMI->getOperand(2).getReg();
  MachineInstr *ValueMI = MRI->getUniqueVRegDef(ValueReg);
  return ValueMI;
}

// Return an integer constant corresponding to the given register and
// defined in spv_track_constant.
// TODO: maybe unify with prelegalizer pass.
static uint64_t getIConstVal(Register Reg, const MachineRegisterInfo *MRI) {
  MachineInstr *DefMI = MRI->getUniqueVRegDef(Reg);
  assert(DefMI->getOpcode() == TargetOpcode::G_CONSTANT &&
         DefMI->getOperand(1).isCImm());
  return DefMI->getOperand(1).getCImm()->getValue().getZExtValue();
}

// Return type of the instruction result from spv_assign_type intrinsic.
// TODO: maybe unify with prelegalizer pass.
static Type *getMachineInstrType(MachineInstr *MI) {
  MachineInstr *NextMI = MI->getNextNode();
  if (!NextMI)
    return nullptr;
  if (isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_name))
    if ((NextMI = NextMI->getNextNode()) == nullptr)
      return nullptr;
  Register ValueReg = MI->getOperand(0).getReg();
  if ((!isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_type) &&
       !isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_ptr_type)) ||
      NextMI->getOperand(1).getReg() != ValueReg)
    return nullptr;
  Type *Ty = getMDOperandAsType(NextMI->getOperand(2).getMetadata(), 0);
  assert(Ty && "Type is expected");
  return Ty;
}

static const Type *getBlockStructType(Register ParamReg,
                                      MachineRegisterInfo *MRI) {
  // In principle, this information should be passed to us from Clang via
  // an elementtype attribute. However, said attribute requires that
  // the function call be an intrinsic, which it is not. Instead, we rely on
  // being able to trace this to the declaration of a variable: OpenCL C
  // specification section 6.12.5 should guarantee that we can do this.
  MachineInstr *MI = getBlockStructInstr(ParamReg, MRI);
  if (MI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE)
    return MI->getOperand(1).getGlobal()->getType();
  assert(isSpvIntrinsic(*MI, Intrinsic::spv_alloca) &&
         "Blocks in OpenCL C must be traceable to allocation site");
  return getMachineInstrType(MI);
}

//===----------------------------------------------------------------------===//
// Helper functions for building misc instructions
//===----------------------------------------------------------------------===//

/// Helper function building either a resulting scalar or vector bool register
/// depending on the expected \p ResultType.
///
/// \returns Tuple of the resulting register and its type.
static std::tuple<Register, SPIRVType *>
buildBoolRegister(MachineIRBuilder &MIRBuilder, const SPIRVType *ResultType,
                  SPIRVGlobalRegistry *GR) {
  LLT Type;
  SPIRVType *BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder, true);

  if (ResultType->getOpcode() == SPIRV::OpTypeVector) {
    unsigned VectorElements = ResultType->getOperand(2).getImm();
    BoolType = GR->getOrCreateSPIRVVectorType(BoolType, VectorElements,
                                              MIRBuilder, true);
    const FixedVectorType *LLVMVectorType =
        cast<FixedVectorType>(GR->getTypeForSPIRVType(BoolType));
    Type = LLT::vector(LLVMVectorType->getElementCount(), 1);
  } else {
    Type = LLT::scalar(1);
  }

  Register ResultRegister =
      MIRBuilder.getMRI()->createGenericVirtualRegister(Type);
  MIRBuilder.getMRI()->setRegClass(ResultRegister,
                                   GR->getRegClass(ResultType));
  GR->assignSPIRVTypeToVReg(BoolType, ResultRegister, MIRBuilder.getMF());
  return std::make_tuple(ResultRegister, BoolType);
}

/// Helper function for building either a vector or scalar select instruction
/// depending on the expected \p ResultType.
static bool buildSelectInst(MachineIRBuilder &MIRBuilder,
                            Register ReturnRegister, Register SourceRegister,
                            const SPIRVType *ReturnType,
                            SPIRVGlobalRegistry *GR) {
  Register TrueConst, FalseConst;

  if (ReturnType->getOpcode() == SPIRV::OpTypeVector) {
    unsigned Bits = GR->getScalarOrVectorBitWidth(ReturnType);
    uint64_t AllOnes = APInt::getAllOnes(Bits).getZExtValue();
    TrueConst =
        GR->getOrCreateConsIntVector(AllOnes, MIRBuilder, ReturnType, true);
    FalseConst = GR->getOrCreateConsIntVector(0, MIRBuilder, ReturnType, true);
  } else {
    TrueConst = GR->buildConstantInt(1, MIRBuilder, ReturnType, true);
    FalseConst = GR->buildConstantInt(0, MIRBuilder, ReturnType, true);
  }

  return MIRBuilder.buildSelect(ReturnRegister, SourceRegister, TrueConst,
                                FalseConst);
}
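
// Note: for vector return types the "true" constant is all-ones per element
// (i.e. -1), matching OpenCL C relational semantics for vector results,
// while scalar results use plain 1 and 0.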

/// Helper function for building a load instruction loading into the
/// \p DestinationReg.
static Register buildLoadInst(SPIRVType *BaseType, Register PtrRegister,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR, LLT LowLevelType,
                              Register DestinationReg = Register(0)) {
  if (!DestinationReg.isValid())
    DestinationReg = createVirtualRegister(BaseType, GR, MIRBuilder);
  // TODO: consider using correct address space and alignment (p0 is canonical
  // type for selection though).
  MachinePointerInfo PtrInfo = MachinePointerInfo();
  MIRBuilder.buildLoad(DestinationReg, PtrRegister, PtrInfo, Align());
  return DestinationReg;
}

/// Helper function for building a load instruction for loading a builtin
/// global variable of \p BuiltinValue value.
static Register buildBuiltinVariableLoad(
    MachineIRBuilder &MIRBuilder, SPIRVType *VariableType,
    SPIRVGlobalRegistry *GR, SPIRV::BuiltIn::BuiltIn BuiltinValue, LLT LLType,
    Register Reg = Register(0), bool isConst = true,
    const std::optional<SPIRV::LinkageType::LinkageType> &LinkageTy = {
        SPIRV::LinkageType::Import}) {
  Register NewRegister =
      MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::pIDRegClass);
  MIRBuilder.getMRI()->setType(
      NewRegister,
      LLT::pointer(storageClassToAddressSpace(SPIRV::StorageClass::Function),
                   GR->getPointerSize()));
  SPIRVType *PtrType = GR->getOrCreateSPIRVPointerType(
      VariableType, MIRBuilder, SPIRV::StorageClass::Input);
  GR->assignSPIRVTypeToVReg(PtrType, NewRegister, MIRBuilder.getMF());

  // Set up the global OpVariable with the necessary builtin decorations.
  Register Variable = GR->buildGlobalVariable(
      NewRegister, PtrType, getLinkStringForBuiltIn(BuiltinValue), nullptr,
      SPIRV::StorageClass::Input, nullptr, /* isConst= */ isConst, LinkageTy,
      MIRBuilder, false);

  // Load the value from the global variable.
  Register LoadedRegister =
      buildLoadInst(VariableType, Variable, MIRBuilder, GR, LLType, Reg);
  MIRBuilder.getMRI()->setType(LoadedRegister, LLType);
  return LoadedRegister;
}
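
// For illustration: for BuiltIn::GlobalInvocationId this emits a global
// OpVariable in the Input storage class, decorated with the builtin and the
// linkage name "__spirv_BuiltInGlobalInvocationId", followed by a load of
// the variable's current value.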

/// Helper external function for inserting ASSIGN_TYPE instruction between
/// \p Reg and its definition, set the new register as a destination of the
/// definition, assign SPIRVType to both registers. If SpirvTy is provided,
/// use it as SPIRVType in ASSIGN_TYPE, otherwise create it from \p Ty.
/// Defined in SPIRVPreLegalizer.cpp.
extern void insertAssignInstr(Register Reg, Type *Ty, SPIRVType *SpirvTy,
                              SPIRVGlobalRegistry *GR, MachineIRBuilder &MIB,
                              MachineRegisterInfo &MRI);

// TODO: Move to TableGen.
static SPIRV::MemorySemantics::MemorySemantics
getSPIRVMemSemantics(std::memory_order MemOrder) {
  switch (MemOrder) {
  case std::memory_order_relaxed:
    return SPIRV::MemorySemantics::None;
  case std::memory_order_acquire:
    return SPIRV::MemorySemantics::Acquire;
  case std::memory_order_release:
    return SPIRV::MemorySemantics::Release;
  case std::memory_order_acq_rel:
    return SPIRV::MemorySemantics::AcquireRelease;
  case std::memory_order_seq_cst:
    return SPIRV::MemorySemantics::SequentiallyConsistent;
  default:
    report_fatal_error("Unknown CL memory order");
  }
}

static SPIRV::Scope::Scope getSPIRVScope(SPIRV::CLMemoryScope ClScope) {
  switch (ClScope) {
  case SPIRV::CLMemoryScope::memory_scope_work_item:
    return SPIRV::Scope::Invocation;
  case SPIRV::CLMemoryScope::memory_scope_work_group:
    return SPIRV::Scope::Workgroup;
  case SPIRV::CLMemoryScope::memory_scope_device:
    return SPIRV::Scope::Device;
  case SPIRV::CLMemoryScope::memory_scope_all_svm_devices:
    return SPIRV::Scope::CrossDevice;
  case SPIRV::CLMemoryScope::memory_scope_sub_group:
    return SPIRV::Scope::Subgroup;
  }
  report_fatal_error("Unknown CL memory scope");
}

static Register buildConstantIntReg32(uint64_t Val,
                                      MachineIRBuilder &MIRBuilder,
                                      SPIRVGlobalRegistry *GR) {
  return GR->buildConstantInt(
      Val, MIRBuilder, GR->getOrCreateSPIRVIntegerType(32, MIRBuilder), true);
}

static Register buildScopeReg(Register CLScopeRegister,
                              SPIRV::Scope::Scope Scope,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR,
                              MachineRegisterInfo *MRI) {
  if (CLScopeRegister.isValid()) {
    auto CLScope =
        static_cast<SPIRV::CLMemoryScope>(getIConstVal(CLScopeRegister, MRI));
    Scope = getSPIRVScope(CLScope);

    if (CLScope == static_cast<unsigned>(Scope)) {
      MRI->setRegClass(CLScopeRegister, &SPIRV::iIDRegClass);
      return CLScopeRegister;
    }
  }
  return buildConstantIntReg32(Scope, MIRBuilder, GR);
}

static void setRegClassIfNull(Register Reg, MachineRegisterInfo *MRI,
                              SPIRVGlobalRegistry *GR) {
  if (MRI->getRegClassOrNull(Reg))
    return;
  SPIRVType *SpvType = GR->getSPIRVTypeForVReg(Reg);
  MRI->setRegClass(Reg,
                   SpvType ? GR->getRegClass(SpvType) : &SPIRV::iIDRegClass);
}

static Register buildMemSemanticsReg(Register SemanticsRegister,
                                     Register PtrRegister, unsigned &Semantics,
                                     MachineIRBuilder &MIRBuilder,
                                     SPIRVGlobalRegistry *GR) {
  if (SemanticsRegister.isValid()) {
    MachineRegisterInfo *MRI = MIRBuilder.getMRI();
    std::memory_order Order =
        static_cast<std::memory_order>(getIConstVal(SemanticsRegister, MRI));
    Semantics =
        getSPIRVMemSemantics(Order) |
        getMemSemanticsForStorageClass(GR->getPointerStorageClass(PtrRegister));
    if (static_cast<unsigned>(Order) == Semantics) {
      MRI->setRegClass(SemanticsRegister, &SPIRV::iIDRegClass);
      return SemanticsRegister;
    }
  }
  return buildConstantIntReg32(Semantics, MIRBuilder, GR);
}

static bool buildOpFromWrapper(MachineIRBuilder &MIRBuilder, unsigned Opcode,
                               const SPIRV::IncomingCall *Call,
                               Register TypeReg,
                               ArrayRef<uint32_t> ImmArgs = {}) {
  auto MIB = MIRBuilder.buildInstr(Opcode);
  if (TypeReg.isValid())
    MIB.addDef(Call->ReturnRegister).addUse(TypeReg);
  unsigned Sz = Call->Arguments.size() - ImmArgs.size();
  for (unsigned i = 0; i < Sz; ++i)
    MIB.addUse(Call->Arguments[i]);
  for (uint32_t ImmArg : ImmArgs)
    MIB.addImm(ImmArg);
  return true;
}
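
// For illustration (hypothetical call): a call demangled as
// "__spirv_AtomicLoad(ptr, scope, semantics)" takes this wrapper path, so
// its operands are forwarded verbatim to the emitted instruction instead of
// being rebuilt from OpenCL defaults.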

/// Helper function for translating atomic init to OpStore.
static bool buildAtomicInitInst(const SPIRV::IncomingCall *Call,
                                MachineIRBuilder &MIRBuilder) {
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, SPIRV::OpStore, Call, Register(0));

  assert(Call->Arguments.size() == 2 &&
         "Need 2 arguments for atomic init translation");
  MIRBuilder.buildInstr(SPIRV::OpStore)
      .addUse(Call->Arguments[0])
      .addUse(Call->Arguments[1]);
  return true;
}
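
// For illustration: "atomic_init(object, value)" lowers to a plain
//   OpStore %object %value
// since atomic_init performs non-atomic initialization and carries no
// ordering or scope semantics.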

/// Helper function for building an atomic load instruction.
static bool buildAtomicLoadInst(const SPIRV::IncomingCall *Call,
                                MachineIRBuilder &MIRBuilder,
                                SPIRVGlobalRegistry *GR) {
  Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, SPIRV::OpAtomicLoad, Call, TypeReg);

  Register PtrRegister = Call->Arguments[0];
  // TODO: if true insert call to __translate_ocl_memory_scope before
  // OpAtomicLoad and the function implementation. We can use Translator's
  // output for transcoding/atomic_explicit_arguments.cl as an example.
  Register ScopeRegister =
      Call->Arguments.size() > 1
          ? Call->Arguments[1]
          : buildConstantIntReg32(SPIRV::Scope::Device, MIRBuilder, GR);
  Register MemSemanticsReg;
  if (Call->Arguments.size() > 2) {
    // TODO: Insert call to __translate_ocl_memory_order before OpAtomicLoad.
    MemSemanticsReg = Call->Arguments[2];
  } else {
    int Semantics =
        SPIRV::MemorySemantics::SequentiallyConsistent |
        getMemSemanticsForStorageClass(GR->getPointerStorageClass(PtrRegister));
    MemSemanticsReg = buildConstantIntReg32(Semantics, MIRBuilder, GR);
  }

  MIRBuilder.buildInstr(SPIRV::OpAtomicLoad)
      .addDef(Call->ReturnRegister)
      .addUse(TypeReg)
      .addUse(PtrRegister)
      .addUse(ScopeRegister)
      .addUse(MemSemanticsReg);
  return true;
}
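
// For illustration: plain "atomic_load(ptr)" supplies only the pointer, so
// the scope defaults to Device and the semantics to SequentiallyConsistent
// combined with the semantics implied by the pointer's storage class.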

/// Helper function for building an atomic store instruction.
static bool buildAtomicStoreInst(const SPIRV::IncomingCall *Call,
                                 MachineIRBuilder &MIRBuilder,
                                 SPIRVGlobalRegistry *GR) {
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, SPIRV::OpAtomicStore, Call,
                              Register(0));

  Register ScopeRegister =
      buildConstantIntReg32(SPIRV::Scope::Device, MIRBuilder, GR);
  Register PtrRegister = Call->Arguments[0];
  int Semantics =
      SPIRV::MemorySemantics::SequentiallyConsistent |
      getMemSemanticsForStorageClass(GR->getPointerStorageClass(PtrRegister));
  Register MemSemanticsReg = buildConstantIntReg32(Semantics, MIRBuilder, GR);
  MIRBuilder.buildInstr(SPIRV::OpAtomicStore)
      .addUse(PtrRegister)
      .addUse(ScopeRegister)
      .addUse(MemSemanticsReg)
      .addUse(Call->Arguments[1]);
  return true;
}

/// Helper function for building an atomic compare-exchange instruction.
static bool buildAtomicCompareExchangeInst(
    const SPIRV::IncomingCall *Call, const SPIRV::DemangledBuiltin *Builtin,
    unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR) {
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  bool IsCmpxchg = Call->Builtin->Name.contains("cmpxchg");
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  Register ObjectPtr = Call->Arguments[0];   // Pointer (volatile A *object).
  Register ExpectedArg = Call->Arguments[1]; // Comparator (C *expected).
  Register Desired = Call->Arguments[2];     // Value (C desired).
  SPIRVType *SpvDesiredTy = GR->getSPIRVTypeForVReg(Desired);
  LLT DesiredLLT = MRI->getType(Desired);

  assert(GR->getSPIRVTypeForVReg(ObjectPtr)->getOpcode() ==
         SPIRV::OpTypePointer);
  unsigned ExpectedType = GR->getSPIRVTypeForVReg(ExpectedArg)->getOpcode();
  (void)ExpectedType;
  assert(IsCmpxchg ? ExpectedType == SPIRV::OpTypeInt
                   : ExpectedType == SPIRV::OpTypePointer);
  assert(GR->isScalarOfType(Desired, SPIRV::OpTypeInt));

  SPIRVType *SpvObjectPtrTy = GR->getSPIRVTypeForVReg(ObjectPtr);
  assert(SpvObjectPtrTy->getOperand(2).isReg() && "SPIRV type is expected");
  auto StorageClass = static_cast<SPIRV::StorageClass::StorageClass>(
      SpvObjectPtrTy->getOperand(1).getImm());
  auto MemSemStorage = getMemSemanticsForStorageClass(StorageClass);

  Register MemSemEqualReg;
  Register MemSemUnequalReg;
  uint64_t MemSemEqual =
      IsCmpxchg
          ? SPIRV::MemorySemantics::None
          : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
  uint64_t MemSemUnequal =
      IsCmpxchg
          ? SPIRV::MemorySemantics::None
          : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
  if (Call->Arguments.size() >= 4) {
    assert(Call->Arguments.size() >= 5 &&
           "Need 5+ args for explicit atomic cmpxchg");
    auto MemOrdEq =
        static_cast<std::memory_order>(getIConstVal(Call->Arguments[3], MRI));
    auto MemOrdNeq =
        static_cast<std::memory_order>(getIConstVal(Call->Arguments[4], MRI));
    MemSemEqual = getSPIRVMemSemantics(MemOrdEq) | MemSemStorage;
    MemSemUnequal = getSPIRVMemSemantics(MemOrdNeq) | MemSemStorage;
    if (static_cast<unsigned>(MemOrdEq) == MemSemEqual)
      MemSemEqualReg = Call->Arguments[3];
    if (static_cast<unsigned>(MemOrdNeq) == MemSemUnequal)
      MemSemUnequalReg = Call->Arguments[4];
  }
  if (!MemSemEqualReg.isValid())
    MemSemEqualReg = buildConstantIntReg32(MemSemEqual, MIRBuilder, GR);
  if (!MemSemUnequalReg.isValid())
    MemSemUnequalReg = buildConstantIntReg32(MemSemUnequal, MIRBuilder, GR);

  Register ScopeReg;
  auto Scope = IsCmpxchg ? SPIRV::Scope::Workgroup : SPIRV::Scope::Device;
  if (Call->Arguments.size() >= 6) {
    assert(Call->Arguments.size() == 6 &&
           "Extra args for explicit atomic cmpxchg");
    auto ClScope = static_cast<SPIRV::CLMemoryScope>(
        getIConstVal(Call->Arguments[5], MRI));
    Scope = getSPIRVScope(ClScope);
    if (ClScope == static_cast<unsigned>(Scope))
      ScopeReg = Call->Arguments[5];
  }
  if (!ScopeReg.isValid())
    ScopeReg = buildConstantIntReg32(Scope, MIRBuilder, GR);

  Register Expected = IsCmpxchg
                          ? ExpectedArg
                          : buildLoadInst(SpvDesiredTy, ExpectedArg, MIRBuilder,
                                          GR, LLT::scalar(64));
  MRI->setType(Expected, DesiredLLT);
  Register Tmp = !IsCmpxchg ? MRI->createGenericVirtualRegister(DesiredLLT)
                            : Call->ReturnRegister;
  if (!MRI->getRegClassOrNull(Tmp))
    MRI->setRegClass(Tmp, GR->getRegClass(SpvDesiredTy));
  GR->assignSPIRVTypeToVReg(SpvDesiredTy, Tmp, MIRBuilder.getMF());

  MIRBuilder.buildInstr(Opcode)
      .addDef(Tmp)
      .addUse(GR->getSPIRVTypeID(SpvDesiredTy))
      .addUse(ObjectPtr)
      .addUse(ScopeReg)
      .addUse(MemSemEqualReg)
      .addUse(MemSemUnequalReg)
      .addUse(Desired)
      .addUse(Expected);
  if (!IsCmpxchg) {
    MIRBuilder.buildInstr(SPIRV::OpStore).addUse(ExpectedArg).addUse(Tmp);
    MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Call->ReturnRegister, Tmp, Expected);
  }
  return true;
}
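
// For illustration: for "atomic_compare_exchange_strong(object, expected,
// desired)" this loads *expected, emits OpAtomicCompareExchange, stores the
// returned original value back to *expected, and compares it against the
// loaded comparator to produce the bool result; the "cmpxchg" flavor
// returns the original value directly instead.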

/// Helper function for building atomic instructions.
static bool buildAtomicRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
                               MachineIRBuilder &MIRBuilder,
                               SPIRVGlobalRegistry *GR) {
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  Register ScopeRegister =
      Call->Arguments.size() >= 4 ? Call->Arguments[3] : Register();

  assert(Call->Arguments.size() <= 4 &&
         "Too many args for explicit atomic RMW");
  ScopeRegister = buildScopeReg(ScopeRegister, SPIRV::Scope::Workgroup,
                                MIRBuilder, GR, MRI);

  Register PtrRegister = Call->Arguments[0];
  unsigned Semantics = SPIRV::MemorySemantics::None;
  Register MemSemanticsReg =
      Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register();
  MemSemanticsReg = buildMemSemanticsReg(MemSemanticsReg, PtrRegister,
                                         Semantics, MIRBuilder, GR);
  Register ValueReg = Call->Arguments[1];
  Register ValueTypeReg = GR->getSPIRVTypeID(Call->ReturnType);
  // Support cl_ext_float_atomics.
  if (Call->ReturnType->getOpcode() == SPIRV::OpTypeFloat) {
    if (Opcode == SPIRV::OpAtomicIAdd) {
      Opcode = SPIRV::OpAtomicFAddEXT;
    } else if (Opcode == SPIRV::OpAtomicISub) {
      // Translate OpAtomicISub applied to a floating-type argument to
      // OpAtomicFAddEXT with the negated value operand.
      Opcode = SPIRV::OpAtomicFAddEXT;
      Register NegValueReg =
          MRI->createGenericVirtualRegister(MRI->getType(ValueReg));
      MRI->setRegClass(NegValueReg, GR->getRegClass(Call->ReturnType));
      GR->assignSPIRVTypeToVReg(Call->ReturnType, NegValueReg,
                                MIRBuilder.getMF());
      MIRBuilder.buildInstr(TargetOpcode::G_FNEG)
          .addDef(NegValueReg)
          .addUse(ValueReg);
      insertAssignInstr(NegValueReg, nullptr, Call->ReturnType, GR, MIRBuilder,
                        MIRBuilder.getMF().getRegInfo());
      ValueReg = NegValueReg;
    }
  }
  MIRBuilder.buildInstr(Opcode)
      .addDef(Call->ReturnRegister)
      .addUse(ValueTypeReg)
      .addUse(PtrRegister)
      .addUse(ScopeRegister)
      .addUse(MemSemanticsReg)
      .addUse(ValueReg);
  return true;
}

/// Helper function for building an atomic floating-type instruction.
static bool buildAtomicFloatingRMWInst(const SPIRV::IncomingCall *Call,
                                       unsigned Opcode,
                                       MachineIRBuilder &MIRBuilder,
                                       SPIRVGlobalRegistry *GR) {
  assert(Call->Arguments.size() == 4 &&
         "Wrong number of arguments for an atomic floating-type builtin");
  Register PtrReg = Call->Arguments[0];
  Register ScopeReg = Call->Arguments[1];
  Register MemSemanticsReg = Call->Arguments[2];
  Register ValueReg = Call->Arguments[3];
  MIRBuilder.buildInstr(Opcode)
      .addDef(Call->ReturnRegister)
      .addUse(GR->getSPIRVTypeID(Call->ReturnType))
      .addUse(PtrReg)
      .addUse(ScopeReg)
      .addUse(MemSemanticsReg)
      .addUse(ValueReg);
  return true;
}

/// Helper function for building atomic flag instructions (e.g.
/// OpAtomicFlagTestAndSet).
static bool buildAtomicFlagInst(const SPIRV::IncomingCall *Call,
                                unsigned Opcode, MachineIRBuilder &MIRBuilder,
                                SPIRVGlobalRegistry *GR) {
  bool IsSet = Opcode == SPIRV::OpAtomicFlagTestAndSet;
  Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              IsSet ? TypeReg : Register(0));

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  Register PtrRegister = Call->Arguments[0];
  unsigned Semantics = SPIRV::MemorySemantics::SequentiallyConsistent;
  Register MemSemanticsReg =
      Call->Arguments.size() >= 2 ? Call->Arguments[1] : Register();
  MemSemanticsReg = buildMemSemanticsReg(MemSemanticsReg, PtrRegister,
                                         Semantics, MIRBuilder, GR);

  assert((Opcode != SPIRV::OpAtomicFlagClear ||
          (Semantics != SPIRV::MemorySemantics::Acquire &&
           Semantics != SPIRV::MemorySemantics::AcquireRelease)) &&
         "Invalid memory order argument!");

  Register ScopeRegister =
      Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register();
  ScopeRegister =
      buildScopeReg(ScopeRegister, SPIRV::Scope::Device, MIRBuilder, GR, MRI);

  auto MIB = MIRBuilder.buildInstr(Opcode);
  if (IsSet)
    MIB.addDef(Call->ReturnRegister).addUse(TypeReg);

  MIB.addUse(PtrRegister).addUse(ScopeRegister).addUse(MemSemanticsReg);
  return true;
}

/// Helper function for building barriers, i.e., memory/control ordering
/// operations.
static bool buildBarrierInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
                             MachineIRBuilder &MIRBuilder,
                             SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  const auto *ST =
      static_cast<const SPIRVSubtarget *>(&MIRBuilder.getMF().getSubtarget());
  if ((Opcode == SPIRV::OpControlBarrierArriveINTEL ||
       Opcode == SPIRV::OpControlBarrierWaitINTEL) &&
      !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_split_barrier)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_INTEL_split_barrier";
    report_fatal_error(DiagMsg.c_str(), false);
  }

  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0));

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  unsigned MemFlags = getIConstVal(Call->Arguments[0], MRI);
  unsigned MemSemantics = SPIRV::MemorySemantics::None;

  if (MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE)
    MemSemantics |= SPIRV::MemorySemantics::WorkgroupMemory;

  if (MemFlags & SPIRV::CLK_GLOBAL_MEM_FENCE)
    MemSemantics |= SPIRV::MemorySemantics::CrossWorkgroupMemory;

  if (MemFlags & SPIRV::CLK_IMAGE_MEM_FENCE)
    MemSemantics |= SPIRV::MemorySemantics::ImageMemory;

  if (Opcode == SPIRV::OpMemoryBarrier)
    MemSemantics = getSPIRVMemSemantics(static_cast<std::memory_order>(
                       getIConstVal(Call->Arguments[1], MRI))) |
                   MemSemantics;
  else if (Opcode == SPIRV::OpControlBarrierArriveINTEL)
    MemSemantics |= SPIRV::MemorySemantics::Release;
  else if (Opcode == SPIRV::OpControlBarrierWaitINTEL)
    MemSemantics |= SPIRV::MemorySemantics::Acquire;
  else
    MemSemantics |= SPIRV::MemorySemantics::SequentiallyConsistent;

  Register MemSemanticsReg =
      MemFlags == MemSemantics
          ? Call->Arguments[0]
          : buildConstantIntReg32(MemSemantics, MIRBuilder, GR);
  Register ScopeReg;
  SPIRV::Scope::Scope Scope = SPIRV::Scope::Workgroup;
  SPIRV::Scope::Scope MemScope = Scope;
  if (Call->Arguments.size() >= 2) {
    assert(
        ((Opcode != SPIRV::OpMemoryBarrier && Call->Arguments.size() == 2) ||
         (Opcode == SPIRV::OpMemoryBarrier && Call->Arguments.size() == 3)) &&
        "Extra args for explicitly scoped barrier");
    Register ScopeArg = (Opcode == SPIRV::OpMemoryBarrier) ? Call->Arguments[2]
                                                           : Call->Arguments[1];
    SPIRV::CLMemoryScope CLScope =
        static_cast<SPIRV::CLMemoryScope>(getIConstVal(ScopeArg, MRI));
    MemScope = getSPIRVScope(CLScope);
    if (!(MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE) ||
        (Opcode == SPIRV::OpMemoryBarrier))
      Scope = MemScope;
    if (CLScope == static_cast<unsigned>(Scope))
      ScopeReg = Call->Arguments[1];
  }

  if (!ScopeReg.isValid())
    ScopeReg = buildConstantIntReg32(Scope, MIRBuilder, GR);

  auto MIB = MIRBuilder.buildInstr(Opcode).addUse(ScopeReg);
  if (Opcode != SPIRV::OpMemoryBarrier)
    MIB.addUse(buildConstantIntReg32(MemScope, MIRBuilder, GR));
  MIB.addUse(MemSemanticsReg);
  return true;
}
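
// For illustration: "barrier(CLK_LOCAL_MEM_FENCE)" lowers to
//   OpControlBarrier %workgroup %workgroup %semantics
// where the semantics combine WorkgroupMemory with SequentiallyConsistent
// and both scopes default to Workgroup.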

/// Helper function for building extended bit operations.
static bool buildExtendedBitOpsInst(const SPIRV::IncomingCall *Call,
                                    unsigned Opcode,
                                    MachineIRBuilder &MIRBuilder,
                                    SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  const auto *ST =
      static_cast<const SPIRVSubtarget *>(&MIRBuilder.getMF().getSubtarget());
  if ((Opcode == SPIRV::OpBitFieldInsert ||
       Opcode == SPIRV::OpBitFieldSExtract ||
       Opcode == SPIRV::OpBitFieldUExtract || Opcode == SPIRV::OpBitReverse) &&
      !ST->canUseExtension(SPIRV::Extension::SPV_KHR_bit_instructions)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_KHR_bit_instructions";
    report_fatal_error(DiagMsg.c_str(), false);
  }

  // Generate the SPIR-V instruction accordingly.
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  auto MIB = MIRBuilder.buildInstr(Opcode)
                 .addDef(Call->ReturnRegister)
                 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
  for (unsigned i = 0; i < Call->Arguments.size(); ++i)
    MIB.addUse(Call->Arguments[i]);

  return true;
}

/// Helper function for building Intel's bindless image instructions.
static bool buildBindlessImageINTELInst(const SPIRV::IncomingCall *Call,
                                        unsigned Opcode,
                                        MachineIRBuilder &MIRBuilder,
                                        SPIRVGlobalRegistry *GR) {
  // Generate the SPIR-V instruction accordingly.
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  MIRBuilder.buildInstr(Opcode)
      .addDef(Call->ReturnRegister)
      .addUse(GR->getSPIRVTypeID(Call->ReturnType))
      .addUse(Call->Arguments[0]);

  return true;
}

/// Helper function for building Intel's OpBitwiseFunctionINTEL instruction.
static bool buildTernaryBitwiseFunctionINTELInst(
    const SPIRV::IncomingCall *Call, unsigned Opcode,
    MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR) {
  // Generate the SPIR-V instruction accordingly.
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  auto MIB = MIRBuilder.buildInstr(Opcode)
                 .addDef(Call->ReturnRegister)
                 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
  for (unsigned i = 0; i < Call->Arguments.size(); ++i)
    MIB.addUse(Call->Arguments[i]);

  return true;
}

/// Helper function for building Intel's 2D block IO instructions.
static bool build2DBlockIOINTELInst(const SPIRV::IncomingCall *Call,
                                    unsigned Opcode,
                                    MachineIRBuilder &MIRBuilder,
                                    SPIRVGlobalRegistry *GR) {
  // Generate the SPIR-V instruction accordingly.
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0));

  auto MIB = MIRBuilder.buildInstr(Opcode)
                 .addDef(Call->ReturnRegister)
                 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
  for (unsigned i = 0; i < Call->Arguments.size(); ++i)
    MIB.addUse(Call->Arguments[i]);

  return true;
}

static bool buildPipeInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
                          unsigned Scope, MachineIRBuilder &MIRBuilder,
                          SPIRVGlobalRegistry *GR) {
  switch (Opcode) {
  case SPIRV::OpCommitReadPipe:
  case SPIRV::OpCommitWritePipe:
    return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0));
  case SPIRV::OpGroupCommitReadPipe:
  case SPIRV::OpGroupCommitWritePipe:
  case SPIRV::OpGroupReserveReadPipePackets:
  case SPIRV::OpGroupReserveWritePipePackets: {
    Register ScopeConstReg =
        MIRBuilder.buildConstant(LLT::scalar(32), Scope).getReg(0);
    MachineRegisterInfo *MRI = MIRBuilder.getMRI();
    MRI->setRegClass(ScopeConstReg, &SPIRV::iIDRegClass);
    MachineInstrBuilder MIB;
    MIB = MIRBuilder.buildInstr(Opcode);
    // Add the return register and type.
    if (Opcode == SPIRV::OpGroupReserveReadPipePackets ||
        Opcode == SPIRV::OpGroupReserveWritePipePackets)
      MIB.addDef(Call->ReturnRegister)
          .addUse(GR->getSPIRVTypeID(Call->ReturnType));

    MIB.addUse(ScopeConstReg);
    for (unsigned int i = 0; i < Call->Arguments.size(); ++i)
      MIB.addUse(Call->Arguments[i]);

    return true;
  }
  default:
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));
  }
}

static unsigned getNumComponentsForDim(SPIRV::Dim::Dim dim) {
  switch (dim) {
  case SPIRV::Dim::DIM_1D:
  case SPIRV::Dim::DIM_Buffer:
    return 1;
  case SPIRV::Dim::DIM_2D:
  case SPIRV::Dim::DIM_Cube:
  case SPIRV::Dim::DIM_Rect:
    return 2;
  case SPIRV::Dim::DIM_3D:
    return 3;
  default:
    report_fatal_error("Cannot get num components for given Dim");
  }
}

/// Helper function for obtaining the number of size components.
static unsigned getNumSizeComponents(SPIRVType *imgType) {
  assert(imgType->getOpcode() == SPIRV::OpTypeImage);
  auto dim = static_cast<SPIRV::Dim::Dim>(imgType->getOperand(2).getImm());
  unsigned numComps = getNumComponentsForDim(dim);
  bool arrayed = imgType->getOperand(4).getImm() == 1;
  return arrayed ? numComps + 1 : numComps;
}
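
// For illustration: a 2D arrayed image reports 3 size components (width,
// height, and array size), while a non-arrayed 2D image reports 2.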

//===----------------------------------------------------------------------===//
// Implementation functions for each builtin group
//===----------------------------------------------------------------------===//

static bool generateExtInst(const SPIRV::IncomingCall *Call,
                            MachineIRBuilder &MIRBuilder,
                            SPIRVGlobalRegistry *GR, const CallBase &CB) {
  // Lookup the extended instruction number in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  uint32_t Number =
      SPIRV::lookupExtendedBuiltin(Builtin->Name, Builtin->Set)->Number;
  // fmin_common and fmax_common are now deprecated, and we should use fmin
  // and fmax with the NotInf and NotNaN flags instead. Keep the original
  // number so the NoNans and NoInfs flags can be added later.
  uint32_t OrigNumber = Number;
  const SPIRVSubtarget &ST =
      cast<SPIRVSubtarget>(MIRBuilder.getMF().getSubtarget());
  if (ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2) &&
      (Number == SPIRV::OpenCLExtInst::fmin_common ||
       Number == SPIRV::OpenCLExtInst::fmax_common)) {
    Number = (Number == SPIRV::OpenCLExtInst::fmin_common)
                 ? SPIRV::OpenCLExtInst::fmin
                 : SPIRV::OpenCLExtInst::fmax;
  }

  // Build the extended instruction.
  auto MIB =
      MIRBuilder.buildInstr(SPIRV::OpExtInst)
          .addDef(Call->ReturnRegister)
          .addUse(GR->getSPIRVTypeID(Call->ReturnType))
          .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
          .addImm(Number);

  for (auto Argument : Call->Arguments)
    MIB.addUse(Argument);
  MIB.getInstr()->copyIRFlags(CB);
  if (OrigNumber == SPIRV::OpenCLExtInst::fmin_common ||
      OrigNumber == SPIRV::OpenCLExtInst::fmax_common) {
    // Add NoNans and NoInfs flags to the fmin/fmax instruction.
    MIB.getInstr()->setFlag(MachineInstr::MIFlag::FmNoNans);
    MIB.getInstr()->setFlag(MachineInstr::MIFlag::FmNoInfs);
  }
  return true;
}

static bool generateRelationalInst(const SPIRV::IncomingCall *Call,
                                   MachineIRBuilder &MIRBuilder,
                                   SPIRVGlobalRegistry *GR) {
  // Lookup the instruction opcode in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;

  Register CompareRegister;
  SPIRVType *RelationType;
  std::tie(CompareRegister, RelationType) =
      buildBoolRegister(MIRBuilder, Call->ReturnType, GR);

  // Build the relational instruction.
  auto MIB = MIRBuilder.buildInstr(Opcode)
                 .addDef(CompareRegister)
                 .addUse(GR->getSPIRVTypeID(RelationType));

  for (auto Argument : Call->Arguments)
    MIB.addUse(Argument);

  // Build the select instruction.
  return buildSelectInst(MIRBuilder, Call->ReturnRegister, CompareRegister,
                         Call->ReturnType, GR);
}
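
// For illustration (hypothetical call): "isnan(float4)" emits its relational
// opcode into a vector-of-bool compare register, then a select mapping true
// lanes to all-ones and false lanes to zero in the int4 return type.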

static bool generateGroupInst(const SPIRV::IncomingCall *Call,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  const SPIRV::GroupBuiltin *GroupBuiltin =
      SPIRV::lookupGroupBuiltin(Builtin->Name);

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  if (Call->isSpirvOp()) {
    if (GroupBuiltin->NoGroupOperation) {
      SmallVector<uint32_t, 1> ImmArgs;
      if (GroupBuiltin->Opcode ==
              SPIRV::OpSubgroupMatrixMultiplyAccumulateINTEL &&
          Call->Arguments.size() > 4)
        ImmArgs.push_back(getConstFromIntrinsic(Call->Arguments[4], MRI));
      return buildOpFromWrapper(MIRBuilder, GroupBuiltin->Opcode, Call,
                                GR->getSPIRVTypeID(Call->ReturnType), ImmArgs);
    }

    // The group operation is a literal.
    Register GroupOpReg = Call->Arguments[1];
    const MachineInstr *MI = getDefInstrMaybeConstant(GroupOpReg, MRI);
    if (!MI || MI->getOpcode() != TargetOpcode::G_CONSTANT)
      report_fatal_error(
          "Group Operation parameter must be an integer constant");
    uint64_t GrpOp = MI->getOperand(1).getCImm()->getValue().getZExtValue();
    Register ScopeReg = Call->Arguments[0];
    auto MIB = MIRBuilder.buildInstr(GroupBuiltin->Opcode)
                   .addDef(Call->ReturnRegister)
                   .addUse(GR->getSPIRVTypeID(Call->ReturnType))
                   .addUse(ScopeReg)
                   .addImm(GrpOp);
    for (unsigned i = 2; i < Call->Arguments.size(); ++i)
      MIB.addUse(Call->Arguments[i]);
    return true;
  }

  Register Arg0;
  if (GroupBuiltin->HasBoolArg) {
    SPIRVType *BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder, true);
    Register BoolReg = Call->Arguments[0];
    SPIRVType *BoolRegType = GR->getSPIRVTypeForVReg(BoolReg);
    if (!BoolRegType)
      report_fatal_error("Can't find a register's type definition");
    MachineInstr *ArgInstruction = getDefInstrMaybeConstant(BoolReg, MRI);
    if (ArgInstruction->getOpcode() == TargetOpcode::G_CONSTANT) {
      if (BoolRegType->getOpcode() != SPIRV::OpTypeBool)
        Arg0 = GR->buildConstantInt(getIConstVal(BoolReg, MRI), MIRBuilder,
                                    BoolType, true);
    } else {
      if (BoolRegType->getOpcode() == SPIRV::OpTypeInt) {
        Arg0 = MRI->createGenericVirtualRegister(LLT::scalar(1));
        MRI->setRegClass(Arg0, &SPIRV::iIDRegClass);
        GR->assignSPIRVTypeToVReg(BoolType, Arg0, MIRBuilder.getMF());
        MIRBuilder.buildICmp(
            CmpInst::ICMP_NE, Arg0, BoolReg,
            GR->buildConstantInt(0, MIRBuilder, BoolRegType, true));
        insertAssignInstr(Arg0, nullptr, BoolType, GR, MIRBuilder,
                          MIRBuilder.getMF().getRegInfo());
      } else if (BoolRegType->getOpcode() != SPIRV::OpTypeBool) {
        report_fatal_error("Expect a boolean argument");
      }
      // If BoolReg is already a boolean register, we don't need to do anything.
    }
  }

  Register GroupResultRegister = Call->ReturnRegister;
  SPIRVType *GroupResultType = Call->ReturnType;

  // TODO: maybe we need to check whether the result type is already boolean
  // and in this case do not insert the select instruction.
  const bool HasBoolReturnTy =
      GroupBuiltin->IsElect || GroupBuiltin->IsAllOrAny ||
      GroupBuiltin->IsAllEqual || GroupBuiltin->IsLogical ||
      GroupBuiltin->IsInverseBallot || GroupBuiltin->IsBallotBitExtract;

  if (HasBoolReturnTy)
    std::tie(GroupResultRegister, GroupResultType) =
        buildBoolRegister(MIRBuilder, Call->ReturnType, GR);

  auto Scope = Builtin->Name.starts_with("sub_group") ? SPIRV::Scope::Subgroup
                                                      : SPIRV::Scope::Workgroup;
  Register ScopeRegister = buildConstantIntReg32(Scope, MIRBuilder, GR);

  Register VecReg;
  if (GroupBuiltin->Opcode == SPIRV::OpGroupBroadcast &&
      Call->Arguments.size() > 2) {
    // For OpGroupBroadcast "LocalId must be an integer datatype. It must be a
    // scalar, a vector with 2 components, or a vector with 3 components.",
    // meaning that we must create a vector from the function arguments if
    // it's a work_group_broadcast(val, local_id_x, local_id_y) or
    // work_group_broadcast(val, local_id_x, local_id_y, local_id_z) call.
    Register ElemReg = Call->Arguments[1];
    SPIRVType *ElemType = GR->getSPIRVTypeForVReg(ElemReg);
    if (!ElemType || ElemType->getOpcode() != SPIRV::OpTypeInt)
      report_fatal_error("Expect an integer <LocalId> argument");
    unsigned VecLen = Call->Arguments.size() - 1;
    VecReg = MRI->createGenericVirtualRegister(
        LLT::fixed_vector(VecLen, MRI->getType(ElemReg)));
    MRI->setRegClass(VecReg, &SPIRV::vIDRegClass);
    SPIRVType *VecType =
        GR->getOrCreateSPIRVVectorType(ElemType, VecLen, MIRBuilder, true);
    GR->assignSPIRVTypeToVReg(VecType, VecReg, MIRBuilder.getMF());
    auto MIB =
        MIRBuilder.buildInstr(TargetOpcode::G_BUILD_VECTOR).addDef(VecReg);
    for (unsigned i = 1; i < Call->Arguments.size(); i++) {
      MIB.addUse(Call->Arguments[i]);
      setRegClassIfNull(Call->Arguments[i], MRI, GR);
    }
    insertAssignInstr(VecReg, nullptr, VecType, GR, MIRBuilder,
                      MIRBuilder.getMF().getRegInfo());
  }

  // Build the work/sub group instruction.
  auto MIB = MIRBuilder.buildInstr(GroupBuiltin->Opcode)
                 .addDef(GroupResultRegister)
                 .addUse(GR->getSPIRVTypeID(GroupResultType))
                 .addUse(ScopeRegister);

  if (!GroupBuiltin->NoGroupOperation)
    MIB.addImm(GroupBuiltin->GroupOperation);
  if (Call->Arguments.size() > 0) {
    MIB.addUse(Arg0.isValid() ? Arg0 : Call->Arguments[0]);
    setRegClassIfNull(Call->Arguments[0], MRI, GR);
    if (VecReg.isValid())
      MIB.addUse(VecReg);
    else
      for (unsigned i = 1; i < Call->Arguments.size(); i++)
        MIB.addUse(Call->Arguments[i]);
  }

  // Build the select instruction.
  if (HasBoolReturnTy)
    buildSelectInst(MIRBuilder, Call->ReturnRegister, GroupResultRegister,
                    Call->ReturnType, GR);
  return true;
}
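
// For illustration (hypothetical call): "work_group_all(pred)" selects the
// Workgroup scope from its name, converts a non-bool predicate with an
// integer compare against zero, emits the group instruction on the bool
// register, and selects 1/0 (or all-ones/0 for vectors) into the integer
// result.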

static bool generateIntelSubgroupsInst(const SPIRV::IncomingCall *Call,
                                       MachineIRBuilder &MIRBuilder,
                                       SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  MachineFunction &MF = MIRBuilder.getMF();
  const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
  const SPIRV::IntelSubgroupsBuiltin *IntelSubgroups =
      SPIRV::lookupIntelSubgroupsBuiltin(Builtin->Name);

  if (IntelSubgroups->IsMedia &&
      !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_media_block_io)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_INTEL_media_block_io";
    report_fatal_error(DiagMsg.c_str(), false);
  } else if (!IntelSubgroups->IsMedia &&
             !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_subgroups)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_INTEL_subgroups";
    report_fatal_error(DiagMsg.c_str(), false);
  }

  uint32_t OpCode = IntelSubgroups->Opcode;
  if (Call->isSpirvOp()) {
    bool IsSet = OpCode != SPIRV::OpSubgroupBlockWriteINTEL &&
                 OpCode != SPIRV::OpSubgroupImageBlockWriteINTEL &&
                 OpCode != SPIRV::OpSubgroupImageMediaBlockWriteINTEL;
    return buildOpFromWrapper(MIRBuilder, OpCode, Call,
                              IsSet ? GR->getSPIRVTypeID(Call->ReturnType)
                                    : Register(0));
  }

  if (IntelSubgroups->IsBlock) {
    // The minimal number of arguments set in TableGen records is 1.
    if (SPIRVType *Arg0Type = GR->getSPIRVTypeForVReg(Call->Arguments[0])) {
      if (Arg0Type->getOpcode() == SPIRV::OpTypeImage) {
        // TODO: add required validation from the specification:
        // "'Image' must be an object whose type is OpTypeImage with a 'Sampled'
        // operand of 0 or 2. If the 'Sampled' operand is 2, then some
        // dimensions require a capability."
        switch (OpCode) {
        case SPIRV::OpSubgroupBlockReadINTEL:
          OpCode = SPIRV::OpSubgroupImageBlockReadINTEL;
          break;
        case SPIRV::OpSubgroupBlockWriteINTEL:
          OpCode = SPIRV::OpSubgroupImageBlockWriteINTEL;
          break;
        }
      }
    }
  }

  // TODO: opaque pointer types should be eventually resolved in such a way
  // that validation of block read is enabled with respect to the following
  // specification requirement:
  // "'Result Type' may be a scalar or vector type, and its component type must
  // be equal to the type pointed to by 'Ptr'."
  // For example, the function parameter type should not be a default i8
  // pointer, but should depend on the result type of the instruction where it
  // is used as a pointer argument of OpSubgroupBlockReadINTEL.

  // Build the Intel subgroups instruction.
  MachineInstrBuilder MIB =
      IntelSubgroups->IsWrite
          ? MIRBuilder.buildInstr(OpCode)
          : MIRBuilder.buildInstr(OpCode)
                .addDef(Call->ReturnRegister)
                .addUse(GR->getSPIRVTypeID(Call->ReturnType));
  for (size_t i = 0; i < Call->Arguments.size(); ++i)
    MIB.addUse(Call->Arguments[i]);
  return true;
}

static bool generateGroupUniformInst(const SPIRV::IncomingCall *Call,
                                     MachineIRBuilder &MIRBuilder,
                                     SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  MachineFunction &MF = MIRBuilder.getMF();
  const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
  if (!ST->canUseExtension(
          SPIRV::Extension::SPV_KHR_uniform_group_instructions)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_KHR_uniform_group_instructions";
    report_fatal_error(DiagMsg.c_str(), false);
  }
  const SPIRV::GroupUniformBuiltin *GroupUniform =
      SPIRV::lookupGroupUniformBuiltin(Builtin->Name);
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  Register GroupResultReg = Call->ReturnRegister;
  Register ScopeReg = Call->Arguments[0];
  Register ValueReg = Call->Arguments[2];

  // Group operation.
  Register ConstGroupOpReg = Call->Arguments[1];
  const MachineInstr *Const = getDefInstrMaybeConstant(ConstGroupOpReg, MRI);
  if (!Const || Const->getOpcode() != TargetOpcode::G_CONSTANT)
    report_fatal_error(
        "expect a constant group operation for a uniform group instruction",
        false);
  const MachineOperand &ConstOperand = Const->getOperand(1);
  if (!ConstOperand.isCImm())
    report_fatal_error("uniform group instructions: group operation must be an "
                       "integer constant",
                       false);

  auto MIB = MIRBuilder.buildInstr(GroupUniform->Opcode)
                 .addDef(GroupResultReg)
                 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
                 .addUse(ScopeReg);
  addNumImm(ConstOperand.getCImm()->getValue(), MIB);
  MIB.addUse(ValueReg);

  return true;
}

static bool generateKernelClockInst(const SPIRV::IncomingCall *Call,
                                    MachineIRBuilder &MIRBuilder,
                                    SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  MachineFunction &MF = MIRBuilder.getMF();
  const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
  if (!ST->canUseExtension(SPIRV::Extension::SPV_KHR_shader_clock)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_KHR_shader_clock";
    report_fatal_error(DiagMsg.c_str(), false);
  }

  Register ResultReg = Call->ReturnRegister;

  if (Builtin->Name == "__spirv_ReadClockKHR") {
    MIRBuilder.buildInstr(SPIRV::OpReadClockKHR)
        .addDef(ResultReg)
        .addUse(GR->getSPIRVTypeID(Call->ReturnType))
        .addUse(Call->Arguments[0]);
  } else {
    // Deduce the `Scope` operand from the builtin function name.
    SPIRV::Scope::Scope ScopeArg =
        StringSwitch<SPIRV::Scope::Scope>(Builtin->Name)
            .EndsWith("device", SPIRV::Scope::Scope::Device)
            .EndsWith("work_group", SPIRV::Scope::Scope::Workgroup)
            .EndsWith("sub_group", SPIRV::Scope::Scope::Subgroup);
    Register ScopeReg = buildConstantIntReg32(ScopeArg, MIRBuilder, GR);

    MIRBuilder.buildInstr(SPIRV::OpReadClockKHR)
        .addDef(ResultReg)
        .addUse(GR->getSPIRVTypeID(Call->ReturnType))
        .addUse(ScopeReg);
  }

  return true;
}
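
// For illustration: "clock_read_device()" lowers to OpReadClockKHR with a
// constant Device scope operand, whereas the "__spirv_ReadClockKHR" form
// passes the scope through from its argument.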

// These queries ask for a single size_t result for a given dimension index,
// e.g. size_t get_global_id(uint dimindex). In SPIR-V, the builtins
// corresponding to these values are all vec3 types, so we need to extract the
// correct index or return DefaultValue (0 or 1 depending on the query). We
// also handle extending or truncating in case size_t does not match the
// expected result type's bitwidth.
//
// For a constant index >= 3 we generate:
// %res = OpConstant %SizeT DefaultValue
//
// For other indices we generate:
// %g = OpVariable %ptr_V3_SizeT Input
// OpDecorate %g BuiltIn XXX
// OpDecorate %g LinkageAttributes "__spirv_BuiltInXXX"
// OpDecorate %g Constant
// %loadedVec = OpLoad %V3_SizeT %g
//
// Then, if the index is constant < 3, we generate:
// %res = OpCompositeExtract %SizeT %loadedVec idx
// If the index is dynamic, we generate:
// %tmp = OpVectorExtractDynamic %SizeT %loadedVec %idx
// %cmp = OpULessThan %bool %idx %const_3
// %res = OpSelect %SizeT %cmp %tmp %const_<DefaultValue>
//
// If the bitwidth of %res does not match the expected return type, we add an
// extend or truncate.
1552 MachineIRBuilder &MIRBuilder,
1554 SPIRV::BuiltIn::BuiltIn BuiltinValue,
1555 uint64_t DefaultValue) {
1556 Register IndexRegister = Call->Arguments[0];
1557 const unsigned ResultWidth = Call->ReturnType->getOperand(1).getImm();
1558 const unsigned PointerSize = GR->getPointerSize();
1559 const SPIRVType *PointerSizeType =
1560 GR->getOrCreateSPIRVIntegerType(PointerSize, MIRBuilder);
1561 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1562 auto IndexInstruction = getDefInstrMaybeConstant(IndexRegister, MRI);
1563
1564 // Set up the final register to do truncation or extension on at the end.
1565 Register ToTruncate = Call->ReturnRegister;
1566
1567 // If the index is constant, we can statically determine if it is in range.
1568 bool IsConstantIndex =
1569 IndexInstruction->getOpcode() == TargetOpcode::G_CONSTANT;
1570
1571 // If it's out of range (max dimension is 3), we can just return the constant
1572 // default value (0 or 1 depending on which query function).
1573 if (IsConstantIndex && getIConstVal(IndexRegister, MRI) >= 3) {
1574 Register DefaultReg = Call->ReturnRegister;
1575 if (PointerSize != ResultWidth) {
1576 DefaultReg = MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
1577 MRI->setRegClass(DefaultReg, &SPIRV::iIDRegClass);
1578 GR->assignSPIRVTypeToVReg(PointerSizeType, DefaultReg,
1579 MIRBuilder.getMF());
1580 ToTruncate = DefaultReg;
1581 }
1582 auto NewRegister =
1583 GR->buildConstantInt(DefaultValue, MIRBuilder, PointerSizeType, true);
1584 MIRBuilder.buildCopy(DefaultReg, NewRegister);
1585 } else { // If it could be in range, we need to load from the given builtin.
1586 auto Vec3Ty =
1587 GR->getOrCreateSPIRVVectorType(PointerSizeType, 3, MIRBuilder, true);
1588 Register LoadedVector =
1589 buildBuiltinVariableLoad(MIRBuilder, Vec3Ty, GR, BuiltinValue,
1590 LLT::fixed_vector(3, PointerSize));
1591 // Set up the vreg to extract the result to (possibly a new temporary one).
1592 Register Extracted = Call->ReturnRegister;
1593 if (!IsConstantIndex || PointerSize != ResultWidth) {
1594 Extracted = MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
1595 MRI->setRegClass(Extracted, &SPIRV::iIDRegClass);
1596 GR->assignSPIRVTypeToVReg(PointerSizeType, Extracted, MIRBuilder.getMF());
1597 }
1598 // Use Intrinsic::spv_extractelt so dynamic vs static extraction is
1599 // handled later: extr = spv_extractelt LoadedVector, IndexRegister.
1600 MachineInstrBuilder ExtractInst = MIRBuilder.buildIntrinsic(
1601 Intrinsic::spv_extractelt, ArrayRef<Register>{Extracted}, true, false);
1602 ExtractInst.addUse(LoadedVector).addUse(IndexRegister);
1603
1604 // If the index is dynamic, we need to check whether it's < 3 and use a select.
1605 if (!IsConstantIndex) {
1606 insertAssignInstr(Extracted, nullptr, PointerSizeType, GR, MIRBuilder,
1607 *MRI);
1608
1609 auto IndexType = GR->getSPIRVTypeForVReg(IndexRegister);
1610 auto BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder, true);
1611
1612 Register CompareRegister =
1613 MRI->createGenericVirtualRegister(LLT::scalar(1));
1614 MRI->setRegClass(CompareRegister, &SPIRV::iIDRegClass);
1615 GR->assignSPIRVTypeToVReg(BoolType, CompareRegister, MIRBuilder.getMF());
1616
1617 // Use G_ICMP to check if idxVReg < 3.
1618 MIRBuilder.buildICmp(
1619 CmpInst::ICMP_ULT, CompareRegister, IndexRegister,
1620 GR->buildConstantInt(3, MIRBuilder, IndexType, true));
1621
1622 // Get constant for the default value (0 or 1 depending on which
1623 // function).
1624 Register DefaultRegister =
1625 GR->buildConstantInt(DefaultValue, MIRBuilder, PointerSizeType, true);
1626
1627 // Get a register for the selection result (possibly a new temporary one).
1628 Register SelectionResult = Call->ReturnRegister;
1629 if (PointerSize != ResultWidth) {
1630 SelectionResult =
1631 MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
1632 MRI->setRegClass(SelectionResult, &SPIRV::iIDRegClass);
1633 GR->assignSPIRVTypeToVReg(PointerSizeType, SelectionResult,
1634 MIRBuilder.getMF());
1635 }
1636 // Create the final G_SELECT to return the extracted value or the default.
1637 MIRBuilder.buildSelect(SelectionResult, CompareRegister, Extracted,
1638 DefaultRegister);
1639 ToTruncate = SelectionResult;
1640 } else {
1641 ToTruncate = Extracted;
1642 }
1643 }
1644 // Alter the result's bitwidth if it does not match the SizeT value extracted.
1645 if (PointerSize != ResultWidth)
1646 MIRBuilder.buildZExtOrTrunc(Call->ReturnRegister, ToTruncate);
1647 return true;
1648}
1649
1650 static bool generateBuiltinVar(const SPIRV::IncomingCall *Call,
1651 MachineIRBuilder &MIRBuilder,
1652 SPIRVGlobalRegistry *GR) {
1653 // Lookup the builtin variable record.
1654 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1655 SPIRV::BuiltIn::BuiltIn Value =
1656 SPIRV::lookupGetBuiltin(Builtin->Name, Builtin->Set)->Value;
1657
1658 if (Value == SPIRV::BuiltIn::GlobalInvocationId)
1659 return genWorkgroupQuery(Call, MIRBuilder, GR, Value, 0);
1660
1661 // Build a load instruction for the builtin variable.
1662 unsigned BitWidth = GR->getScalarOrVectorBitWidth(Call->ReturnType);
1663 LLT LLType;
1664 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeVector)
1665 LLType =
1666 LLT::fixed_vector(Call->ReturnType->getOperand(2).getImm(), BitWidth);
1667 else
1668 LLType = LLT::scalar(BitWidth);
1669
1670 return buildBuiltinVariableLoad(MIRBuilder, Call->ReturnType, GR, Value,
1671 LLType, Call->ReturnRegister);
1672}
1673
1674 static bool generateAtomicInst(const SPIRV::IncomingCall *Call,
1675 MachineIRBuilder &MIRBuilder,
1676 SPIRVGlobalRegistry *GR) {
1677 // Lookup the instruction opcode in the TableGen records.
1678 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1679 unsigned Opcode =
1680 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1681
1682 switch (Opcode) {
1683 case SPIRV::OpStore:
1684 return buildAtomicInitInst(Call, MIRBuilder);
1685 case SPIRV::OpAtomicLoad:
1686 return buildAtomicLoadInst(Call, MIRBuilder, GR);
1687 case SPIRV::OpAtomicStore:
1688 return buildAtomicStoreInst(Call, MIRBuilder, GR);
1689 case SPIRV::OpAtomicCompareExchange:
1690 case SPIRV::OpAtomicCompareExchangeWeak:
1691 return buildAtomicCompareExchangeInst(Call, Builtin, Opcode, MIRBuilder,
1692 GR);
1693 case SPIRV::OpAtomicIAdd:
1694 case SPIRV::OpAtomicISub:
1695 case SPIRV::OpAtomicOr:
1696 case SPIRV::OpAtomicXor:
1697 case SPIRV::OpAtomicAnd:
1698 case SPIRV::OpAtomicExchange:
1699 return buildAtomicRMWInst(Call, Opcode, MIRBuilder, GR);
1700 case SPIRV::OpMemoryBarrier:
1701 return buildBarrierInst(Call, SPIRV::OpMemoryBarrier, MIRBuilder, GR);
1702 case SPIRV::OpAtomicFlagTestAndSet:
1703 case SPIRV::OpAtomicFlagClear:
1704 return buildAtomicFlagInst(Call, Opcode, MIRBuilder, GR);
1705 default:
1706 if (Call->isSpirvOp())
1707 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
1708 GR->getSPIRVTypeID(Call->ReturnType));
1709 return false;
1710 }
1711}
1712
1713 static bool generateAtomicFloatingInst(const SPIRV::IncomingCall *Call,
1714 MachineIRBuilder &MIRBuilder,
1715 SPIRVGlobalRegistry *GR) {
1716 // Lookup the instruction opcode in the TableGen records.
1717 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1718 unsigned Opcode = SPIRV::lookupAtomicFloatingBuiltin(Builtin->Name)->Opcode;
1719
1720 switch (Opcode) {
1721 case SPIRV::OpAtomicFAddEXT:
1722 case SPIRV::OpAtomicFMinEXT:
1723 case SPIRV::OpAtomicFMaxEXT:
1724 return buildAtomicFloatingRMWInst(Call, Opcode, MIRBuilder, GR);
1725 default:
1726 return false;
1727 }
1728}
1729
1730 static bool generateBarrierInst(const SPIRV::IncomingCall *Call,
1731 MachineIRBuilder &MIRBuilder,
1732 SPIRVGlobalRegistry *GR) {
1733 // Lookup the instruction opcode in the TableGen records.
1734 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1735 unsigned Opcode =
1736 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1737
1738 return buildBarrierInst(Call, Opcode, MIRBuilder, GR);
1739}
1740
1741 static bool generateCastToPtrInst(const SPIRV::IncomingCall *Call,
1742 MachineIRBuilder &MIRBuilder,
1743 SPIRVGlobalRegistry *GR) {
1744 // Lookup the instruction opcode in the TableGen records.
1745 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1746 unsigned Opcode =
1747 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1748
1749 if (Opcode == SPIRV::OpGenericCastToPtrExplicit) {
1750 SPIRV::StorageClass::StorageClass ResSC =
1751 GR->getPointerStorageClass(Call->ReturnRegister);
1752 if (!isGenericCastablePtr(ResSC))
1753 return false;
1754
1755 MIRBuilder.buildInstr(Opcode)
1756 .addDef(Call->ReturnRegister)
1757 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1758 .addUse(Call->Arguments[0])
1759 .addImm(ResSC);
1760 } else {
1761 MIRBuilder.buildInstr(TargetOpcode::G_ADDRSPACE_CAST)
1762 .addDef(Call->ReturnRegister)
1763 .addUse(Call->Arguments[0]);
1764 }
1765 return true;
1766}
1767
1768static bool generateDotOrFMulInst(const StringRef DemangledCall,
1769 const SPIRV::IncomingCall *Call,
1770 MachineIRBuilder &MIRBuilder,
1771 SPIRVGlobalRegistry *GR) {
1772 if (Call->isSpirvOp())
1773 return buildOpFromWrapper(MIRBuilder, SPIRV::OpDot, Call,
1774 GR->getSPIRVTypeID(Call->ReturnType));
1775
1776 bool IsVec = GR->getSPIRVTypeForVReg(Call->Arguments[0])->getOpcode() ==
1777 SPIRV::OpTypeVector;
1778 // Use OpDot only in case of vector args and OpFMul in case of scalar args.
1779 uint32_t OC = IsVec ? SPIRV::OpDot : SPIRV::OpFMulS;
1780 bool IsSwapReq = false;
1781
1782 const auto *ST =
1783 static_cast<const SPIRVSubtarget *>(&MIRBuilder.getMF().getSubtarget());
1784 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt) &&
1785 (ST->canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
1786 ST->isAtLeastSPIRVVer(VersionTuple(1, 6)))) {
1787 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1788 const SPIRV::IntegerDotProductBuiltin *IntDot =
1789 SPIRV::lookupIntegerDotProductBuiltin(Builtin->Name);
1790 if (IntDot) {
1791 OC = IntDot->Opcode;
1792 IsSwapReq = IntDot->IsSwapReq;
1793 } else if (IsVec) {
1794 // Handling "dot" and "dot_acc_sat" builtins which use vectors of
1795 // integers.
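// E.g. a mixed-signedness call such as dot(int4, uint4) selects OpSUDot;
// because the signed operand must come first, the operands are swapped below
// when the first argument is unsigned (illustrative example).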
1796 LLVMContext &Ctx = MIRBuilder.getContext();
1797 SmallVector<StringRef, 10> TypeStrs;
1798 SPIRV::parseBuiltinTypeStr(TypeStrs, DemangledCall, Ctx);
1799 bool IsFirstSigned = TypeStrs[0].trim()[0] != 'u';
1800 bool IsSecondSigned = TypeStrs[1].trim()[0] != 'u';
1801
1802 if (Call->BuiltinName == "dot") {
1803 if (IsFirstSigned && IsSecondSigned)
1804 OC = SPIRV::OpSDot;
1805 else if (!IsFirstSigned && !IsSecondSigned)
1806 OC = SPIRV::OpUDot;
1807 else {
1808 OC = SPIRV::OpSUDot;
1809 if (!IsFirstSigned)
1810 IsSwapReq = true;
1811 }
1812 } else if (Call->BuiltinName == "dot_acc_sat") {
1813 if (IsFirstSigned && IsSecondSigned)
1814 OC = SPIRV::OpSDotAccSat;
1815 else if (!IsFirstSigned && !IsSecondSigned)
1816 OC = SPIRV::OpUDotAccSat;
1817 else {
1818 OC = SPIRV::OpSUDotAccSat;
1819 if (!IsFirstSigned)
1820 IsSwapReq = true;
1821 }
1822 }
1823 }
1824 }
1825
1826 MachineInstrBuilder MIB = MIRBuilder.buildInstr(OC)
1827 .addDef(Call->ReturnRegister)
1828 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
1829
1830 if (IsSwapReq) {
1831 MIB.addUse(Call->Arguments[1]);
1832 MIB.addUse(Call->Arguments[0]);
1833 // Pass the remaining (accumulator) operands, needed for dot_acc_sat* builtins.
1834 for (size_t i = 2; i < Call->Arguments.size(); ++i)
1835 MIB.addUse(Call->Arguments[i]);
1836 } else {
1837 for (size_t i = 0; i < Call->Arguments.size(); ++i)
1838 MIB.addUse(Call->Arguments[i]);
1839 }
1840
1841 // Add the Packed Vector Format operand for integer dot product builtins if
1842 // the arguments are scalar.
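// E.g. a "dot" over two scalar i32 arguments treats each value as four
// packed 8-bit components, per SPV_KHR_integer_dot_product (a sketch of the
// packed case; the exact builtin names come from the TableGen records).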
1843 if (!IsVec && OC != SPIRV::OpFMulS)
1844 MIB.addImm(SPIRV::PackedVectorFormat4x8Bit);
1845
1846 return true;
1847}
1848
1849 static bool generateWaveInst(const SPIRV::IncomingCall *Call,
1850 MachineIRBuilder &MIRBuilder,
1851 SPIRVGlobalRegistry *GR) {
1852 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1853 SPIRV::BuiltIn::BuiltIn Value =
1854 SPIRV::lookupGetBuiltin(Builtin->Name, Builtin->Set)->Value;
1855
1856 // For now, we only support a single Wave intrinsic with a single return type.
1857 assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt);
1858 LLT LLType = LLT::scalar(GR->getScalarOrVectorBitWidth(Call->ReturnType));
1859
1861 MIRBuilder, Call->ReturnType, GR, Value, LLType, Call->ReturnRegister,
1862 /* isConst= */ false, /* LinkageType= */ std::nullopt);
1863}
1864
1865// We expect a builtin
1866 // Name(ptr sret([RetType]) %result, Type %operand1, Type %operand2)
1867// where %result is a pointer to where the result of the builtin execution
1868// is to be stored, and generate the following instructions:
1869 // Res = Opcode RetType Operand1 Operand2
1870// OpStore RetVariable Res
1871 static bool generateICarryBorrowInst(const SPIRV::IncomingCall *Call,
1872 MachineIRBuilder &MIRBuilder,
1873 SPIRVGlobalRegistry *GR) {
1874 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1875 unsigned Opcode =
1876 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1877
1878 Register SRetReg = Call->Arguments[0];
1879 SPIRVType *PtrRetType = GR->getSPIRVTypeForVReg(SRetReg);
1880 SPIRVType *RetType = GR->getPointeeType(PtrRetType);
1881 if (!RetType)
1882 report_fatal_error("The first parameter must be a pointer");
1883 if (RetType->getOpcode() != SPIRV::OpTypeStruct)
1884 report_fatal_error("Expected struct type result for the arithmetic with "
1885 "overflow builtins");
1886
1887 SPIRVType *OpType1 = GR->getSPIRVTypeForVReg(Call->Arguments[1]);
1888 SPIRVType *OpType2 = GR->getSPIRVTypeForVReg(Call->Arguments[2]);
1889 if (!OpType1 || !OpType2 || OpType1 != OpType2)
1890 report_fatal_error("Operands must have the same type");
1891 if (OpType1->getOpcode() == SPIRV::OpTypeVector)
1892 switch (Opcode) {
1893 case SPIRV::OpIAddCarryS:
1894 Opcode = SPIRV::OpIAddCarryV;
1895 break;
1896 case SPIRV::OpISubBorrowS:
1897 Opcode = SPIRV::OpISubBorrowV;
1898 break;
1899 }
1900
1901 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1902 Register ResReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1903 if (const TargetRegisterClass *DstRC =
1904 MRI->getRegClassOrNull(Call->Arguments[1])) {
1905 MRI->setRegClass(ResReg, DstRC);
1906 MRI->setType(ResReg, MRI->getType(Call->Arguments[1]));
1907 } else {
1908 MRI->setType(ResReg, LLT::scalar(64));
1909 }
1910 GR->assignSPIRVTypeToVReg(RetType, ResReg, MIRBuilder.getMF());
1911 MIRBuilder.buildInstr(Opcode)
1912 .addDef(ResReg)
1913 .addUse(GR->getSPIRVTypeID(RetType))
1914 .addUse(Call->Arguments[1])
1915 .addUse(Call->Arguments[2]);
1916 MIRBuilder.buildInstr(SPIRV::OpStore).addUse(SRetReg).addUse(ResReg);
1917 return true;
1918}
1919
1920 static bool generateGetQueryInst(const SPIRV::IncomingCall *Call,
1921 MachineIRBuilder &MIRBuilder,
1922 SPIRVGlobalRegistry *GR) {
1923 // Lookup the builtin record.
1924 SPIRV::BuiltIn::BuiltIn Value =
1925 SPIRV::lookupGetBuiltin(Call->Builtin->Name, Call->Builtin->Set)->Value;
1926 const bool IsDefaultOne = (Value == SPIRV::BuiltIn::GlobalSize ||
1927 Value == SPIRV::BuiltIn::NumWorkgroups ||
1928 Value == SPIRV::BuiltIn::WorkgroupSize ||
1929 Value == SPIRV::BuiltIn::EnqueuedWorkgroupSize);
1930 return genWorkgroupQuery(Call, MIRBuilder, GR, Value, IsDefaultOne ? 1 : 0);
1931}
1932
1933 static bool generateImageSizeQueryInst(const SPIRV::IncomingCall *Call,
1934 MachineIRBuilder &MIRBuilder,
1935 SPIRVGlobalRegistry *GR) {
1936 // Lookup the image size query component number in the TableGen records.
1937 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1938 uint32_t Component =
1939 SPIRV::lookupImageQueryBuiltin(Builtin->Name, Builtin->Set)->Component;
1940 // Query result may either be a vector or a scalar. If return type is not a
1941 // vector, expect only a single size component. Otherwise get the number of
1942 // expected components.
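// E.g. get_image_width(image2d_t) expects a single component while the
// SPIR-V size query on a 2D image yields two; the extract/shuffle logic
// below reconciles the two shapes (illustrative).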
1943 unsigned NumExpectedRetComponents =
1944 Call->ReturnType->getOpcode() == SPIRV::OpTypeVector
1945 ? Call->ReturnType->getOperand(2).getImm()
1946 : 1;
1947 // Get the actual number of query result/size components.
1948 SPIRVType *ImgType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
1949 unsigned NumActualRetComponents = getNumSizeComponents(ImgType);
1950 Register QueryResult = Call->ReturnRegister;
1951 SPIRVType *QueryResultType = Call->ReturnType;
1952 if (NumExpectedRetComponents != NumActualRetComponents) {
1953 unsigned Bitwidth = Call->ReturnType->getOpcode() == SPIRV::OpTypeInt
1954 ? Call->ReturnType->getOperand(1).getImm()
1955 : 32;
1956 QueryResult = MIRBuilder.getMRI()->createGenericVirtualRegister(
1957 LLT::fixed_vector(NumActualRetComponents, Bitwidth));
1958 MIRBuilder.getMRI()->setRegClass(QueryResult, &SPIRV::vIDRegClass);
1959 SPIRVType *IntTy = GR->getOrCreateSPIRVIntegerType(Bitwidth, MIRBuilder);
1960 QueryResultType = GR->getOrCreateSPIRVVectorType(
1961 IntTy, NumActualRetComponents, MIRBuilder, true);
1962 GR->assignSPIRVTypeToVReg(QueryResultType, QueryResult, MIRBuilder.getMF());
1963 }
1964 bool IsDimBuf = ImgType->getOperand(2).getImm() == SPIRV::Dim::DIM_Buffer;
1965 unsigned Opcode =
1966 IsDimBuf ? SPIRV::OpImageQuerySize : SPIRV::OpImageQuerySizeLod;
1967 auto MIB = MIRBuilder.buildInstr(Opcode)
1968 .addDef(QueryResult)
1969 .addUse(GR->getSPIRVTypeID(QueryResultType))
1970 .addUse(Call->Arguments[0]);
1971 if (!IsDimBuf)
1972 MIB.addUse(buildConstantIntReg32(0, MIRBuilder, GR)); // Lod id.
1973 if (NumExpectedRetComponents == NumActualRetComponents)
1974 return true;
1975 if (NumExpectedRetComponents == 1) {
1976 // Only 1 component is expected, build OpCompositeExtract instruction.
1977 unsigned ExtractedComposite =
1978 Component == 3 ? NumActualRetComponents - 1 : Component;
1979 assert(ExtractedComposite < NumActualRetComponents &&
1980 "Invalid composite index!");
1981 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
1982 SPIRVType *NewType = nullptr;
1983 if (QueryResultType->getOpcode() == SPIRV::OpTypeVector) {
1984 Register NewTypeReg = QueryResultType->getOperand(1).getReg();
1985 if (TypeReg != NewTypeReg &&
1986 (NewType = GR->getSPIRVTypeForVReg(NewTypeReg)) != nullptr)
1987 TypeReg = NewTypeReg;
1988 }
1989 MIRBuilder.buildInstr(SPIRV::OpCompositeExtract)
1990 .addDef(Call->ReturnRegister)
1991 .addUse(TypeReg)
1992 .addUse(QueryResult)
1993 .addImm(ExtractedComposite);
1994 if (NewType != nullptr)
1995 insertAssignInstr(Call->ReturnRegister, nullptr, NewType, GR, MIRBuilder,
1996 MIRBuilder.getMF().getRegInfo());
1997 } else {
1998 // More than 1 component is expected, fill a new vector.
1999 auto MIB = MIRBuilder.buildInstr(SPIRV::OpVectorShuffle)
2000 .addDef(Call->ReturnRegister)
2001 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2002 .addUse(QueryResult)
2003 .addUse(QueryResult);
2004 for (unsigned i = 0; i < NumExpectedRetComponents; ++i)
2005 MIB.addImm(i < NumActualRetComponents ? i : 0xffffffff);
2006 }
2007 return true;
2008}
2009
2010 static bool generateImageMiscQueryInst(const SPIRV::IncomingCall *Call,
2011 MachineIRBuilder &MIRBuilder,
2012 SPIRVGlobalRegistry *GR) {
2013 assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt &&
2014 "Image samples query result must be of int type!");
2015
2016 // Lookup the instruction opcode in the TableGen records.
2017 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2018 unsigned Opcode =
2019 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2020
2021 Register Image = Call->Arguments[0];
2022 SPIRV::Dim::Dim ImageDimensionality = static_cast<SPIRV::Dim::Dim>(
2023 GR->getSPIRVTypeForVReg(Image)->getOperand(2).getImm());
2024 (void)ImageDimensionality;
2025
2026 switch (Opcode) {
2027 case SPIRV::OpImageQuerySamples:
2028 assert(ImageDimensionality == SPIRV::Dim::DIM_2D &&
2029 "Image must be of 2D dimensionality");
2030 break;
2031 case SPIRV::OpImageQueryLevels:
2032 assert((ImageDimensionality == SPIRV::Dim::DIM_1D ||
2033 ImageDimensionality == SPIRV::Dim::DIM_2D ||
2034 ImageDimensionality == SPIRV::Dim::DIM_3D ||
2035 ImageDimensionality == SPIRV::Dim::DIM_Cube) &&
2036 "Image must be of 1D/2D/3D/Cube dimensionality");
2037 break;
2038 }
2039
2040 MIRBuilder.buildInstr(Opcode)
2041 .addDef(Call->ReturnRegister)
2042 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2043 .addUse(Image);
2044 return true;
2045}
2046
2047// TODO: Move to TableGen.
2048static SPIRV::SamplerAddressingMode::SamplerAddressingMode
2049 getSamplerAddressingModeFromBitmask(unsigned Bitmask) {
2050 switch (Bitmask & SPIRV::CLK_ADDRESS_MODE_MASK) {
2051 case SPIRV::CLK_ADDRESS_CLAMP:
2052 return SPIRV::SamplerAddressingMode::Clamp;
2053 case SPIRV::CLK_ADDRESS_CLAMP_TO_EDGE:
2054 return SPIRV::SamplerAddressingMode::ClampToEdge;
2055 case SPIRV::CLK_ADDRESS_REPEAT:
2056 return SPIRV::SamplerAddressingMode::Repeat;
2057 case SPIRV::CLK_ADDRESS_MIRRORED_REPEAT:
2058 return SPIRV::SamplerAddressingMode::RepeatMirrored;
2059 case SPIRV::CLK_ADDRESS_NONE:
2060 return SPIRV::SamplerAddressingMode::None;
2061 default:
2062 report_fatal_error("Unknown CL address mode");
2063 }
2064}
2065
2066static unsigned getSamplerParamFromBitmask(unsigned Bitmask) {
2067 return (Bitmask & SPIRV::CLK_NORMALIZED_COORDS_TRUE) ? 1 : 0;
2068}
2069
2070static SPIRV::SamplerFilterMode::SamplerFilterMode
2071 getSamplerFilterModeFromBitmask(unsigned Bitmask) {
2072 if (Bitmask & SPIRV::CLK_FILTER_LINEAR)
2073 return SPIRV::SamplerFilterMode::Linear;
2074 if (Bitmask & SPIRV::CLK_FILTER_NEAREST)
2075 return SPIRV::SamplerFilterMode::Nearest;
2076 return SPIRV::SamplerFilterMode::Nearest;
2077}
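// E.g. an OpenCL sampler constant such as
// (CLK_NORMALIZED_COORDS_TRUE | CLK_ADDRESS_CLAMP | CLK_FILTER_LINEAR)
// decomposes through the three helpers above into the SPIR-V operands
// (Clamp, 1, Linear) (illustrative values).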
2078
2079static bool generateReadImageInst(const StringRef DemangledCall,
2080 const SPIRV::IncomingCall *Call,
2081 MachineIRBuilder &MIRBuilder,
2082 SPIRVGlobalRegistry *GR) {
2083 if (Call->isSpirvOp())
2084 return buildOpFromWrapper(MIRBuilder, SPIRV::OpImageRead, Call,
2085 GR->getSPIRVTypeID(Call->ReturnType));
2086 Register Image = Call->Arguments[0];
2087 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2088 bool HasOclSampler = DemangledCall.contains_insensitive("ocl_sampler");
2089 bool HasMsaa = DemangledCall.contains_insensitive("msaa");
2090 if (HasOclSampler) {
2091 Register Sampler = Call->Arguments[1];
2092
2093 if (!GR->isScalarOfType(Sampler, SPIRV::OpTypeSampler) &&
2094 getDefInstrMaybeConstant(Sampler, MRI)->getOperand(1).isCImm()) {
2095 uint64_t SamplerMask = getIConstVal(Sampler, MRI);
2096 Sampler = GR->buildConstantSampler(
2097 Register(), getSamplerAddressingModeFromBitmask(SamplerMask),
2098 getSamplerParamFromBitmask(SamplerMask),
2099 getSamplerFilterModeFromBitmask(SamplerMask), MIRBuilder);
2100 }
2101 SPIRVType *ImageType = GR->getSPIRVTypeForVReg(Image);
2102 SPIRVType *SampledImageType =
2103 GR->getOrCreateOpTypeSampledImage(ImageType, MIRBuilder);
2104 Register SampledImage = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2105
2106 MIRBuilder.buildInstr(SPIRV::OpSampledImage)
2107 .addDef(SampledImage)
2108 .addUse(GR->getSPIRVTypeID(SampledImageType))
2109 .addUse(Image)
2110 .addUse(Sampler);
2111
2112 Register Lod = GR->buildConstantFP(APFloat::getZero(APFloat::IEEEsingle()),
2113 MIRBuilder);
2114
2115 if (Call->ReturnType->getOpcode() != SPIRV::OpTypeVector) {
2116 SPIRVType *TempType =
2117 GR->getOrCreateSPIRVVectorType(Call->ReturnType, 4, MIRBuilder, true);
2118 Register TempRegister =
2119 MRI->createGenericVirtualRegister(GR->getRegType(TempType));
2120 MRI->setRegClass(TempRegister, GR->getRegClass(TempType));
2121 GR->assignSPIRVTypeToVReg(TempType, TempRegister, MIRBuilder.getMF());
2122 MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
2123 .addDef(TempRegister)
2124 .addUse(GR->getSPIRVTypeID(TempType))
2125 .addUse(SampledImage)
2126 .addUse(Call->Arguments[2]) // Coordinate.
2127 .addImm(SPIRV::ImageOperand::Lod)
2128 .addUse(Lod);
2129 MIRBuilder.buildInstr(SPIRV::OpCompositeExtract)
2130 .addDef(Call->ReturnRegister)
2131 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2132 .addUse(TempRegister)
2133 .addImm(0);
2134 } else {
2135 MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
2136 .addDef(Call->ReturnRegister)
2137 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2138 .addUse(SampledImage)
2139 .addUse(Call->Arguments[2]) // Coordinate.
2140 .addImm(SPIRV::ImageOperand::Lod)
2141 .addUse(Lod);
2142 }
2143 } else if (HasMsaa) {
2144 MIRBuilder.buildInstr(SPIRV::OpImageRead)
2145 .addDef(Call->ReturnRegister)
2146 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2147 .addUse(Image)
2148 .addUse(Call->Arguments[1]) // Coordinate.
2149 .addImm(SPIRV::ImageOperand::Sample)
2150 .addUse(Call->Arguments[2]);
2151 } else {
2152 MIRBuilder.buildInstr(SPIRV::OpImageRead)
2153 .addDef(Call->ReturnRegister)
2154 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2155 .addUse(Image)
2156 .addUse(Call->Arguments[1]); // Coordinate.
2157 }
2158 return true;
2159}
2160
2161 static bool generateWriteImageInst(const SPIRV::IncomingCall *Call,
2162 MachineIRBuilder &MIRBuilder,
2163 SPIRVGlobalRegistry *GR) {
2164 if (Call->isSpirvOp())
2165 return buildOpFromWrapper(MIRBuilder, SPIRV::OpImageWrite, Call,
2166 Register(0));
2167 MIRBuilder.buildInstr(SPIRV::OpImageWrite)
2168 .addUse(Call->Arguments[0]) // Image.
2169 .addUse(Call->Arguments[1]) // Coordinate.
2170 .addUse(Call->Arguments[2]); // Texel.
2171 return true;
2172}
2173
2174static bool generateSampleImageInst(const StringRef DemangledCall,
2175 const SPIRV::IncomingCall *Call,
2176 MachineIRBuilder &MIRBuilder,
2177 SPIRVGlobalRegistry *GR) {
2178 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2179 if (Call->Builtin->Name.contains_insensitive(
2180 "__translate_sampler_initializer")) {
2181 // Build sampler literal.
2182 uint64_t Bitmask = getIConstVal(Call->Arguments[0], MRI);
2183 Register Sampler = GR->buildConstantSampler(
2184 Call->ReturnRegister, getSamplerAddressingModeFromBitmask(Bitmask),
2185 getSamplerParamFromBitmask(Bitmask),
2186 getSamplerFilterModeFromBitmask(Bitmask), MIRBuilder);
2187 return Sampler.isValid();
2188 } else if (Call->Builtin->Name.contains_insensitive("__spirv_SampledImage")) {
2189 // Create OpSampledImage.
2190 Register Image = Call->Arguments[0];
2191 SPIRVType *ImageType = GR->getSPIRVTypeForVReg(Image);
2192 SPIRVType *SampledImageType =
2193 GR->getOrCreateOpTypeSampledImage(ImageType, MIRBuilder);
2194 Register SampledImage =
2195 Call->ReturnRegister.isValid()
2196 ? Call->ReturnRegister
2197 : MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2198 MIRBuilder.buildInstr(SPIRV::OpSampledImage)
2199 .addDef(SampledImage)
2200 .addUse(GR->getSPIRVTypeID(SampledImageType))
2201 .addUse(Image)
2202 .addUse(Call->Arguments[1]); // Sampler.
2203 return true;
2204 } else if (Call->Builtin->Name.contains_insensitive(
2205 "__spirv_ImageSampleExplicitLod")) {
2206 // Sample an image using an explicit level of detail.
2207 std::string ReturnType = DemangledCall.str();
2208 if (DemangledCall.contains("_R")) {
2209 ReturnType = ReturnType.substr(ReturnType.find("_R") + 2);
2210 ReturnType = ReturnType.substr(0, ReturnType.find('('));
2211 }
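// E.g. (illustrative) a demangled name such as
// "__spirv_ImageSampleExplicitLod_Rfloat4(...)" carries the return type
// name "float4" after the "_R" marker.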
2212 SPIRVType *Type =
2213 Call->ReturnType
2214 ? Call->ReturnType
2215 : GR->getOrCreateSPIRVTypeByName(ReturnType, MIRBuilder, true);
2216 if (!Type) {
2217 std::string DiagMsg =
2218 "Unable to recognize SPIRV type name: " + ReturnType;
2219 report_fatal_error(DiagMsg.c_str());
2220 }
2221 MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
2222 .addDef(Call->ReturnRegister)
2223 .addUse(GR->getSPIRVTypeID(Type))
2224 .addUse(Call->Arguments[0]) // Image.
2225 .addUse(Call->Arguments[1]) // Coordinate.
2226 .addImm(SPIRV::ImageOperand::Lod)
2227 .addUse(Call->Arguments[3]);
2228 return true;
2229 }
2230 return false;
2231}
2232
2233 static bool generateSelectInst(const SPIRV::IncomingCall *Call,
2234 MachineIRBuilder &MIRBuilder) {
2235 MIRBuilder.buildSelect(Call->ReturnRegister, Call->Arguments[0],
2236 Call->Arguments[1], Call->Arguments[2]);
2237 return true;
2238}
2239
2240 static bool generateConstructInst(const SPIRV::IncomingCall *Call,
2241 MachineIRBuilder &MIRBuilder,
2242 SPIRVGlobalRegistry *GR) {
2243 createContinuedInstructions(MIRBuilder, SPIRV::OpCompositeConstruct, 3,
2244 SPIRV::OpCompositeConstructContinuedINTEL,
2245 Call->Arguments, Call->ReturnRegister,
2246 GR->getSPIRVTypeID(Call->ReturnType));
2247 return true;
2248}
2249
2250 static bool generateCoopMatrInst(const SPIRV::IncomingCall *Call,
2251 MachineIRBuilder &MIRBuilder,
2252 SPIRVGlobalRegistry *GR) {
2253 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2254 unsigned Opcode =
2255 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2256 bool IsSet = Opcode != SPIRV::OpCooperativeMatrixStoreKHR &&
2257 Opcode != SPIRV::OpCooperativeMatrixStoreCheckedINTEL &&
2258 Opcode != SPIRV::OpCooperativeMatrixPrefetchINTEL;
2259 unsigned ArgSz = Call->Arguments.size();
2260 unsigned LiteralIdx = 0;
2261 switch (Opcode) {
2262 // The memory operand is optional and is a literal.
2263 case SPIRV::OpCooperativeMatrixLoadKHR:
2264 LiteralIdx = ArgSz > 3 ? 3 : 0;
2265 break;
2266 case SPIRV::OpCooperativeMatrixStoreKHR:
2267 LiteralIdx = ArgSz > 4 ? 4 : 0;
2268 break;
2269 case SPIRV::OpCooperativeMatrixLoadCheckedINTEL:
2270 LiteralIdx = ArgSz > 7 ? 7 : 0;
2271 break;
2272 case SPIRV::OpCooperativeMatrixStoreCheckedINTEL:
2273 LiteralIdx = ArgSz > 8 ? 8 : 0;
2274 break;
2275 // Cooperative Matrix Operands operand is optional and is literal.
2276 case SPIRV::OpCooperativeMatrixMulAddKHR:
2277 LiteralIdx = ArgSz > 3 ? 3 : 0;
2278 break;
2279 };
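// E.g. (a sketch following the SPV_KHR_cooperative_matrix operand order):
// for a load call of the form (ptr, layout, stride, memoperand), the
// optional memory-operand literal sits at argument index 3.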
2280
2281 SmallVector<uint32_t, 1> ImmArgs;
2282 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2283 if (Opcode == SPIRV::OpCooperativeMatrixPrefetchINTEL) {
2284 const uint32_t CacheLevel = getConstFromIntrinsic(Call->Arguments[3], MRI);
2285 auto MIB = MIRBuilder.buildInstr(SPIRV::OpCooperativeMatrixPrefetchINTEL)
2286 .addUse(Call->Arguments[0]) // pointer
2287 .addUse(Call->Arguments[1]) // rows
2288 .addUse(Call->Arguments[2]) // columns
2289 .addImm(CacheLevel) // cache level
2290 .addUse(Call->Arguments[4]); // memory layout
2291 if (ArgSz > 5)
2292 MIB.addUse(Call->Arguments[5]); // stride
2293 if (ArgSz > 6) {
2294 const uint32_t MemOp = getConstFromIntrinsic(Call->Arguments[6], MRI);
2295 MIB.addImm(MemOp); // memory operand
2296 }
2297 return true;
2298 }
2299 if (LiteralIdx > 0)
2300 ImmArgs.push_back(getConstFromIntrinsic(Call->Arguments[LiteralIdx], MRI));
2301 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
2302 if (Opcode == SPIRV::OpCooperativeMatrixLengthKHR) {
2303 SPIRVType *CoopMatrType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
2304 if (!CoopMatrType)
2305 report_fatal_error("Can't find a register's type definition");
2306 MIRBuilder.buildInstr(Opcode)
2307 .addDef(Call->ReturnRegister)
2308 .addUse(TypeReg)
2309 .addUse(CoopMatrType->getOperand(0).getReg());
2310 return true;
2311 }
2312 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2313 IsSet ? TypeReg : Register(0), ImmArgs);
2314}
2315
2316 static bool generateSpecConstantInst(const SPIRV::IncomingCall *Call,
2317 MachineIRBuilder &MIRBuilder,
2318 SPIRVGlobalRegistry *GR) {
2319 // Lookup the instruction opcode in the TableGen records.
2320 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2321 unsigned Opcode =
2322 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2323 const MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2324
2325 switch (Opcode) {
2326 case SPIRV::OpSpecConstant: {
2327 // Build the SpecID decoration.
2328 unsigned SpecId =
2329 static_cast<unsigned>(getIConstVal(Call->Arguments[0], MRI));
2330 buildOpDecorate(Call->ReturnRegister, MIRBuilder, SPIRV::Decoration::SpecId,
2331 {SpecId});
2332 // Determine the constant MI.
2333 Register ConstRegister = Call->Arguments[1];
2334 const MachineInstr *Const = getDefInstrMaybeConstant(ConstRegister, MRI);
2335 assert(Const &&
2336 (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
2337 Const->getOpcode() == TargetOpcode::G_FCONSTANT) &&
2338 "Argument should be either an int or floating-point constant");
2339 // Determine the opcode and built the OpSpec MI.
2340 const MachineOperand &ConstOperand = Const->getOperand(1);
2341 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeBool) {
2342 assert(ConstOperand.isCImm() && "Int constant operand is expected");
2343 Opcode = ConstOperand.getCImm()->getValue().getZExtValue()
2344 ? SPIRV::OpSpecConstantTrue
2345 : SPIRV::OpSpecConstantFalse;
2346 }
2347 auto MIB = MIRBuilder.buildInstr(Opcode)
2348 .addDef(Call->ReturnRegister)
2349 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
2350
2351 if (Call->ReturnType->getOpcode() != SPIRV::OpTypeBool) {
2352 if (Const->getOpcode() == TargetOpcode::G_CONSTANT)
2353 addNumImm(ConstOperand.getCImm()->getValue(), MIB);
2354 else
2355 addNumImm(ConstOperand.getFPImm()->getValueAPF().bitcastToAPInt(), MIB);
2356 }
2357 return true;
2358 }
2359 case SPIRV::OpSpecConstantComposite: {
2360 createContinuedInstructions(MIRBuilder, Opcode, 3,
2361 SPIRV::OpSpecConstantCompositeContinuedINTEL,
2362 Call->Arguments, Call->ReturnRegister,
2363 GR->getSPIRVTypeID(Call->ReturnType));
2364 return true;
2365 }
2366 default:
2367 return false;
2368 }
2369}
2370
2371 static bool generateExtendedBitOpsInst(const SPIRV::IncomingCall *Call,
2372 MachineIRBuilder &MIRBuilder,
2373 SPIRVGlobalRegistry *GR) {
2374 // Lookup the instruction opcode in the TableGen records.
2375 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2376 unsigned Opcode =
2377 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2378
2379 return buildExtendedBitOpsInst(Call, Opcode, MIRBuilder, GR);
2380}
2381
2382 static bool generateBindlessImageINTELInst(const SPIRV::IncomingCall *Call,
2383 MachineIRBuilder &MIRBuilder,
2384 SPIRVGlobalRegistry *GR) {
2385 // Lookup the instruction opcode in the TableGen records.
2386 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2387 unsigned Opcode =
2388 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2389
2390 return buildBindlessImageINTELInst(Call, Opcode, MIRBuilder, GR);
2391}
2392
2393 static bool generateBlockingPipesInst(const SPIRV::IncomingCall *Call,
2394 MachineIRBuilder &MIRBuilder,
2395 SPIRVGlobalRegistry *GR) {
2396 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2397 unsigned Opcode =
2398 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2399 return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0));
2400}
2401
2402 static bool buildAPFixedPointInst(const SPIRV::IncomingCall *Call,
2403 unsigned Opcode, MachineIRBuilder &MIRBuilder,
2404 SPIRVGlobalRegistry *GR) {
2405 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2406 SmallVector<uint32_t, 5> ImmArgs;
2407 Register InputReg = Call->Arguments[0];
2408 const Type *RetTy = GR->getTypeForSPIRVType(Call->ReturnType);
2409 bool IsSRet = RetTy->isVoidTy();
2410
2411 if (IsSRet) {
2412 const LLT ValTy = MRI->getType(InputReg);
2413 Register ActualRetValReg = MRI->createGenericVirtualRegister(ValTy);
2414 SPIRVType *InstructionType =
2415 GR->getPointeeType(GR->getSPIRVTypeForVReg(InputReg));
2416 InputReg = Call->Arguments[1];
2417 auto InputType = GR->getTypeForSPIRVType(GR->getSPIRVTypeForVReg(InputReg));
2418 Register PtrInputReg;
2419 if (InputType->getTypeID() == llvm::Type::TypeID::TypedPointerTyID) {
2420 LLT InputLLT = MRI->getType(InputReg);
2421 PtrInputReg = MRI->createGenericVirtualRegister(InputLLT);
2422 SPIRVType *PtrType =
2423 GR->getPointeeType(GR->getSPIRVTypeForVReg(InputReg));
2424 MachineMemOperand *MMO1 = MIRBuilder.getMF().getMachineMemOperand(
2425 MachinePointerInfo(), MachineMemOperand::MOLoad,
2426 InputLLT.getSizeInBytes(), Align(4));
2427 MIRBuilder.buildLoad(PtrInputReg, InputReg, *MMO1);
2428 MRI->setRegClass(PtrInputReg, &SPIRV::iIDRegClass);
2429 GR->assignSPIRVTypeToVReg(PtrType, PtrInputReg, MIRBuilder.getMF());
2430 }
2431
2432 for (unsigned index = 2; index < 7; index++) {
2433 ImmArgs.push_back(getConstFromIntrinsic(Call->Arguments[index], MRI));
2434 }
2435
2436 // Emit the instruction
2437 auto MIB = MIRBuilder.buildInstr(Opcode)
2438 .addDef(ActualRetValReg)
2439 .addUse(GR->getSPIRVTypeID(InstructionType));
2440 if (PtrInputReg)
2441 MIB.addUse(PtrInputReg);
2442 else
2443 MIB.addUse(InputReg);
2444
2445 for (uint32_t Imm : ImmArgs)
2446 MIB.addImm(Imm);
2447 unsigned Size = ValTy.getSizeInBytes();
2448 // Store result to the pointer passed in Arg[0]
2449 MachineMemOperand *MMO = MIRBuilder.getMF().getMachineMemOperand(
2450 MachinePointerInfo(), MachineMemOperand::MOStore, Size, Align(4));
2451 MRI->setRegClass(ActualRetValReg, &SPIRV::pIDRegClass);
2452 MIRBuilder.buildStore(ActualRetValReg, Call->Arguments[0], *MMO);
2453 return true;
2454 } else {
2455 for (unsigned index = 1; index < 6; index++)
2456 ImmArgs.push_back(getConstFromIntrinsic(Call->Arguments[index], MRI));
2457
2458 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2459 GR->getSPIRVTypeID(Call->ReturnType), ImmArgs);
2460 }
2461}
2462
2463 static bool generateAPFixedPointInst(const SPIRV::IncomingCall *Call,
2464 MachineIRBuilder &MIRBuilder,
2465 SPIRVGlobalRegistry *GR) {
2466 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2467 unsigned Opcode =
2468 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2469
2470 return buildAPFixedPointInst(Call, Opcode, MIRBuilder, GR);
2471}
2472
2473static bool
2474 generateTernaryBitwiseFunctionINTELInst(const SPIRV::IncomingCall *Call,
2475 MachineIRBuilder &MIRBuilder,
2476 SPIRVGlobalRegistry *GR) {
2477 // Lookup the instruction opcode in the TableGen records.
2478 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2479 unsigned Opcode =
2480 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2481
2482 return buildTernaryBitwiseFunctionINTELInst(Call, Opcode, MIRBuilder, GR);
2483}
2484
2485 static bool generate2DBlockIOINTELInst(const SPIRV::IncomingCall *Call,
2486 MachineIRBuilder &MIRBuilder,
2487 SPIRVGlobalRegistry *GR) {
2488 // Lookup the instruction opcode in the TableGen records.
2489 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2490 unsigned Opcode =
2491 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2492
2493 return build2DBlockIOINTELInst(Call, Opcode, MIRBuilder, GR);
2494}
2495
2496 static bool generatePipeInst(const SPIRV::IncomingCall *Call,
2497 MachineIRBuilder &MIRBuilder,
2498 SPIRVGlobalRegistry *GR) {
2499 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2500 unsigned Opcode =
2501 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2502
2503 unsigned Scope = SPIRV::Scope::Workgroup;
2504 if (Builtin->Name.contains("sub_group"))
2505 Scope = SPIRV::Scope::Subgroup;
2506
2507 return buildPipeInst(Call, Opcode, Scope, MIRBuilder, GR);
2508}
2509
2510 static bool generatePredicatedLoadStoreInst(const SPIRV::IncomingCall *Call,
2511 MachineIRBuilder &MIRBuilder,
2512 SPIRVGlobalRegistry *GR) {
2513 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2514 unsigned Opcode =
2515 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2516
2517 bool IsSet = Opcode != SPIRV::OpPredicatedStoreINTEL;
2518 unsigned ArgSz = Call->Arguments.size();
2519 SmallVector<uint32_t, 1> ImmArgs;
2520 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2521 // The memory operand is optional and is a literal.
2522 if (ArgSz > 3)
2523 ImmArgs.push_back(
2524 getConstFromIntrinsic(Call->Arguments[/*Literal index*/ 3], MRI));
2525
2526 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
2527 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2528 IsSet ? TypeReg : Register(0), ImmArgs);
2529}
2530
2531 static bool buildNDRange(const SPIRV::IncomingCall *Call,
2532 MachineIRBuilder &MIRBuilder,
2533 SPIRVGlobalRegistry *GR) {
2534 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2535 SPIRVType *PtrType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
2536 assert(PtrType->getOpcode() == SPIRV::OpTypePointer &&
2537 PtrType->getOperand(2).isReg());
2538 Register TypeReg = PtrType->getOperand(2).getReg();
2539 SPIRVType *StructType = GR->getSPIRVTypeForVReg(TypeReg);
2540 MachineFunction &MF = MIRBuilder.getMF();
2541 Register TmpReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2542 GR->assignSPIRVTypeToVReg(StructType, TmpReg, MF);
2543 // Skip the first arg, it's the destination pointer. OpBuildNDRange takes
2544 // three other arguments, so pass a zero constant for any that are absent.
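// E.g. (assuming the OpenCL ndrange_2D/ndrange_3D constructors, illustrative):
// ndrange_2D(gws) supplies only the global size, so a zero constant stands
// in for the local size and the offset, while ndrange_2D(off, gws, lws)
// supplies all three operands.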
2545 unsigned NumArgs = Call->Arguments.size();
2546 assert(NumArgs >= 2);
2547 Register GlobalWorkSize = Call->Arguments[NumArgs < 4 ? 1 : 2];
2548 Register LocalWorkSize =
2549 NumArgs == 2 ? Register(0) : Call->Arguments[NumArgs < 4 ? 2 : 3];
2550 Register GlobalWorkOffset = NumArgs <= 3 ? Register(0) : Call->Arguments[1];
2551 if (NumArgs < 4) {
2552 Register Const;
2553 SPIRVType *SpvTy = GR->getSPIRVTypeForVReg(GlobalWorkSize);
2554 if (SpvTy->getOpcode() == SPIRV::OpTypePointer) {
2555 MachineInstr *DefInstr = MRI->getUniqueVRegDef(GlobalWorkSize);
2556 assert(DefInstr && isSpvIntrinsic(*DefInstr, Intrinsic::spv_gep) &&
2557 DefInstr->getOperand(3).isReg());
2558 Register GWSPtr = DefInstr->getOperand(3).getReg();
2559 // TODO: Maybe simplify generation of the type of the fields.
2560 unsigned Size = Call->Builtin->Name == "ndrange_3D" ? 3 : 2;
2561 unsigned BitWidth = GR->getPointerSize() == 64 ? 64 : 32;
2562 Type *BaseTy = IntegerType::get(MF.getFunction().getContext(), BitWidth);
2563 Type *FieldTy = ArrayType::get(BaseTy, Size);
2564 SPIRVType *SpvFieldTy = GR->getOrCreateSPIRVType(
2565 FieldTy, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, true);
2566 GlobalWorkSize = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2567 GR->assignSPIRVTypeToVReg(SpvFieldTy, GlobalWorkSize, MF);
2568 MIRBuilder.buildInstr(SPIRV::OpLoad)
2569 .addDef(GlobalWorkSize)
2570 .addUse(GR->getSPIRVTypeID(SpvFieldTy))
2571 .addUse(GWSPtr);
2572 const SPIRVSubtarget &ST =
2573 cast<SPIRVSubtarget>(MIRBuilder.getMF().getSubtarget());
2574 Const = GR->getOrCreateConstIntArray(0, Size, *MIRBuilder.getInsertPt(),
2575 SpvFieldTy, *ST.getInstrInfo());
2576 } else {
2577 Const = GR->buildConstantInt(0, MIRBuilder, SpvTy, true);
2578 }
2579 if (!LocalWorkSize.isValid())
2580 LocalWorkSize = Const;
2581 if (!GlobalWorkOffset.isValid())
2582 GlobalWorkOffset = Const;
2583 }
2584 assert(LocalWorkSize.isValid() && GlobalWorkOffset.isValid());
2585 MIRBuilder.buildInstr(SPIRV::OpBuildNDRange)
2586 .addDef(TmpReg)
2587 .addUse(TypeReg)
2588 .addUse(GlobalWorkSize)
2589 .addUse(LocalWorkSize)
2590 .addUse(GlobalWorkOffset);
2591 return MIRBuilder.buildInstr(SPIRV::OpStore)
2592 .addUse(Call->Arguments[0])
2593 .addUse(TmpReg);
2594}
2595
2596 // TODO: maybe move this to the global registry (SPIRVGlobalRegistry).
2597static SPIRVType *
2598 getOrCreateSPIRVDeviceEventPointer(MachineIRBuilder &MIRBuilder,
2599 SPIRVGlobalRegistry *GR) {
2600 LLVMContext &Context = MIRBuilder.getMF().getFunction().getContext();
2601 unsigned SC1 = storageClassToAddressSpace(SPIRV::StorageClass::Generic);
2602 Type *PtrType = PointerType::get(Context, SC1);
2603 return GR->getOrCreateSPIRVType(PtrType, MIRBuilder,
2604 SPIRV::AccessQualifier::ReadWrite, true);
2605}
2606
2607 static bool buildEnqueueKernel(const SPIRV::IncomingCall *Call,
2608 MachineIRBuilder &MIRBuilder,
2609 SPIRVGlobalRegistry *GR) {
2610 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2611 const DataLayout &DL = MIRBuilder.getDataLayout();
2612 bool IsSpirvOp = Call->isSpirvOp();
2613 bool HasEvents = Call->Builtin->Name.contains("events") || IsSpirvOp;
2614 const SPIRVType *Int32Ty = GR->getOrCreateSPIRVIntegerType(32, MIRBuilder);
2615
2616 // Build the vararg operands before emitting OpEnqueueKernel.
2617 // Local size arguments: sizes of the block invoke arguments. Clang generates
2618 // local size operands as an array, so we need to unpack them.
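// E.g. (illustrative) enqueue_kernel(queue, flags, range, block, s0, s1)
// reaches this point with {s0, s1} stored into a stack array; each element
// is re-addressed through an spv_gep intrinsic in the loop below.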
2619 SmallVector<Register, 16> LocalSizes;
2620 if (Call->Builtin->Name.contains("_varargs") || IsSpirvOp) {
2621 const unsigned LocalSizeArrayIdx = HasEvents ? 9 : 6;
2622 Register GepReg = Call->Arguments[LocalSizeArrayIdx];
2623 MachineInstr *GepMI = MRI->getUniqueVRegDef(GepReg);
2624 assert(isSpvIntrinsic(*GepMI, Intrinsic::spv_gep) &&
2625 GepMI->getOperand(3).isReg());
2626 Register ArrayReg = GepMI->getOperand(3).getReg();
2627 MachineInstr *ArrayMI = MRI->getUniqueVRegDef(ArrayReg);
2628 const Type *LocalSizeTy = getMachineInstrType(ArrayMI);
2629 assert(LocalSizeTy && "Local size type is expected");
2630 const uint64_t LocalSizeNum =
2631 cast<ArrayType>(LocalSizeTy)->getNumElements();
2632 unsigned SC = storageClassToAddressSpace(SPIRV::StorageClass::Generic);
2633 const LLT LLType = LLT::pointer(SC, GR->getPointerSize());
2634 const SPIRVType *PointerSizeTy = GR->getOrCreateSPIRVPointerType(
2635 Int32Ty, MIRBuilder, SPIRV::StorageClass::Function);
2636 for (unsigned I = 0; I < LocalSizeNum; ++I) {
2637 Register Reg = MRI->createVirtualRegister(&SPIRV::pIDRegClass);
2638 MRI->setType(Reg, LLType);
2639 GR->assignSPIRVTypeToVReg(PointerSizeTy, Reg, MIRBuilder.getMF());
2640 auto GEPInst = MIRBuilder.buildIntrinsic(
2641 Intrinsic::spv_gep, ArrayRef<Register>{Reg}, true, false);
2642 GEPInst
2643 .addImm(GepMI->getOperand(2).getImm()) // In bound.
2644 .addUse(ArrayMI->getOperand(0).getReg()) // Alloca.
2645 .addUse(buildConstantIntReg32(0, MIRBuilder, GR)) // Indices.
2646 .addUse(buildConstantIntReg32(I, MIRBuilder, GR));
2647 LocalSizes.push_back(Reg);
2648 }
2649 }
2650
2651 // SPIRV OpEnqueueKernel instruction has 10+ arguments.
2652 auto MIB = MIRBuilder.buildInstr(SPIRV::OpEnqueueKernel)
2653 .addDef(Call->ReturnRegister)
2654 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
2655
2656 // Copy all arguments before block invoke function pointer.
2657 const unsigned BlockFIdx = HasEvents ? 6 : 3;
2658 for (unsigned i = 0; i < BlockFIdx; i++)
2659 MIB.addUse(Call->Arguments[i]);
2660
2661 // If there are no event arguments in the original call, add dummy ones.
2662 if (!HasEvents) {
2663 MIB.addUse(buildConstantIntReg32(0, MIRBuilder, GR)); // Dummy num events.
2664 Register NullPtr = GR->getOrCreateConstNullPtr(
2665 MIRBuilder, getOrCreateSPIRVDeviceEventPointer(MIRBuilder, GR));
2666 MIB.addUse(NullPtr); // Dummy wait events.
2667 MIB.addUse(NullPtr); // Dummy ret event.
2668 }
2669
2670 MachineInstr *BlockMI = getBlockStructInstr(Call->Arguments[BlockFIdx], MRI);
2671 assert(BlockMI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
2672 // Invoke: Pointer to invoke function.
2673 MIB.addGlobalAddress(BlockMI->getOperand(1).getGlobal());
2674
2675 Register BlockLiteralReg = Call->Arguments[BlockFIdx + 1];
2676 // Param: Pointer to block literal.
2677 MIB.addUse(BlockLiteralReg);
2678
2679 Type *PType = const_cast<Type *>(getBlockStructType(BlockLiteralReg, MRI));
2680 // TODO: these numbers should be obtained from block literal structure.
2681 // Param Size: Size of block literal structure.
2682 MIB.addUse(buildConstantIntReg32(DL.getTypeStoreSize(PType), MIRBuilder, GR));
2683 // Param Alignment: Alignment of the block literal structure.
2684 MIB.addUse(buildConstantIntReg32(DL.getPrefTypeAlign(PType).value(),
2685 MIRBuilder, GR));
2686
2687 for (unsigned i = 0; i < LocalSizes.size(); i++)
2688 MIB.addUse(LocalSizes[i]);
2689 return true;
2690}
2691
2692 static bool generateEnqueueInst(const SPIRV::IncomingCall *Call,
2693 MachineIRBuilder &MIRBuilder,
2694 SPIRVGlobalRegistry *GR) {
2695 // Lookup the instruction opcode in the TableGen records.
2696 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2697 unsigned Opcode =
2698 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2699
2700 switch (Opcode) {
2701 case SPIRV::OpRetainEvent:
2702 case SPIRV::OpReleaseEvent:
2703 return MIRBuilder.buildInstr(Opcode).addUse(Call->Arguments[0]);
2704 case SPIRV::OpCreateUserEvent:
2705 case SPIRV::OpGetDefaultQueue:
2706 return MIRBuilder.buildInstr(Opcode)
2707 .addDef(Call->ReturnRegister)
2708 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
2709 case SPIRV::OpIsValidEvent:
2710 return MIRBuilder.buildInstr(Opcode)
2711 .addDef(Call->ReturnRegister)
2712 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2713 .addUse(Call->Arguments[0]);
2714 case SPIRV::OpSetUserEventStatus:
2715 return MIRBuilder.buildInstr(Opcode)
2716 .addUse(Call->Arguments[0])
2717 .addUse(Call->Arguments[1]);
2718 case SPIRV::OpCaptureEventProfilingInfo:
2719 return MIRBuilder.buildInstr(Opcode)
2720 .addUse(Call->Arguments[0])
2721 .addUse(Call->Arguments[1])
2722 .addUse(Call->Arguments[2]);
2723 case SPIRV::OpBuildNDRange:
2724 return buildNDRange(Call, MIRBuilder, GR);
2725 case SPIRV::OpEnqueueKernel:
2726 return buildEnqueueKernel(Call, MIRBuilder, GR);
2727 default:
2728 return false;
2729 }
2730}
2731
2732 static bool generateAsyncCopy(const SPIRV::IncomingCall *Call,
2733 MachineIRBuilder &MIRBuilder,
2734 SPIRVGlobalRegistry *GR) {
2735 // Lookup the instruction opcode in the TableGen records.
2736 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2737 unsigned Opcode =
2738 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2739
2740 bool IsSet = Opcode == SPIRV::OpGroupAsyncCopy;
2741 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
2742 if (Call->isSpirvOp())
2743 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2744 IsSet ? TypeReg : Register(0));
2745
2746 auto Scope = buildConstantIntReg32(SPIRV::Scope::Workgroup, MIRBuilder, GR);
2747
2748 switch (Opcode) {
2749 case SPIRV::OpGroupAsyncCopy: {
2750 SPIRVType *NewType =
2751 Call->ReturnType->getOpcode() == SPIRV::OpTypeEvent
2752 ? nullptr
2753 : GR->getOrCreateSPIRVTypeByName("spirv.Event", MIRBuilder, true);
2754 Register TypeReg = GR->getSPIRVTypeID(NewType ? NewType : Call->ReturnType);
2755 unsigned NumArgs = Call->Arguments.size();
2756 Register EventReg = Call->Arguments[NumArgs - 1];
2757 bool Res = MIRBuilder.buildInstr(Opcode)
2758 .addDef(Call->ReturnRegister)
2759 .addUse(TypeReg)
2760 .addUse(Scope)
2761 .addUse(Call->Arguments[0])
2762 .addUse(Call->Arguments[1])
2763 .addUse(Call->Arguments[2])
2764 .addUse(Call->Arguments.size() > 4
2765 ? Call->Arguments[3]
2766 : buildConstantIntReg32(1, MIRBuilder, GR))
2767 .addUse(EventReg);
2768 if (NewType != nullptr)
2769 insertAssignInstr(Call->ReturnRegister, nullptr, NewType, GR, MIRBuilder,
2770 MIRBuilder.getMF().getRegInfo());
2771 return Res;
2772 }
2773 case SPIRV::OpGroupWaitEvents:
2774 return MIRBuilder.buildInstr(Opcode)
2775 .addUse(Scope)
2776 .addUse(Call->Arguments[0])
2777 .addUse(Call->Arguments[1]);
2778 default:
2779 return false;
2780 }
2781}
2782
2783static bool generateConvertInst(const StringRef DemangledCall,
2784 const SPIRV::IncomingCall *Call,
2785 MachineIRBuilder &MIRBuilder,
2786 SPIRVGlobalRegistry *GR) {
2787 // Lookup the conversion builtin in the TableGen records.
2788 const SPIRV::ConvertBuiltin *Builtin =
2789 SPIRV::lookupConvertBuiltin(Call->Builtin->Name, Call->Builtin->Set);
2790
2791 if (!Builtin && Call->isSpirvOp()) {
2792 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2793 unsigned Opcode =
2794 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2795 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2796 GR->getSPIRVTypeID(Call->ReturnType));
2797 }
2798
2799 assert(Builtin && "Conversion builtin not found.");
2800 if (Builtin->IsSaturated)
2801 buildOpDecorate(Call->ReturnRegister, MIRBuilder,
2802 SPIRV::Decoration::SaturatedConversion, {});
2803 if (Builtin->IsRounded)
2804 buildOpDecorate(Call->ReturnRegister, MIRBuilder,
2805 SPIRV::Decoration::FPRoundingMode,
2806 {(unsigned)Builtin->RoundingMode});
2807
2808 std::string NeedExtMsg; // no errors if empty
2809 bool IsRightComponentsNumber = true; // check if input/output accepts vectors
2810 unsigned Opcode = SPIRV::OpNop;
2811 if (GR->isScalarOrVectorOfType(Call->Arguments[0], SPIRV::OpTypeInt)) {
2812 // Int -> ...
2813 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) {
2814 // Int -> Int
2815 if (Builtin->IsSaturated)
2816 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpSatConvertUToS
2817 : SPIRV::OpSatConvertSToU;
2818 else
2819 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpUConvert
2820 : SPIRV::OpSConvert;
2821 } else if (GR->isScalarOrVectorOfType(Call->ReturnRegister,
2822 SPIRV::OpTypeFloat)) {
2823 // Int -> Float
2824 if (Builtin->IsBfloat16) {
2825 const auto *ST = static_cast<const SPIRVSubtarget *>(
2826 &MIRBuilder.getMF().getSubtarget());
2827 if (!ST->canUseExtension(
2828 SPIRV::Extension::SPV_INTEL_bfloat16_conversion))
2829 NeedExtMsg = "SPV_INTEL_bfloat16_conversion";
2830 IsRightComponentsNumber =
2831 GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
2832 GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
2833 Opcode = SPIRV::OpConvertBF16ToFINTEL;
2834 } else {
2835 bool IsSourceSigned =
2836 DemangledCall[DemangledCall.find_first_of('(') + 1] != 'u';
2837 Opcode = IsSourceSigned ? SPIRV::OpConvertSToF : SPIRV::OpConvertUToF;
2838 }
2839 }
2840 } else if (GR->isScalarOrVectorOfType(Call->Arguments[0],
2841 SPIRV::OpTypeFloat)) {
2842 // Float -> ...
2843 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) {
2844 // Float -> Int
2845 if (Builtin->IsBfloat16) {
2846 const auto *ST = static_cast<const SPIRVSubtarget *>(
2847 &MIRBuilder.getMF().getSubtarget());
2848 if (!ST->canUseExtension(
2849 SPIRV::Extension::SPV_INTEL_bfloat16_conversion))
2850 NeedExtMsg = "SPV_INTEL_bfloat16_conversion";
2851 IsRightComponentsNumber =
2852 GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
2853 GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
2854 Opcode = SPIRV::OpConvertFToBF16INTEL;
2855 } else {
2856 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpConvertFToS
2857 : SPIRV::OpConvertFToU;
2858 }
2859 } else if (GR->isScalarOrVectorOfType(Call->ReturnRegister,
2860 SPIRV::OpTypeFloat)) {
2861 if (Builtin->IsTF32) {
2862 const auto *ST = static_cast<const SPIRVSubtarget *>(
2863 &MIRBuilder.getMF().getSubtarget());
2864 if (!ST->canUseExtension(
2865 SPIRV::Extension::SPV_INTEL_tensor_float32_conversion))
2866 NeedExtMsg = "SPV_INTEL_tensor_float32_conversion";
2867 IsRightComponentsNumber =
2868 GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
2869 GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
2870 Opcode = SPIRV::OpRoundFToTF32INTEL;
2871 } else {
2872 // Float -> Float
2873 Opcode = SPIRV::OpFConvert;
2874 }
2875 }
2876 }
2877
2878 if (!NeedExtMsg.empty()) {
2879 std::string DiagMsg = std::string(Builtin->Name) +
2880 ": the builtin requires the following SPIR-V "
2881 "extension: " +
2882 NeedExtMsg;
2883 report_fatal_error(DiagMsg.c_str(), false);
2884 }
2885 if (!IsRightComponentsNumber) {
2886 std::string DiagMsg =
2887 std::string(Builtin->Name) +
2888 ": result and argument must have the same number of components";
2889 report_fatal_error(DiagMsg.c_str(), false);
2890 }
2891 assert(Opcode != SPIRV::OpNop &&
2892 "Conversion between the types not implemented!");
2893
2894 MIRBuilder.buildInstr(Opcode)
2895 .addDef(Call->ReturnRegister)
2896 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2897 .addUse(Call->Arguments[0]);
2898 return true;
2899}
2900
2901 static bool generateVectorLoadStoreInst(const SPIRV::IncomingCall *Call,
2902 MachineIRBuilder &MIRBuilder,
2903 SPIRVGlobalRegistry *GR) {
2904 // Lookup the vector load/store builtin in the TableGen records.
2905 const SPIRV::VectorLoadStoreBuiltin *Builtin =
2906 SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name,
2907 Call->Builtin->Set);
2908 // Build extended instruction.
2909 auto MIB =
2910 MIRBuilder.buildInstr(SPIRV::OpExtInst)
2911 .addDef(Call->ReturnRegister)
2912 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2913 .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
2914 .addImm(Builtin->Number);
2915 for (auto Argument : Call->Arguments)
2916 MIB.addUse(Argument);
2917 if (Builtin->Name.contains("load") && Builtin->ElementCount > 1)
2918 MIB.addImm(Builtin->ElementCount);
2919
2920 // Rounding mode should be passed as the last argument in the MI for builtins
2921 // like "vstorea_halfn_r".
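// E.g. (illustrative) vstorea_half4_rtz(data, offset, p) appends the RTZ
// FPRoundingMode immediate after the regular arguments.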
2922 if (Builtin->IsRounded)
2923 MIB.addImm(static_cast<uint32_t>(Builtin->RoundingMode));
2924 return true;
2925}
2926
2927 static bool generateLoadStoreInst(const SPIRV::IncomingCall *Call,
2928 MachineIRBuilder &MIRBuilder,
2929 SPIRVGlobalRegistry *GR) {
2930 // Lookup the instruction opcode in the TableGen records.
2931 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2932 unsigned Opcode =
2933 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2934 bool IsLoad = Opcode == SPIRV::OpLoad;
2935 // Build the instruction.
2936 auto MIB = MIRBuilder.buildInstr(Opcode);
2937 if (IsLoad) {
2938 MIB.addDef(Call->ReturnRegister);
2939 MIB.addUse(GR->getSPIRVTypeID(Call->ReturnType));
2940 }
2941 // Add a pointer to the value to load/store.
2942 MIB.addUse(Call->Arguments[0]);
2943 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2944 // Add a value to store.
2945 if (!IsLoad)
2946 MIB.addUse(Call->Arguments[1]);
2947 // Add optional memory attributes and an alignment.
2948 unsigned NumArgs = Call->Arguments.size();
2949 if ((IsLoad && NumArgs >= 2) || NumArgs >= 3)
2950 MIB.addImm(getConstFromIntrinsic(Call->Arguments[IsLoad ? 1 : 2], MRI));
2951 if ((IsLoad && NumArgs >= 3) || NumArgs >= 4)
2952 MIB.addImm(getConstFromIntrinsic(Call->Arguments[IsLoad ? 2 : 3], MRI));
2953 return true;
2954}
2955
2956namespace SPIRV {
2957 // Try to find a builtin function's attributes by a demangled function name and
2958// return a tuple <builtin group, op code, ext instruction number>, or a special
2959// tuple value <-1, 0, 0> if the builtin function is not found.
2960// Not all builtin functions are supported, only those with a ready-to-use op
2961// code or instruction number defined in TableGen.
2962 // TODO: consider a major rework of mapping demangled calls to builtin
2963 // functions to unify the search and decrease the number of individual cases.
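// A minimal usage sketch (illustrative input and expected results):
//   auto [Group, Opcode, ExtNo] =
//       SPIRV::mapBuiltinToOpcode("barrier(unsigned int)",
//                                 SPIRV::InstructionSet::OpenCL_std);
//   // Group == SPIRV::Barrier, Opcode == SPIRV::OpControlBarrier, ExtNo == 0.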
2964std::tuple<int, unsigned, unsigned>
2965mapBuiltinToOpcode(const StringRef DemangledCall,
2966 SPIRV::InstructionSet::InstructionSet Set) {
2967 Register Reg;
2968 SmallVector<Register> Args;
2969 std::unique_ptr<const IncomingCall> Call =
2970 lookupBuiltin(DemangledCall, Set, Reg, nullptr, Args);
2971 if (!Call)
2972 return std::make_tuple(-1, 0, 0);
2973
2974 switch (Call->Builtin->Group) {
2975 case SPIRV::Relational:
2976 case SPIRV::Atomic:
2977 case SPIRV::Barrier:
2978 case SPIRV::CastToPtr:
2979 case SPIRV::ImageMiscQuery:
2980 case SPIRV::SpecConstant:
2981 case SPIRV::Enqueue:
2982 case SPIRV::AsyncCopy:
2983 case SPIRV::LoadStore:
2984 case SPIRV::CoopMatr:
2985 if (const auto *R =
2986 SPIRV::lookupNativeBuiltin(Call->Builtin->Name, Call->Builtin->Set))
2987 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2988 break;
2989 case SPIRV::Extended:
2990 if (const auto *R = SPIRV::lookupExtendedBuiltin(Call->Builtin->Name,
2991 Call->Builtin->Set))
2992 return std::make_tuple(Call->Builtin->Group, 0, R->Number);
2993 break;
2994 case SPIRV::VectorLoadStore:
2995 if (const auto *R = SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name,
2996 Call->Builtin->Set))
2997 return std::make_tuple(SPIRV::Extended, 0, R->Number);
2998 break;
2999 case SPIRV::Group:
3000 if (const auto *R = SPIRV::lookupGroupBuiltin(Call->Builtin->Name))
3001 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
3002 break;
3003 case SPIRV::AtomicFloating:
3004 if (const auto *R = SPIRV::lookupAtomicFloatingBuiltin(Call->Builtin->Name))
3005 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
3006 break;
3007 case SPIRV::IntelSubgroups:
3008 if (const auto *R = SPIRV::lookupIntelSubgroupsBuiltin(Call->Builtin->Name))
3009 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
3010 break;
3011 case SPIRV::GroupUniform:
3012 if (const auto *R = SPIRV::lookupGroupUniformBuiltin(Call->Builtin->Name))
3013 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
3014 break;
3015 case SPIRV::IntegerDot:
3016 if (const auto *R =
3017 SPIRV::lookupIntegerDotProductBuiltin(Call->Builtin->Name))
3018 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
3019 break;
3020 case SPIRV::WriteImage:
3021 return std::make_tuple(Call->Builtin->Group, SPIRV::OpImageWrite, 0);
3022 case SPIRV::Select:
3023 return std::make_tuple(Call->Builtin->Group, TargetOpcode::G_SELECT, 0);
3024 case SPIRV::Construct:
3025 return std::make_tuple(Call->Builtin->Group, SPIRV::OpCompositeConstruct,
3026 0);
3027 case SPIRV::KernelClock:
3028 return std::make_tuple(Call->Builtin->Group, SPIRV::OpReadClockKHR, 0);
3029 default:
3030 return std::make_tuple(-1, 0, 0);
3031 }
3032 return std::make_tuple(-1, 0, 0);
3033}
3034
3035std::optional<bool> lowerBuiltin(const StringRef DemangledCall,
3036 SPIRV::InstructionSet::InstructionSet Set,
3037 MachineIRBuilder &MIRBuilder,
3038 const Register OrigRet, const Type *OrigRetTy,
3039 const SmallVectorImpl<Register> &Args,
3040 SPIRVGlobalRegistry *GR, const CallBase &CB) {
3041 LLVM_DEBUG(dbgs() << "Lowering builtin call: " << DemangledCall << "\n");
3042
3043 // Lookup the builtin in the TableGen records.
3044 SPIRVType *SpvType = GR->getSPIRVTypeForVReg(OrigRet);
3045 assert(SpvType && "Inconsistent return register: expected valid type info");
3046 std::unique_ptr<const IncomingCall> Call =
3047 lookupBuiltin(DemangledCall, Set, OrigRet, SpvType, Args);
3048
3049 if (!Call) {
3050 LLVM_DEBUG(dbgs() << "Builtin record was not found!\n");
3051 return std::nullopt;
3052 }
3053
3054 // TODO: check if the provided args meet the builtin requirements.
3055 assert(Args.size() >= Call->Builtin->MinNumArgs &&
3056 "Too few arguments to generate the builtin");
3057 if (Call->Builtin->MaxNumArgs && Args.size() > Call->Builtin->MaxNumArgs)
3058 LLVM_DEBUG(dbgs() << "More arguments provided than required!\n");
3059
3060 // Match the builtin with implementation based on the grouping.
3061 switch (Call->Builtin->Group) {
3062 case SPIRV::Extended:
3063 return generateExtInst(Call.get(), MIRBuilder, GR, CB);
3064 case SPIRV::Relational:
3065 return generateRelationalInst(Call.get(), MIRBuilder, GR);
3066 case SPIRV::Group:
3067 return generateGroupInst(Call.get(), MIRBuilder, GR);
3068 case SPIRV::Variable:
3069 return generateBuiltinVar(Call.get(), MIRBuilder, GR);
3070 case SPIRV::Atomic:
3071 return generateAtomicInst(Call.get(), MIRBuilder, GR);
3072 case SPIRV::AtomicFloating:
3073 return generateAtomicFloatingInst(Call.get(), MIRBuilder, GR);
3074 case SPIRV::Barrier:
3075 return generateBarrierInst(Call.get(), MIRBuilder, GR);
3076 case SPIRV::CastToPtr:
3077 return generateCastToPtrInst(Call.get(), MIRBuilder, GR);
3078 case SPIRV::Dot:
3079 case SPIRV::IntegerDot:
3080 return generateDotOrFMulInst(DemangledCall, Call.get(), MIRBuilder, GR);
3081 case SPIRV::Wave:
3082 return generateWaveInst(Call.get(), MIRBuilder, GR);
3083 case SPIRV::ICarryBorrow:
3084 return generateICarryBorrowInst(Call.get(), MIRBuilder, GR);
3085 case SPIRV::GetQuery:
3086 return generateGetQueryInst(Call.get(), MIRBuilder, GR);
3087 case SPIRV::ImageSizeQuery:
3088 return generateImageSizeQueryInst(Call.get(), MIRBuilder, GR);
3089 case SPIRV::ImageMiscQuery:
3090 return generateImageMiscQueryInst(Call.get(), MIRBuilder, GR);
3091 case SPIRV::ReadImage:
3092 return generateReadImageInst(DemangledCall, Call.get(), MIRBuilder, GR);
3093 case SPIRV::WriteImage:
3094 return generateWriteImageInst(Call.get(), MIRBuilder, GR);
3095 case SPIRV::SampleImage:
3096 return generateSampleImageInst(DemangledCall, Call.get(), MIRBuilder, GR);
3097 case SPIRV::Select:
3098 return generateSelectInst(Call.get(), MIRBuilder);
3099 case SPIRV::Construct:
3100 return generateConstructInst(Call.get(), MIRBuilder, GR);
3101 case SPIRV::SpecConstant:
3102 return generateSpecConstantInst(Call.get(), MIRBuilder, GR);
3103 case SPIRV::Enqueue:
3104 return generateEnqueueInst(Call.get(), MIRBuilder, GR);
3105 case SPIRV::AsyncCopy:
3106 return generateAsyncCopy(Call.get(), MIRBuilder, GR);
3107 case SPIRV::Convert:
3108 return generateConvertInst(DemangledCall, Call.get(), MIRBuilder, GR);
3109 case SPIRV::VectorLoadStore:
3110 return generateVectorLoadStoreInst(Call.get(), MIRBuilder, GR);
3111 case SPIRV::LoadStore:
3112 return generateLoadStoreInst(Call.get(), MIRBuilder, GR);
3113 case SPIRV::IntelSubgroups:
3114 return generateIntelSubgroupsInst(Call.get(), MIRBuilder, GR);
3115 case SPIRV::GroupUniform:
3116 return generateGroupUniformInst(Call.get(), MIRBuilder, GR);
3117 case SPIRV::KernelClock:
3118 return generateKernelClockInst(Call.get(), MIRBuilder, GR);
3119 case SPIRV::CoopMatr:
3120 return generateCoopMatrInst(Call.get(), MIRBuilder, GR);
3121 case SPIRV::ExtendedBitOps:
3122 return generateExtendedBitOpsInst(Call.get(), MIRBuilder, GR);
3123 case SPIRV::BindlessINTEL:
3124 return generateBindlessImageINTELInst(Call.get(), MIRBuilder, GR);
3125 case SPIRV::TernaryBitwiseINTEL:
3126 return generateTernaryBitwiseFunctionINTELInst(Call.get(), MIRBuilder, GR);
3127 case SPIRV::Block2DLoadStore:
3128 return generate2DBlockIOINTELInst(Call.get(), MIRBuilder, GR);
3129 case SPIRV::Pipe:
3130 return generatePipeInst(Call.get(), MIRBuilder, GR);
3131 case SPIRV::PredicatedLoadStore:
3132 return generatePredicatedLoadStoreInst(Call.get(), MIRBuilder, GR);
3133 case SPIRV::BlockingPipes:
3134 return generateBlockingPipesInst(Call.get(), MIRBuilder, GR);
3135 case SPIRV::ArbitraryPrecisionFixedPoint:
3136 return generateAPFixedPointInst(Call.get(), MIRBuilder, GR);
3137 }
3138 return false;
3139}
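// Editorial note (illustrative, not part of the original file): the return
// value is deliberately tri-state. std::nullopt means "no TableGen record for
// this demangled name", so the caller can fall back to ordinary call
// lowering; true/false report whether a matched builtin was successfully
// lowered. A caller sketch, with Demangled, Ret, RetTy etc. assumed inputs:
//
//   if (std::optional<bool> Res = SPIRV::lowerBuiltin(
//           Demangled, Set, MIRBuilder, Ret, RetTy, Args, GR, CB)) {
//     if (!*Res)
//       ; // matched a builtin record but failed to lower it
//   } else {
//     ; // not a builtin: lower as a plain call instead
//   }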
3140
3141Type *parseBuiltinCallArgumentType(StringRef TypeStr, LLVMContext &Ctx) {
3142 // Parse strings representing OpenCL builtin types.
3143 if (hasBuiltinTypePrefix(TypeStr)) {
3144 // OpenCL builtin types in demangled call strings have the following format:
3145 // e.g. ocl_image2d_ro
3146 [[maybe_unused]] bool IsOCLBuiltinType = TypeStr.consume_front("ocl_");
3147 assert(IsOCLBuiltinType && "Invalid OpenCL builtin prefix");
3148
3149 // Check if this is a pointer to a builtin type, rather than just a
3150 // pointer representing a builtin type. If it is a pointer to a builtin
3151 // type, the method calling parseBuiltinCallArgumentBaseType(...) will
3152 // require additional handling, as this function only retrieves the
3153 // base types.
3154 if (TypeStr.ends_with("*"))
3155 TypeStr = TypeStr.slice(0, TypeStr.find_first_of(" *"));
3156
3157 return parseBuiltinTypeNameToTargetExtType("opencl." + TypeStr.str() + "_t",
3158 Ctx);
3159 }
3160
3161 // Parse type name in either "typeN" or "type vector[N]" format, where
3162 // N is the number of elements of the vector.
3163 Type *BaseType;
3164 unsigned VecElts = 0;
3165
3166 BaseType = parseBasicTypeName(TypeStr, Ctx);
3167 if (!BaseType)
3168 // Unable to recognize SPIRV type name.
3169 return nullptr;
3170
3171 // Handle "typeN*" or "type vector[N]*".
3172 TypeStr.consume_back("*");
3173
3174 if (TypeStr.consume_front(" vector["))
3175 TypeStr = TypeStr.substr(0, TypeStr.find(']'));
3176
3177 TypeStr.getAsInteger(10, VecElts);
3178 if (VecElts > 0)
3179 BaseType = VectorType::get(
3180 BaseType->isVoidTy() ? Type::getInt8Ty(Ctx) : BaseType, VecElts, false);
3181
3182 return BaseType;
3183}
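// Editorial examples (derived from the parsing rules above, not part of the
// original file), assuming parseBasicTypeName recognizes the usual OpenCL
// scalar names:
//
//   "float"          -> float
//   "char4"          -> <4 x i8>     ("typeN" form)
//   "uint vector[2]" -> <2 x i32>    ("type vector[N]" form)
//   "float4*"        -> <4 x float>  (the trailing '*' is dropped here; the
//                                     caller handles the indirection)
//
// Note the special case above: a vector of void is rebuilt as a vector of i8.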
3184
3185bool parseBuiltinTypeStr(SmallVector<StringRef, 10> &BuiltinArgsTypeStrs,
3186 const StringRef DemangledCall, LLVMContext &Ctx) {
3187 auto Pos1 = DemangledCall.find('(');
3188 if (Pos1 == StringRef::npos)
3189 return false;
3190 auto Pos2 = DemangledCall.find(')');
3191 if (Pos2 == StringRef::npos || Pos1 > Pos2)
3192 return false;
3193 DemangledCall.slice(Pos1 + 1, Pos2)
3194 .split(BuiltinArgsTypeStrs, ',', -1, false);
3195 return true;
3196}
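// Editorial example (not part of the original file): for the demangled call
// skeleton "fract(float, float*)" this fills BuiltinArgsTypeStrs with
// {"float", " float*"}; callers trim each entry before parsing it, as
// parseBuiltinCallArgumentBaseType does below.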
3197
3198Type *parseBuiltinCallArgumentBaseType(const StringRef DemangledCall,
3199 unsigned ArgIdx, LLVMContext &Ctx) {
3200 SmallVector<StringRef, 10> BuiltinArgsTypeStrs;
3201 parseBuiltinTypeStr(BuiltinArgsTypeStrs, DemangledCall, Ctx);
3202 if (ArgIdx >= BuiltinArgsTypeStrs.size())
3203 return nullptr;
3204 StringRef TypeStr = BuiltinArgsTypeStrs[ArgIdx].trim();
3205 return parseBuiltinCallArgumentType(TypeStr, Ctx);
3206}
3207
3208struct BuiltinType {
3209 StringRef Name;
3210 uint32_t Opcode;
3211};
3212
3213#define GET_BuiltinTypes_DECL
3214#define GET_BuiltinTypes_IMPL
3215
3216struct OpenCLType {
3217 StringRef Name;
3218 StringRef SpirvTypeLiteral;
3219};
3220
3221#define GET_OpenCLTypes_DECL
3222#define GET_OpenCLTypes_IMPL
3223
3224#include "SPIRVGenTables.inc"
3225} // namespace SPIRV
3226
3227//===----------------------------------------------------------------------===//
3228// Misc functions for parsing builtin types.
3229//===----------------------------------------------------------------------===//
3230
3231static Type *parseTypeString(const StringRef Name, LLVMContext &Context) {
3232 if (Name.starts_with("void"))
3233 return Type::getVoidTy(Context);
3234 else if (Name.starts_with("int") || Name.starts_with("uint"))
3235 return Type::getInt32Ty(Context);
3236 else if (Name.starts_with("float"))
3237 return Type::getFloatTy(Context);
3238 else if (Name.starts_with("half"))
3239 return Type::getHalfTy(Context);
3240 report_fatal_error("Unable to recognize type!");
3241}
3242
3243//===----------------------------------------------------------------------===//
3244// Implementation functions for builtin types.
3245//===----------------------------------------------------------------------===//
3246
3247static SPIRVType *getNonParameterizedType(const TargetExtType *ExtensionType,
3248 const SPIRV::BuiltinType *TypeRecord,
3249 MachineIRBuilder &MIRBuilder,
3250 SPIRVGlobalRegistry *GR) {
3251 unsigned Opcode = TypeRecord->Opcode;
3252 // Create or get an existing type from GlobalRegistry.
3253 return GR->getOrCreateOpTypeByOpcode(ExtensionType, MIRBuilder, Opcode);
3254}
3255
3256static SPIRVType *getSamplerType(MachineIRBuilder &MIRBuilder,
3257 SPIRVGlobalRegistry *GR) {
3258 // Create or get an existing type from GlobalRegistry.
3259 return GR->getOrCreateOpTypeSampler(MIRBuilder);
3260}
3261
3262static SPIRVType *getPipeType(const TargetExtType *ExtensionType,
3263 MachineIRBuilder &MIRBuilder,
3264 SPIRVGlobalRegistry *GR) {
3265 assert(ExtensionType->getNumIntParameters() == 1 &&
3266 "Invalid number of parameters for SPIR-V pipe builtin!");
3267 // Create or get an existing type from GlobalRegistry.
3268 return GR->getOrCreateOpTypePipe(MIRBuilder,
3269 SPIRV::AccessQualifier::AccessQualifier(
3270 ExtensionType->getIntParameter(0)));
3271}
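// Editorial example (hypothetical, not from the original file): the single
// integer parameter is the SPIR-V access qualifier (0 = ReadOnly,
// 1 = WriteOnly, 2 = ReadWrite), so a read-only pipe could be spelled
// target("spirv.Pipe", 0) in LLVM IR.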
3272
3273static SPIRVType *getCoopMatrType(const TargetExtType *ExtensionType,
3274 MachineIRBuilder &MIRBuilder,
3275 SPIRVGlobalRegistry *GR) {
3276 assert(ExtensionType->getNumIntParameters() == 4 &&
3277 "Invalid number of parameters for SPIR-V coop matrices builtin!");
3278 assert(ExtensionType->getNumTypeParameters() == 1 &&
3279 "SPIR-V coop matrices builtin type must have a type parameter!");
3280 const SPIRVType *ElemType =
3281 GR->getOrCreateSPIRVType(ExtensionType->getTypeParameter(0), MIRBuilder,
3282 SPIRV::AccessQualifier::ReadWrite, true);
3283 // Create or get an existing type from GlobalRegistry.
3284 return GR->getOrCreateOpTypeCoopMatr(
3285 MIRBuilder, ExtensionType, ElemType, ExtensionType->getIntParameter(0),
3286 ExtensionType->getIntParameter(1), ExtensionType->getIntParameter(2),
3287 ExtensionType->getIntParameter(3), true);
3288}
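// Editorial example (hypothetical, not from the original file): the four
// integer parameters are forwarded as Scope, Rows, Columns and Use. Assuming
// SPIR-V scope numbering where Subgroup == 3, and use 0 meaning MatrixAKHR, a
// subgroup-scoped 16x16 half matrix-A operand could be spelled:
//
//   target("spirv.CooperativeMatrixKHR", half, 3, 16, 16, 0)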
3289
3290static SPIRVType *getSampledImageType(const TargetExtType *OpaqueType,
3291 MachineIRBuilder &MIRBuilder,
3292 SPIRVGlobalRegistry *GR) {
3293 SPIRVType *OpaqueImageType = GR->getImageType(
3294 OpaqueType, SPIRV::AccessQualifier::ReadOnly, MIRBuilder);
3295 // Create or get an existing type from GlobalRegistry.
3296 return GR->getOrCreateOpTypeSampledImage(OpaqueImageType, MIRBuilder);
3297}
3298
3299static SPIRVType *getInlineSpirvType(const TargetExtType *ExtensionType,
3300 MachineIRBuilder &MIRBuilder,
3301 SPIRVGlobalRegistry *GR) {
3302 assert(ExtensionType->getNumIntParameters() == 3 &&
3303 "Inline SPIR-V type builtin takes an opcode, size, and alignment "
3304 "parameter");
3305 auto Opcode = ExtensionType->getIntParameter(0);
3306
3307 SmallVector<MCOperand> Operands;
3308 for (Type *Param : ExtensionType->type_params()) {
3309 if (const TargetExtType *ParamEType = dyn_cast<TargetExtType>(Param)) {
3310 if (ParamEType->getName() == "spirv.IntegralConstant") {
3311 assert(ParamEType->getNumTypeParameters() == 1 &&
3312 "Inline SPIR-V integral constant builtin must have a type "
3313 "parameter");
3314 assert(ParamEType->getNumIntParameters() == 1 &&
3315 "Inline SPIR-V integral constant builtin must have a "
3316 "value parameter");
3317
3318 auto OperandValue = ParamEType->getIntParameter(0);
3319 auto *OperandType = ParamEType->getTypeParameter(0);
3320
3321 const SPIRVType *OperandSPIRVType = GR->getOrCreateSPIRVType(
3322 OperandType, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, true);
3323
3324 Operands.push_back(MCOperand::createReg(GR->buildConstantInt(
3325 OperandValue, MIRBuilder, OperandSPIRVType, true)));
3326 continue;
3327 } else if (ParamEType->getName() == "spirv.Literal") {
3328 assert(ParamEType->getNumTypeParameters() == 0 &&
3329 "Inline SPIR-V literal builtin does not take type "
3330 "parameters");
3331 assert(ParamEType->getNumIntParameters() == 1 &&
3332 "Inline SPIR-V literal builtin must have an integer "
3333 "parameter");
3334
3335 auto OperandValue = ParamEType->getIntParameter(0);
3336
3337 Operands.push_back(MCOperand::createImm(OperandValue));
3338 continue;
3339 }
3340 }
3341 const SPIRVType *TypeOperand = GR->getOrCreateSPIRVType(
3342 Param, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, true);
3343 Operands.push_back(MCOperand::createReg(GR->getSPIRVTypeID(TypeOperand)));
3344 }
3345
3346 return GR->getOrCreateUnknownType(ExtensionType, MIRBuilder, Opcode,
3347 Operands);
3348}
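// Editorial example (hypothetical, not from the original file): assuming
// SPIR-V opcode 21 is OpTypeInt, the inline type below would lower to
// "OpTypeInt 32 0". Each spirv.Literal becomes an immediate operand, and the
// trailing integers are the opcode, size, and alignment parameters checked by
// the assert above:
//
//   target("spirv.Type", target("spirv.Literal", 32),
//          target("spirv.Literal", 0), 21, 4, 4)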
3349
3350static SPIRVType *getVulkanBufferType(const TargetExtType *ExtensionType,
3351 MachineIRBuilder &MIRBuilder,
3352 SPIRVGlobalRegistry *GR) {
3353 assert(ExtensionType->getNumTypeParameters() == 1 &&
3354 "Vulkan buffers have exactly one type for the type of the buffer.");
3355 assert(ExtensionType->getNumIntParameters() == 2 &&
3356 "Vulkan buffer have 2 integer parameters: storage class and is "
3357 "writable.");
3358
3359 auto *T = ExtensionType->getTypeParameter(0);
3360 auto SC = static_cast<SPIRV::StorageClass::StorageClass>(
3361 ExtensionType->getIntParameter(0));
3362 bool IsWritable = ExtensionType->getIntParameter(1);
3363 return GR->getOrCreateVulkanBufferType(MIRBuilder, T, SC, IsWritable);
3364}
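// Editorial example (hypothetical, not from the original file): assuming the
// SPIR-V storage-class numbering where StorageBuffer == 12, a writable
// storage buffer holding i32 could be spelled:
//
//   target("spirv.VulkanBuffer", i32, 12, 1)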
3365
3366static SPIRVType *getLayoutType(const TargetExtType *ExtensionType,
3367 MachineIRBuilder &MIRBuilder,
3368 SPIRVGlobalRegistry *GR) {
3369 return GR->getOrCreateLayoutType(MIRBuilder, ExtensionType);
3370}
3371
3372namespace SPIRV {
3373TargetExtType *parseBuiltinTypeNameToTargetExtType(std::string TypeName,
3374 LLVMContext &Context) {
3375 StringRef NameWithParameters = TypeName;
3376
3377 // Pointers-to-opaque-structs representing OpenCL types are first translated
3378 // to equivalent SPIR-V types. OpenCL builtin type names should have the
3379 // following format: e.g. %opencl.event_t
3380 if (NameWithParameters.starts_with("opencl.")) {
3381 const SPIRV::OpenCLType *OCLTypeRecord =
3382 SPIRV::lookupOpenCLType(NameWithParameters);
3383 if (!OCLTypeRecord)
3384 report_fatal_error("Missing TableGen record for OpenCL type: " +
3385 NameWithParameters);
3386 NameWithParameters = OCLTypeRecord->SpirvTypeLiteral;
3387 // Continue with the SPIR-V builtin type...
3388 }
3389
3390 // Names of the opaque structs representing SPIR-V builtins without
3391 // parameters should have the following format: e.g. %spirv.Event
3392 assert(NameWithParameters.starts_with("spirv.") &&
3393 "Unknown builtin opaque type!");
3394
3395 // Parameterized SPIR-V builtin names follow this format:
3396 // e.g. %spirv.Image._void_1_0_0_0_0_0_0, %spirv.Pipe._0
3397 if (!NameWithParameters.contains('_'))
3398 return TargetExtType::get(Context, NameWithParameters);
3399
3400 SmallVector<StringRef> Parameters;
3401 unsigned BaseNameLength = NameWithParameters.find('_') - 1;
3402 SplitString(NameWithParameters.substr(BaseNameLength + 1), Parameters, "_");
3403
3404 SmallVector<Type *, 1> TypeParameters;
3405 bool HasTypeParameter = !isDigit(Parameters[0][0]);
3406 if (HasTypeParameter)
3407 TypeParameters.push_back(parseTypeString(Parameters[0], Context));
3408 SmallVector<unsigned> IntParameters;
3409 for (unsigned i = HasTypeParameter ? 1 : 0; i < Parameters.size(); i++) {
3410 unsigned IntParameter = 0;
3411 bool ValidLiteral = !Parameters[i].getAsInteger(10, IntParameter);
3412 (void)ValidLiteral;
3413 assert(ValidLiteral &&
3414 "Invalid format of SPIR-V builtin parameter literal!");
3415 IntParameters.push_back(IntParameter);
3416 }
3417 return TargetExtType::get(Context,
3418 NameWithParameters.substr(0, BaseNameLength),
3419 TypeParameters, IntParameters);
3420}
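// Editorial example (derived from the format described above, not part of the
// original file): "%spirv.Image._void_1_0_0_0_0_0_0" splits into the base
// name "spirv.Image", one type parameter (void, via parseTypeString) and the
// integer parameters {1, 0, 0, 0, 0, 0, 0}; "%opencl.event_t" is first mapped
// through its OpenCLType TableGen record to the "%spirv.Event" literal before
// the same parsing applies.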
3421
3422SPIRVType *lowerBuiltinType(const Type *OpaqueType,
3423 SPIRV::AccessQualifier::AccessQualifier AccessQual,
3424 MachineIRBuilder &MIRBuilder,
3425 SPIRVGlobalRegistry *GR) {
3426 // In LLVM IR, SPIR-V and OpenCL builtin types are represented as either
3427 // target(...) target extension types or pointers-to-opaque-structs. The
3428 // approach relying on structs is deprecated and works only in the non-opaque
3429 // pointer mode (-opaque-pointers=0).
3430 // In order to maintain compatibility with LLVM IR generated by older
3431 // versions of Clang and the LLVM/SPIR-V Translator, pointers-to-opaque-
3432 // structs are "translated" to target extension types. This translation is
3433 // temporary and will be removed in a future release of LLVM.
3434 const TargetExtType *BuiltinType = dyn_cast<TargetExtType>(OpaqueType);
3435 if (!BuiltinType)
3436 BuiltinType = parseBuiltinTypeNameToTargetExtType(
3437 OpaqueType->getStructName().str(), MIRBuilder.getContext());
3438
3439 unsigned NumStartingVRegs = MIRBuilder.getMRI()->getNumVirtRegs();
3440
3441 const StringRef Name = BuiltinType->getName();
3442 LLVM_DEBUG(dbgs() << "Lowering builtin type: " << Name << "\n");
3443
3444 SPIRVType *TargetType;
3445 if (Name == "spirv.Type") {
3446 TargetType = getInlineSpirvType(BuiltinType, MIRBuilder, GR);
3447 } else if (Name == "spirv.VulkanBuffer") {
3448 TargetType = getVulkanBufferType(BuiltinType, MIRBuilder, GR);
3449 } else if (Name == "spirv.Padding") {
3450 TargetType = GR->getOrCreatePaddingType(MIRBuilder);
3451 } else if (Name == "spirv.Layout") {
3452 TargetType = getLayoutType(BuiltinType, MIRBuilder, GR);
3453 } else {
3454 // Look up the demangled builtin type in the TableGen records.
3455 const SPIRV::BuiltinType *TypeRecord = SPIRV::lookupBuiltinType(Name);
3456 if (!TypeRecord)
3457 report_fatal_error("Missing TableGen record for builtin type: " + Name);
3458
3459 // "Lower" the BuiltinType into TargetType. The following get<...>Type
3460 // methods use the implementation details from TableGen records or
3461 // TargetExtType parameters to either create a new OpType<...> machine
3462 // instruction or get an existing equivalent SPIRVType from
3463 // GlobalRegistry.
3464
3465 switch (TypeRecord->Opcode) {
3466 case SPIRV::OpTypeImage:
3467 TargetType = GR->getImageType(BuiltinType, AccessQual, MIRBuilder);
3468 break;
3469 case SPIRV::OpTypePipe:
3470 TargetType = getPipeType(BuiltinType, MIRBuilder, GR);
3471 break;
3472 case SPIRV::OpTypeDeviceEvent:
3473 TargetType = GR->getOrCreateOpTypeDeviceEvent(MIRBuilder);
3474 break;
3475 case SPIRV::OpTypeSampler:
3476 TargetType = getSamplerType(MIRBuilder, GR);
3477 break;
3478 case SPIRV::OpTypeSampledImage:
3479 TargetType = getSampledImageType(BuiltinType, MIRBuilder, GR);
3480 break;
3481 case SPIRV::OpTypeCooperativeMatrixKHR:
3482 TargetType = getCoopMatrType(BuiltinType, MIRBuilder, GR);
3483 break;
3484 default:
3485 TargetType =
3486 getNonParameterizedType(BuiltinType, TypeRecord, MIRBuilder, GR);
3487 break;
3488 }
3489 }
3490
3491 // Emit OpName instruction if a new OpType<...> instruction was added
3492 // (equivalent type was not found in GlobalRegistry).
3493 if (NumStartingVRegs < MIRBuilder.getMRI()->getNumVirtRegs())
3494 buildOpName(GR->getSPIRVTypeID(TargetType), Name, MIRBuilder);
3495
3496 return TargetType;
3497}
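// Editorial walkthrough (not part of the original file): for the legacy
// opaque-struct spelling "%opencl.image2d_ro_t", this function would rebuild
// the target extension type via parseBuiltinTypeNameToTargetExtType, reach
// the OpTypeImage case in the switch above, and create (or reuse) the
// corresponding image type, attaching an OpName only if a new OpType
// instruction was actually emitted.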
3498} // namespace SPIRV
3499} // namespace llvm