//===- SPIRVBuiltins.cpp - SPIR-V Built-in Functions ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements lowering builtin function calls and types using their
// demangled names and TableGen records.
//
//===----------------------------------------------------------------------===//

#include "SPIRVBuiltins.h"
#include "SPIRV.h"
#include "SPIRVSubtarget.h"
#include "SPIRVUtils.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IntrinsicsSPIRV.h"
#include <regex>
#include <string>
#include <tuple>

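// The structs below mirror the record classes defined in SPIRVBuiltins.td;
// each GET_*_DECL/GET_*_IMPL pair pulls the corresponding generated lookup
// tables and lookup*() functions out of SPIRVGenTables.inc.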
#define DEBUG_TYPE "spirv-builtins"

namespace llvm {
namespace SPIRV {
#define GET_BuiltinGroup_DECL
#include "SPIRVGenTables.inc"

struct DemangledBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  BuiltinGroup Group;
  uint8_t MinNumArgs;
  uint8_t MaxNumArgs;
};

#define GET_DemangledBuiltins_DECL
#define GET_DemangledBuiltins_IMPL

struct IncomingCall {
  const std::string BuiltinName;
  const DemangledBuiltin *Builtin;
  const Register ReturnRegister;
  const SPIRVType *ReturnType;
  const SmallVectorImpl<Register> &Arguments;

  IncomingCall(const std::string BuiltinName, const DemangledBuiltin *Builtin,
               const Register ReturnRegister, const SPIRVType *ReturnType,
               const SmallVectorImpl<Register> &Arguments)
      : BuiltinName(BuiltinName), Builtin(Builtin),
        ReturnRegister(ReturnRegister), ReturnType(ReturnType),
        Arguments(Arguments) {}

  bool isSpirvOp() const { return BuiltinName.rfind("__spirv_", 0) == 0; }
};

struct NativeBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  uint32_t Opcode;
};

#define GET_NativeBuiltins_DECL
#define GET_NativeBuiltins_IMPL

struct GroupBuiltin {
  StringRef Name;
  uint32_t Opcode;
  uint32_t GroupOperation;
  bool IsElect;
  bool IsAllOrAny;
  bool IsAllEqual;
  bool IsBallot;
  bool IsInverseBallot;
  bool IsBallotBitExtract;
  bool IsBallotFindBit;
  bool IsLogical;
  bool NoGroupOperation;
  bool HasBoolArg;
};

#define GET_GroupBuiltins_DECL
#define GET_GroupBuiltins_IMPL

struct IntelSubgroupsBuiltin {
  StringRef Name;
  uint32_t Opcode;
  bool IsBlock;
  bool IsWrite;
  bool IsMedia;
};

#define GET_IntelSubgroupsBuiltins_DECL
#define GET_IntelSubgroupsBuiltins_IMPL

struct AtomicFloatingBuiltin {
  StringRef Name;
  uint32_t Opcode;
};

#define GET_AtomicFloatingBuiltins_DECL
#define GET_AtomicFloatingBuiltins_IMPL

struct GroupUniformBuiltin {
  StringRef Name;
  uint32_t Opcode;
  bool IsLogical;
};

#define GET_GroupUniformBuiltins_DECL
#define GET_GroupUniformBuiltins_IMPL

struct GetBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  BuiltIn::BuiltIn Value;
};

using namespace BuiltIn;
#define GET_GetBuiltins_DECL
#define GET_GetBuiltins_IMPL

struct ImageQueryBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  uint32_t Component;
};

#define GET_ImageQueryBuiltins_DECL
#define GET_ImageQueryBuiltins_IMPL

struct IntegerDotProductBuiltin {
  StringRef Name;
  uint32_t Opcode;
};

#define GET_IntegerDotProductBuiltins_DECL
#define GET_IntegerDotProductBuiltins_IMPL

struct ConvertBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  bool IsDestinationSigned;
  bool IsSaturated;
  bool IsRounded;
  bool IsBfloat16;
  bool IsTF32;
  FPRoundingMode::FPRoundingMode RoundingMode;
};

struct VectorLoadStoreBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  uint32_t Number;
  uint32_t ElementCount;
  bool IsRounded;
  FPRoundingMode::FPRoundingMode RoundingMode;
};

using namespace FPRoundingMode;
#define GET_ConvertBuiltins_DECL
#define GET_ConvertBuiltins_IMPL

using namespace InstructionSet;
#define GET_VectorLoadStoreBuiltins_DECL
#define GET_VectorLoadStoreBuiltins_IMPL

#define GET_CLMemoryScope_DECL
#define GET_CLSamplerAddressingMode_DECL
#define GET_CLMemoryFenceFlags_DECL
#define GET_ExtendedBuiltins_DECL
#include "SPIRVGenTables.inc"
} // namespace SPIRV

//===----------------------------------------------------------------------===//
// Misc functions for looking up builtins and verifying requirements using
// TableGen records
//===----------------------------------------------------------------------===//

namespace SPIRV {
/// Parses the name part of the demangled builtin call.
std::string lookupBuiltinNameHelper(StringRef DemangledCall,
                                    FPDecorationId *DecorationId) {
  StringRef PassPrefix = "(anonymous namespace)::";
  std::string BuiltinName;
  // The Itanium demangler result may carry an "(anonymous namespace)::"
  // prefix.
  if (DemangledCall.starts_with(PassPrefix))
    BuiltinName = DemangledCall.substr(PassPrefix.size());
  else
    BuiltinName = DemangledCall;
  // Extract the builtin function name and types of arguments from the call
  // skeleton.
  BuiltinName = BuiltinName.substr(0, BuiltinName.find('('));

  // Account for a possible "__spirv_ocl_" prefix in SPIR-V friendly LLVM IR.
  if (BuiltinName.rfind("__spirv_ocl_", 0) == 0)
    BuiltinName = BuiltinName.substr(12);

  // Check if the extracted name contains type information between angle
  // brackets. If so, the builtin is an instantiated template and the
  // information after the angle brackets, as well as the return type, must be
  // removed.
  std::size_t Pos1 = BuiltinName.rfind('<');
  if (Pos1 != std::string::npos && BuiltinName.back() == '>') {
    std::size_t Pos2 = BuiltinName.rfind(' ', Pos1);
    if (Pos2 == std::string::npos)
      Pos2 = 0;
    else
      ++Pos2;
    BuiltinName = BuiltinName.substr(Pos2, Pos1 - Pos2);
    BuiltinName = BuiltinName.substr(BuiltinName.find_last_of(' ') + 1);
  }

  // Check if the extracted name begins with:
  // - "__spirv_ImageSampleExplicitLod"
  // - "__spirv_ImageRead"
  // - "__spirv_ImageWrite"
  // - "__spirv_ImageQuerySizeLod"
  // - "__spirv_UDotKHR"
  // - "__spirv_SDotKHR"
  // - "__spirv_SUDotKHR"
  // - "__spirv_SDotAccSatKHR"
  // - "__spirv_UDotAccSatKHR"
  // - "__spirv_SUDotAccSatKHR"
  // - "__spirv_ReadClockKHR"
  // - "__spirv_SubgroupBlockReadINTEL"
  // - "__spirv_SubgroupImageBlockReadINTEL"
  // - "__spirv_SubgroupImageMediaBlockReadINTEL"
  // - "__spirv_SubgroupImageMediaBlockWriteINTEL"
  // - "__spirv_Convert"
  // - "__spirv_Round"
  // - "__spirv_UConvert"
  // - "__spirv_SConvert"
  // - "__spirv_FConvert"
  // - "__spirv_SatConvert"
  // and possibly contains return type information at the end "_R<type>".
  // If so, extract the plain builtin name without the type information.
  static const std::regex SpvWithR(
      "(__spirv_(ImageSampleExplicitLod|ImageRead|ImageWrite|ImageQuerySizeLod|"
      "UDotKHR|"
      "SDotKHR|SUDotKHR|SDotAccSatKHR|UDotAccSatKHR|SUDotAccSatKHR|"
      "ReadClockKHR|SubgroupBlockReadINTEL|SubgroupImageBlockReadINTEL|"
      "SubgroupImageMediaBlockReadINTEL|SubgroupImageMediaBlockWriteINTEL|"
      "Convert|Round|"
      "UConvert|SConvert|FConvert|SatConvert)[^_]*)(_R[^_]*_?(\\w+)?.*)?");
  std::smatch Match;
  if (std::regex_match(BuiltinName, Match, SpvWithR) && Match.size() > 1) {
    std::ssub_match SubMatch;
    if (DecorationId && Match.size() > 3) {
      SubMatch = Match[4];
      *DecorationId = demangledPostfixToDecorationId(SubMatch.str());
    }
    SubMatch = Match[1];
    BuiltinName = SubMatch.str();
  }

  return BuiltinName;
}
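// For example (illustrative; exact strings depend on the demangler output):
//   "fmax(float, float)"                  -> "fmax"
//   "__spirv_ocl_s_max(int, int)"         -> "s_max"
//   "__spirv_ConvertFToU_Rulong(double)"  -> "__spirv_ConvertFToU"
// For the "__spirv_" cases, a trailing postfix after the "_R<type>" suffix
// (e.g. a saturation or rounding-mode marker) is reported via *DecorationId.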
} // namespace SPIRV

/// Looks up the demangled builtin call in the SPIRVBuiltins.td records using
/// the provided \p DemangledCall and specified \p Set.
///
/// The lookup follows the following algorithm, returning the first successful
/// match:
/// 1. Search with the plain demangled name (expecting a 1:1 match).
/// 2. Search with the prefix before or suffix after the demangled name
///    signifying the type of the first argument.
///
/// \returns Wrapper around the demangled call and found builtin definition.
static std::unique_ptr<const SPIRV::IncomingCall>
lookupBuiltin(StringRef DemangledCall,
              SPIRV::InstructionSet::InstructionSet Set,
              Register ReturnRegister, const SPIRVType *ReturnType,
              const SmallVectorImpl<Register> &Arguments) {
  std::string BuiltinName = SPIRV::lookupBuiltinNameHelper(DemangledCall);

  SmallVector<StringRef, 10> BuiltinArgumentTypes;
  StringRef BuiltinArgs =
      DemangledCall.slice(DemangledCall.find('(') + 1, DemangledCall.find(')'));
  BuiltinArgs.split(BuiltinArgumentTypes, ',', -1, false);

  // Look up the builtin in the defined set. Start with the plain demangled
  // name, expecting a 1:1 match in the defined builtin set.
  const SPIRV::DemangledBuiltin *Builtin;
  if ((Builtin = SPIRV::lookupBuiltin(BuiltinName, Set)))
    return std::make_unique<SPIRV::IncomingCall>(
        BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);

  // If the initial look up was unsuccessful and the demangled call takes at
  // least 1 argument, add a prefix or suffix signifying the type of the first
  // argument and repeat the search.
  if (BuiltinArgumentTypes.size() >= 1) {
    char FirstArgumentType = BuiltinArgumentTypes[0][0];
    // Prefix to be added to the builtin's name for lookup.
    // For example, OpenCL "abs" taking an unsigned value has a prefix "u_".
    std::string Prefix;

    switch (FirstArgumentType) {
    // Unsigned:
    case 'u':
      if (Set == SPIRV::InstructionSet::OpenCL_std)
        Prefix = "u_";
      else if (Set == SPIRV::InstructionSet::GLSL_std_450)
        Prefix = "u";
      break;
    // Signed:
    case 'c':
    case 's':
    case 'i':
    case 'l':
      if (Set == SPIRV::InstructionSet::OpenCL_std)
        Prefix = "s_";
      else if (Set == SPIRV::InstructionSet::GLSL_std_450)
        Prefix = "s";
      break;
    // Floating-point:
    case 'f':
    case 'd':
    case 'h':
      if (Set == SPIRV::InstructionSet::OpenCL_std ||
          Set == SPIRV::InstructionSet::GLSL_std_450)
        Prefix = "f";
      break;
    }

    // If an argument-type name prefix was added, look up the builtin again.
    if (!Prefix.empty() &&
        (Builtin = SPIRV::lookupBuiltin(Prefix + BuiltinName, Set)))
      return std::make_unique<SPIRV::IncomingCall>(
          BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);

    // If the lookup with a prefix failed, find a suffix to be added to the
    // builtin's name for lookup. For example, OpenCL "group_reduce_max" taking
    // an unsigned value has a suffix "u".
    std::string Suffix;

    switch (FirstArgumentType) {
    // Unsigned:
    case 'u':
      Suffix = "u";
      break;
    // Signed:
    case 'c':
    case 's':
    case 'i':
    case 'l':
      Suffix = "s";
      break;
    // Floating-point:
    case 'f':
    case 'd':
    case 'h':
      Suffix = "f";
      break;
    }

    // If an argument-type name suffix was added, look up the builtin again.
    if (!Suffix.empty() &&
        (Builtin = SPIRV::lookupBuiltin(BuiltinName + Suffix, Set)))
      return std::make_unique<SPIRV::IncomingCall>(
          BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);
  }

  // No builtin with such name was found in the set.
  return nullptr;
}
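// Worked example (a sketch): for an OpenCL C call "abs(uint)", the plain name
// "abs" has no 1:1 record, but the first argument type starts with 'u', so the
// prefixed probe "u_abs" matches an OpenCL_std record. Similarly, a call like
// "group_reduce_max(uint)" is resolved through the suffixed probe
// "group_reduce_maxu".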

static MachineInstr *getBlockStructInstr(Register ParamReg,
                                         MachineRegisterInfo *MRI) {
  // We expect the following sequence of instructions:
  //   %0:_(pN) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.spv.alloca)
  //   or       = G_GLOBAL_VALUE @block_literal_global
  //   %1:_(pN) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.spv.bitcast), %0
  //   %2:_(p4) = G_ADDRSPACE_CAST %1:_(pN)
  MachineInstr *MI = MRI->getUniqueVRegDef(ParamReg);
  assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST &&
         MI->getOperand(1).isReg());
  Register BitcastReg = MI->getOperand(1).getReg();
  MachineInstr *BitcastMI = MRI->getUniqueVRegDef(BitcastReg);
  assert(isSpvIntrinsic(*BitcastMI, Intrinsic::spv_bitcast) &&
         BitcastMI->getOperand(2).isReg());
  Register ValueReg = BitcastMI->getOperand(2).getReg();
  MachineInstr *ValueMI = MRI->getUniqueVRegDef(ValueReg);
  return ValueMI;
}

// Return an integer constant corresponding to the given register and
// defined in spv_track_constant.
// TODO: maybe unify with the prelegalizer pass.
static uint64_t getIConstVal(Register Reg, const MachineRegisterInfo *MRI) {
  MachineInstr *DefMI = MRI->getUniqueVRegDef(Reg);
  assert(DefMI->getOpcode() == TargetOpcode::G_CONSTANT &&
         DefMI->getOperand(1).isCImm());
  return DefMI->getOperand(1).getCImm()->getValue().getZExtValue();
}

// Return the type of the instruction result from the spv_assign_type
// intrinsic.
// TODO: maybe unify with the prelegalizer pass.
static Type *getMachineInstrType(MachineInstr *MI) {
  MachineInstr *NextMI = MI->getNextNode();
  if (!NextMI)
    return nullptr;
  if (isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_name))
    if ((NextMI = NextMI->getNextNode()) == nullptr)
      return nullptr;
  Register ValueReg = MI->getOperand(0).getReg();
  if ((!isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_type) &&
       !isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_ptr_type)) ||
      NextMI->getOperand(1).getReg() != ValueReg)
    return nullptr;
  Type *Ty = getMDOperandAsType(NextMI->getOperand(2).getMetadata(), 0);
  assert(Ty && "Type is expected");
  return Ty;
}

static const Type *getBlockStructType(Register ParamReg,
                                      MachineRegisterInfo *MRI) {
  // In principle, this information should be passed to us from Clang via
  // an elementtype attribute. However, said attribute requires that
  // the function call be an intrinsic, which it is not. Instead, we rely on
  // being able to trace this to the declaration of a variable: OpenCL C
  // specification section 6.12.5 should guarantee that we can do this.
  MachineInstr *MI = getBlockStructInstr(ParamReg, MRI);
  if (MI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE)
    return MI->getOperand(1).getGlobal()->getType();
  assert(isSpvIntrinsic(*MI, Intrinsic::spv_alloca) &&
         "Blocks in OpenCL C must be traceable to allocation site");
  return getMachineInstrType(MI);
}

//===----------------------------------------------------------------------===//
// Helper functions for building misc instructions
//===----------------------------------------------------------------------===//

/// Helper function building either a resulting scalar or vector bool register
/// depending on the expected \p ResultType.
///
/// \returns Tuple of the resulting register and its type.
static std::tuple<Register, SPIRVType *>
buildBoolRegister(MachineIRBuilder &MIRBuilder, const SPIRVType *ResultType,
                  SPIRVGlobalRegistry *GR) {
  LLT Type;
  SPIRVType *BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder, true);

  if (ResultType->getOpcode() == SPIRV::OpTypeVector) {
    unsigned VectorElements = ResultType->getOperand(2).getImm();
    BoolType = GR->getOrCreateSPIRVVectorType(BoolType, VectorElements,
                                              MIRBuilder, true);
    const FixedVectorType *LLVMVectorType =
        cast<FixedVectorType>(GR->getTypeForSPIRVType(BoolType));
    Type = LLT::vector(LLVMVectorType->getElementCount(), 1);
  } else {
    Type = LLT::scalar(1);
  }

  Register ResultRegister =
      MIRBuilder.getMRI()->createGenericVirtualRegister(Type);
  MIRBuilder.getMRI()->setRegClass(ResultRegister, GR->getRegClass(ResultType));
  GR->assignSPIRVTypeToVReg(BoolType, ResultRegister, MIRBuilder.getMF());
  return std::make_tuple(ResultRegister, BoolType);
}

/// Helper function for building either a vector or scalar select instruction
/// depending on the expected \p ReturnType.
static bool buildSelectInst(MachineIRBuilder &MIRBuilder,
                            Register ReturnRegister, Register SourceRegister,
                            const SPIRVType *ReturnType,
                            SPIRVGlobalRegistry *GR) {
  Register TrueConst, FalseConst;

  if (ReturnType->getOpcode() == SPIRV::OpTypeVector) {
    unsigned Bits = GR->getScalarOrVectorBitWidth(ReturnType);
    uint64_t AllOnes = APInt::getAllOnes(Bits).getZExtValue();
    TrueConst =
        GR->getOrCreateConsIntVector(AllOnes, MIRBuilder, ReturnType, true);
    FalseConst = GR->getOrCreateConsIntVector(0, MIRBuilder, ReturnType, true);
  } else {
    TrueConst = GR->buildConstantInt(1, MIRBuilder, ReturnType, true);
    FalseConst = GR->buildConstantInt(0, MIRBuilder, ReturnType, true);
  }

  return MIRBuilder.buildSelect(ReturnRegister, SourceRegister, TrueConst,
                                FalseConst);
}

/// Helper function for building a load instruction loading into the
/// \p DestinationReg.
static Register buildLoadInst(SPIRVType *BaseType, Register PtrRegister,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR, LLT LowLevelType,
                              Register DestinationReg = Register(0)) {
  if (!DestinationReg.isValid())
    DestinationReg = createVirtualRegister(BaseType, GR, MIRBuilder);
  // TODO: consider using the correct address space and alignment (p0 is the
  // canonical type for selection though).
  MachinePointerInfo PtrInfo = MachinePointerInfo();
  MIRBuilder.buildLoad(DestinationReg, PtrRegister, PtrInfo, Align());
  return DestinationReg;
}

/// Helper function for building a load instruction for a builtin global
/// variable identified by \p BuiltinValue.
static Register buildBuiltinVariableLoad(
    MachineIRBuilder &MIRBuilder, SPIRVType *VariableType,
    SPIRVGlobalRegistry *GR, SPIRV::BuiltIn::BuiltIn BuiltinValue, LLT LLType,
    Register Reg = Register(0), bool isConst = true,
    const std::optional<SPIRV::LinkageType::LinkageType> &LinkageTy = {
        SPIRV::LinkageType::Import}) {
  Register NewRegister =
      MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::pIDRegClass);
  MIRBuilder.getMRI()->setType(
      NewRegister,
      LLT::pointer(storageClassToAddressSpace(SPIRV::StorageClass::Function),
                   GR->getPointerSize()));
  SPIRVType *PtrType = GR->getOrCreateSPIRVPointerType(
      VariableType, MIRBuilder, SPIRV::StorageClass::Input);
  GR->assignSPIRVTypeToVReg(PtrType, NewRegister, MIRBuilder.getMF());

  // Set up the global OpVariable with the necessary builtin decorations.
  Register Variable = GR->buildGlobalVariable(
      NewRegister, PtrType, getLinkStringForBuiltIn(BuiltinValue), nullptr,
      SPIRV::StorageClass::Input, nullptr, /* isConst= */ isConst, LinkageTy,
      MIRBuilder, false);

  // Load the value from the global variable.
  Register LoadedRegister =
      buildLoadInst(VariableType, Variable, MIRBuilder, GR, LLType, Reg);
  MIRBuilder.getMRI()->setType(LoadedRegister, LLType);
  return LoadedRegister;
}

/// Helper external function for assigning a SPIRVType to a register, ensuring
/// the register class and type are set in MRI. Defined in
/// SPIRVPreLegalizer.cpp.
extern void updateRegType(Register Reg, Type *Ty, SPIRVType *SpirvTy,
                          SPIRVGlobalRegistry *GR, MachineIRBuilder &MIB,
                          MachineRegisterInfo &MRI);

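/// Maps a C/OpenCL memory order to the corresponding SPIR-V memory semantics
/// mask.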
// TODO: Move to TableGen.
static SPIRV::MemorySemantics::MemorySemantics
getSPIRVMemSemantics(std::memory_order MemOrder) {
  switch (MemOrder) {
  case std::memory_order_relaxed:
    return SPIRV::MemorySemantics::None;
  case std::memory_order_acquire:
    return SPIRV::MemorySemantics::Acquire;
  case std::memory_order_release:
    return SPIRV::MemorySemantics::Release;
  case std::memory_order_acq_rel:
    return SPIRV::MemorySemantics::AcquireRelease;
  case std::memory_order_seq_cst:
    return SPIRV::MemorySemantics::SequentiallyConsistent;
  default:
    report_fatal_error("Unknown CL memory order");
  }
}

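/// Maps an OpenCL memory scope to the corresponding SPIR-V scope.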
static SPIRV::Scope::Scope getSPIRVScope(SPIRV::CLMemoryScope ClScope) {
  switch (ClScope) {
  case SPIRV::CLMemoryScope::memory_scope_work_item:
    return SPIRV::Scope::Invocation;
  case SPIRV::CLMemoryScope::memory_scope_work_group:
    return SPIRV::Scope::Workgroup;
  case SPIRV::CLMemoryScope::memory_scope_device:
    return SPIRV::Scope::Device;
  case SPIRV::CLMemoryScope::memory_scope_all_svm_devices:
    return SPIRV::Scope::CrossDevice;
  case SPIRV::CLMemoryScope::memory_scope_sub_group:
    return SPIRV::Scope::Subgroup;
  }
  report_fatal_error("Unknown CL memory scope");
}

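/// Materializes \p Val as a 32-bit integer constant register.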
static Register buildConstantIntReg32(uint64_t Val,
                                      MachineIRBuilder &MIRBuilder,
                                      SPIRVGlobalRegistry *GR) {
  return GR->buildConstantInt(
      Val, MIRBuilder, GR->getOrCreateSPIRVIntegerType(32, MIRBuilder), true);
}

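/// Returns a register holding the scope operand. If the call passed a scope
/// constant whose value already matches the translated SPIR-V scope, that
/// register is reused; otherwise a new constant register is materialized.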
static Register buildScopeReg(Register CLScopeRegister,
                              SPIRV::Scope::Scope Scope,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR,
                              MachineRegisterInfo *MRI) {
  if (CLScopeRegister.isValid()) {
    auto CLScope =
        static_cast<SPIRV::CLMemoryScope>(getIConstVal(CLScopeRegister, MRI));
    Scope = getSPIRVScope(CLScope);

    if (CLScope == static_cast<unsigned>(Scope)) {
      MRI->setRegClass(CLScopeRegister, &SPIRV::iIDRegClass);
      return CLScopeRegister;
    }
  }
  return buildConstantIntReg32(Scope, MIRBuilder, GR);
}

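/// Assigns \p Reg a register class derived from its SPIR-V type if none has
/// been set yet.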
static void setRegClassIfNull(Register Reg, MachineRegisterInfo *MRI,
                              SPIRVGlobalRegistry *GR) {
  if (MRI->getRegClassOrNull(Reg))
    return;
  SPIRVType *SpvType = GR->getSPIRVTypeForVReg(Reg);
  MRI->setRegClass(Reg,
                   SpvType ? GR->getRegClass(SpvType) : &SPIRV::iIDRegClass);
}

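/// Returns a register holding the memory semantics operand, reusing the
/// original constant argument when its value already matches the computed
/// SPIR-V semantics mask and materializing a new constant otherwise.
/// \p Semantics is updated to the computed mask.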
static Register buildMemSemanticsReg(Register SemanticsRegister,
                                     Register PtrRegister, unsigned &Semantics,
                                     MachineIRBuilder &MIRBuilder,
                                     SPIRVGlobalRegistry *GR) {
  if (SemanticsRegister.isValid()) {
    MachineRegisterInfo *MRI = MIRBuilder.getMRI();
    std::memory_order Order =
        static_cast<std::memory_order>(getIConstVal(SemanticsRegister, MRI));
    Semantics =
        getSPIRVMemSemantics(Order) |
        getMemSemanticsForStorageClass(GR->getPointerStorageClass(PtrRegister));
    if (static_cast<unsigned>(Order) == Semantics) {
      MRI->setRegClass(SemanticsRegister, &SPIRV::iIDRegClass);
      return SemanticsRegister;
    }
  }
  return buildConstantIntReg32(Semantics, MIRBuilder, GR);
}

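/// Emits \p Opcode directly for a "__spirv_..." wrapper call: all call
/// arguments except the trailing \p ImmArgs are forwarded as register
/// operands, and \p ImmArgs are appended as immediate operands.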
static bool buildOpFromWrapper(MachineIRBuilder &MIRBuilder, unsigned Opcode,
                               const SPIRV::IncomingCall *Call,
                               Register TypeReg,
                               ArrayRef<uint32_t> ImmArgs = {}) {
  auto MIB = MIRBuilder.buildInstr(Opcode);
  if (TypeReg.isValid())
    MIB.addDef(Call->ReturnRegister).addUse(TypeReg);
  unsigned Sz = Call->Arguments.size() - ImmArgs.size();
  for (unsigned i = 0; i < Sz; ++i)
    MIB.addUse(Call->Arguments[i]);
  for (uint32_t ImmArg : ImmArgs)
    MIB.addImm(ImmArg);
  return true;
}

/// Helper function for translating atomic init to OpStore.
static bool buildAtomicInitInst(const SPIRV::IncomingCall *Call,
                                MachineIRBuilder &MIRBuilder) {
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, SPIRV::OpStore, Call, Register(0));

  assert(Call->Arguments.size() == 2 &&
         "Need 2 arguments for atomic init translation");
  MIRBuilder.buildInstr(SPIRV::OpStore)
      .addUse(Call->Arguments[0])
      .addUse(Call->Arguments[1]);
  return true;
}

/// Helper function for building an atomic load instruction.
static bool buildAtomicLoadInst(const SPIRV::IncomingCall *Call,
                                MachineIRBuilder &MIRBuilder,
                                SPIRVGlobalRegistry *GR) {
  Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, SPIRV::OpAtomicLoad, Call, TypeReg);

  Register PtrRegister = Call->Arguments[0];
  // TODO: if true, insert a call to __translate_ocl_memory_scope before
  // OpAtomicLoad and the function implementation. We can use the Translator's
  // output for transcoding/atomic_explicit_arguments.cl as an example.
  Register ScopeRegister =
      Call->Arguments.size() > 1
          ? Call->Arguments[1]
          : buildConstantIntReg32(SPIRV::Scope::Device, MIRBuilder, GR);
  Register MemSemanticsReg;
  if (Call->Arguments.size() > 2) {
    // TODO: Insert a call to __translate_ocl_memory_order before OpAtomicLoad.
    MemSemanticsReg = Call->Arguments[2];
  } else {
    int Semantics =
        SPIRV::MemorySemantics::SequentiallyConsistent |
        getMemSemanticsForStorageClass(GR->getPointerStorageClass(PtrRegister));
    MemSemanticsReg = buildConstantIntReg32(Semantics, MIRBuilder, GR);
  }

  MIRBuilder.buildInstr(SPIRV::OpAtomicLoad)
      .addDef(Call->ReturnRegister)
      .addUse(TypeReg)
      .addUse(PtrRegister)
      .addUse(ScopeRegister)
      .addUse(MemSemanticsReg);
  return true;
}

/// Helper function for building an atomic store instruction.
static bool buildAtomicStoreInst(const SPIRV::IncomingCall *Call,
                                 MachineIRBuilder &MIRBuilder,
                                 SPIRVGlobalRegistry *GR) {
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, SPIRV::OpAtomicStore, Call,
                              Register(0));

  Register ScopeRegister =
      buildConstantIntReg32(SPIRV::Scope::Device, MIRBuilder, GR);
  Register PtrRegister = Call->Arguments[0];
  int Semantics =
      SPIRV::MemorySemantics::SequentiallyConsistent |
      getMemSemanticsForStorageClass(GR->getPointerStorageClass(PtrRegister));
  Register MemSemanticsReg = buildConstantIntReg32(Semantics, MIRBuilder, GR);
  MIRBuilder.buildInstr(SPIRV::OpAtomicStore)
      .addUse(PtrRegister)
      .addUse(ScopeRegister)
      .addUse(MemSemanticsReg)
      .addUse(Call->Arguments[1]);
  return true;
}

/// Helper function for building an atomic compare-exchange instruction.
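/// For the OpenCL C builtins (atomic_compare_exchange_*), the comparator is a
/// pointer: the expected value is loaded first, the old value returned by the
/// compare-exchange is stored back through that pointer, and a boolean
/// "old == expected" result is produced. The cmpxchg flavour returns the old
/// value directly.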
static bool buildAtomicCompareExchangeInst(
    const SPIRV::IncomingCall *Call, const SPIRV::DemangledBuiltin *Builtin,
    unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR) {
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  bool IsCmpxchg = Call->Builtin->Name.contains("cmpxchg");
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  Register ObjectPtr = Call->Arguments[0];   // Pointer (volatile A *object).
  Register ExpectedArg = Call->Arguments[1]; // Comparator (C *expected).
  Register Desired = Call->Arguments[2];     // Value (C desired).
  SPIRVType *SpvDesiredTy = GR->getSPIRVTypeForVReg(Desired);
  LLT DesiredLLT = MRI->getType(Desired);

  assert(GR->getSPIRVTypeForVReg(ObjectPtr)->getOpcode() ==
         SPIRV::OpTypePointer);
  unsigned ExpectedType = GR->getSPIRVTypeForVReg(ExpectedArg)->getOpcode();
  (void)ExpectedType;
  assert(IsCmpxchg ? ExpectedType == SPIRV::OpTypeInt
                   : ExpectedType == SPIRV::OpTypePointer);
  assert(GR->isScalarOfType(Desired, SPIRV::OpTypeInt));

  SPIRVType *SpvObjectPtrTy = GR->getSPIRVTypeForVReg(ObjectPtr);
  assert(SpvObjectPtrTy->getOperand(2).isReg() && "SPIRV type is expected");
  auto StorageClass = static_cast<SPIRV::StorageClass::StorageClass>(
      SpvObjectPtrTy->getOperand(1).getImm());
  auto MemSemStorage = getMemSemanticsForStorageClass(StorageClass);

  Register MemSemEqualReg;
  Register MemSemUnequalReg;
  uint64_t MemSemEqual =
      IsCmpxchg
          ? SPIRV::MemorySemantics::None
          : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
  uint64_t MemSemUnequal =
      IsCmpxchg
          ? SPIRV::MemorySemantics::None
          : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
  if (Call->Arguments.size() >= 4) {
    assert(Call->Arguments.size() >= 5 &&
           "Need 5+ args for explicit atomic cmpxchg");
    auto MemOrdEq =
        static_cast<std::memory_order>(getIConstVal(Call->Arguments[3], MRI));
    auto MemOrdNeq =
        static_cast<std::memory_order>(getIConstVal(Call->Arguments[4], MRI));
    MemSemEqual = getSPIRVMemSemantics(MemOrdEq) | MemSemStorage;
    MemSemUnequal = getSPIRVMemSemantics(MemOrdNeq) | MemSemStorage;
    if (static_cast<unsigned>(MemOrdEq) == MemSemEqual)
      MemSemEqualReg = Call->Arguments[3];
    if (static_cast<unsigned>(MemOrdNeq) == MemSemUnequal)
      MemSemUnequalReg = Call->Arguments[4];
  }
  if (!MemSemEqualReg.isValid())
    MemSemEqualReg = buildConstantIntReg32(MemSemEqual, MIRBuilder, GR);
  if (!MemSemUnequalReg.isValid())
    MemSemUnequalReg = buildConstantIntReg32(MemSemUnequal, MIRBuilder, GR);

  Register ScopeReg;
  auto Scope = IsCmpxchg ? SPIRV::Scope::Workgroup : SPIRV::Scope::Device;
  if (Call->Arguments.size() >= 6) {
    assert(Call->Arguments.size() == 6 &&
           "Extra args for explicit atomic cmpxchg");
    auto ClScope = static_cast<SPIRV::CLMemoryScope>(
        getIConstVal(Call->Arguments[5], MRI));
    Scope = getSPIRVScope(ClScope);
    if (ClScope == static_cast<unsigned>(Scope))
      ScopeReg = Call->Arguments[5];
  }
  if (!ScopeReg.isValid())
    ScopeReg = buildConstantIntReg32(Scope, MIRBuilder, GR);

  Register Expected = IsCmpxchg
                          ? ExpectedArg
                          : buildLoadInst(SpvDesiredTy, ExpectedArg, MIRBuilder,
                                          GR, LLT::scalar(64));
  MRI->setType(Expected, DesiredLLT);
  Register Tmp = !IsCmpxchg ? MRI->createGenericVirtualRegister(DesiredLLT)
                            : Call->ReturnRegister;
  if (!MRI->getRegClassOrNull(Tmp))
    MRI->setRegClass(Tmp, GR->getRegClass(SpvDesiredTy));
  GR->assignSPIRVTypeToVReg(SpvDesiredTy, Tmp, MIRBuilder.getMF());

  MIRBuilder.buildInstr(Opcode)
      .addDef(Tmp)
      .addUse(GR->getSPIRVTypeID(SpvDesiredTy))
      .addUse(ObjectPtr)
      .addUse(ScopeReg)
      .addUse(MemSemEqualReg)
      .addUse(MemSemUnequalReg)
      .addUse(Desired)
      .addUse(Expected);
  if (!IsCmpxchg) {
    MIRBuilder.buildInstr(SPIRV::OpStore).addUse(ExpectedArg).addUse(Tmp);
    MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Call->ReturnRegister, Tmp, Expected);
  }
  return true;
}

/// Helper function for building atomic instructions.
static bool buildAtomicRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
                               MachineIRBuilder &MIRBuilder,
                               SPIRVGlobalRegistry *GR) {
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  Register ScopeRegister =
      Call->Arguments.size() >= 4 ? Call->Arguments[3] : Register();

  assert(Call->Arguments.size() <= 4 &&
         "Too many args for explicit atomic RMW");
  ScopeRegister = buildScopeReg(ScopeRegister, SPIRV::Scope::Workgroup,
                                MIRBuilder, GR, MRI);

  Register PtrRegister = Call->Arguments[0];
  unsigned Semantics = SPIRV::MemorySemantics::None;
  Register MemSemanticsReg =
      Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register();
  MemSemanticsReg = buildMemSemanticsReg(MemSemanticsReg, PtrRegister,
                                         Semantics, MIRBuilder, GR);
  Register ValueReg = Call->Arguments[1];
  Register ValueTypeReg = GR->getSPIRVTypeID(Call->ReturnType);
  // Support cl_ext_float_atomics.
  if (Call->ReturnType->getOpcode() == SPIRV::OpTypeFloat) {
    if (Opcode == SPIRV::OpAtomicIAdd) {
      Opcode = SPIRV::OpAtomicFAddEXT;
    } else if (Opcode == SPIRV::OpAtomicISub) {
      // Translate OpAtomicISub applied to a floating-point argument to
      // OpAtomicFAddEXT with a negated value operand.
      Opcode = SPIRV::OpAtomicFAddEXT;
      Register NegValueReg =
          MRI->createGenericVirtualRegister(MRI->getType(ValueReg));
      MRI->setRegClass(NegValueReg, GR->getRegClass(Call->ReturnType));
      GR->assignSPIRVTypeToVReg(Call->ReturnType, NegValueReg,
                                MIRBuilder.getMF());
      MIRBuilder.buildInstr(TargetOpcode::G_FNEG)
          .addDef(NegValueReg)
          .addUse(ValueReg);
      updateRegType(NegValueReg, nullptr, Call->ReturnType, GR, MIRBuilder,
                    MIRBuilder.getMF().getRegInfo());
      ValueReg = NegValueReg;
    }
  }
  MIRBuilder.buildInstr(Opcode)
      .addDef(Call->ReturnRegister)
      .addUse(ValueTypeReg)
      .addUse(PtrRegister)
      .addUse(ScopeRegister)
      .addUse(MemSemanticsReg)
      .addUse(ValueReg);
  return true;
}

/// Helper function for building an atomic floating-type instruction.
static bool buildAtomicFloatingRMWInst(const SPIRV::IncomingCall *Call,
                                       unsigned Opcode,
                                       MachineIRBuilder &MIRBuilder,
                                       SPIRVGlobalRegistry *GR) {
  assert(Call->Arguments.size() == 4 &&
         "Wrong number of arguments for an atomic floating-type builtin");
  Register PtrReg = Call->Arguments[0];
  Register ScopeReg = Call->Arguments[1];
  Register MemSemanticsReg = Call->Arguments[2];
  Register ValueReg = Call->Arguments[3];
  MIRBuilder.buildInstr(Opcode)
      .addDef(Call->ReturnRegister)
      .addUse(GR->getSPIRVTypeID(Call->ReturnType))
      .addUse(PtrReg)
      .addUse(ScopeReg)
      .addUse(MemSemanticsReg)
      .addUse(ValueReg);
  return true;
}

/// Helper function for building atomic flag instructions (e.g.
/// OpAtomicFlagTestAndSet).
static bool buildAtomicFlagInst(const SPIRV::IncomingCall *Call,
                                unsigned Opcode, MachineIRBuilder &MIRBuilder,
                                SPIRVGlobalRegistry *GR) {
  bool IsSet = Opcode == SPIRV::OpAtomicFlagTestAndSet;
  Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              IsSet ? TypeReg : Register(0));

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  Register PtrRegister = Call->Arguments[0];
  unsigned Semantics = SPIRV::MemorySemantics::SequentiallyConsistent;
  Register MemSemanticsReg =
      Call->Arguments.size() >= 2 ? Call->Arguments[1] : Register();
  MemSemanticsReg = buildMemSemanticsReg(MemSemanticsReg, PtrRegister,
                                         Semantics, MIRBuilder, GR);

  assert((Opcode != SPIRV::OpAtomicFlagClear ||
          (Semantics != SPIRV::MemorySemantics::Acquire &&
           Semantics != SPIRV::MemorySemantics::AcquireRelease)) &&
         "Invalid memory order argument!");

  Register ScopeRegister =
      Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register();
  ScopeRegister =
      buildScopeReg(ScopeRegister, SPIRV::Scope::Device, MIRBuilder, GR, MRI);

  auto MIB = MIRBuilder.buildInstr(Opcode);
  if (IsSet)
    MIB.addDef(Call->ReturnRegister).addUse(TypeReg);

  MIB.addUse(PtrRegister).addUse(ScopeRegister).addUse(MemSemanticsReg);
  return true;
}

/// Helper function for building barriers, i.e., memory/control ordering
/// operations.
static bool buildBarrierInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
                             MachineIRBuilder &MIRBuilder,
                             SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  const auto *ST =
      static_cast<const SPIRVSubtarget *>(&MIRBuilder.getMF().getSubtarget());
  if ((Opcode == SPIRV::OpControlBarrierArriveINTEL ||
       Opcode == SPIRV::OpControlBarrierWaitINTEL) &&
      !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_split_barrier)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_INTEL_split_barrier";
    report_fatal_error(DiagMsg.c_str(), false);
  }

  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0));

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  unsigned MemFlags = getIConstVal(Call->Arguments[0], MRI);
  unsigned MemSemantics = SPIRV::MemorySemantics::None;

  if (MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE)
    MemSemantics |= SPIRV::MemorySemantics::WorkgroupMemory;

  if (MemFlags & SPIRV::CLK_GLOBAL_MEM_FENCE)
    MemSemantics |= SPIRV::MemorySemantics::CrossWorkgroupMemory;

  if (MemFlags & SPIRV::CLK_IMAGE_MEM_FENCE)
    MemSemantics |= SPIRV::MemorySemantics::ImageMemory;

  if (Opcode == SPIRV::OpMemoryBarrier)
    MemSemantics = getSPIRVMemSemantics(static_cast<std::memory_order>(
                       getIConstVal(Call->Arguments[1], MRI))) |
                   MemSemantics;
  else if (Opcode == SPIRV::OpControlBarrierArriveINTEL)
    MemSemantics |= SPIRV::MemorySemantics::Release;
  else if (Opcode == SPIRV::OpControlBarrierWaitINTEL)
    MemSemantics |= SPIRV::MemorySemantics::Acquire;
  else
    MemSemantics |= SPIRV::MemorySemantics::SequentiallyConsistent;

  Register MemSemanticsReg =
      MemFlags == MemSemantics
          ? Call->Arguments[0]
          : buildConstantIntReg32(MemSemantics, MIRBuilder, GR);
  Register ScopeReg;
  SPIRV::Scope::Scope Scope = SPIRV::Scope::Workgroup;
  SPIRV::Scope::Scope MemScope = Scope;
  if (Call->Arguments.size() >= 2) {
    assert(
        ((Opcode != SPIRV::OpMemoryBarrier && Call->Arguments.size() == 2) ||
         (Opcode == SPIRV::OpMemoryBarrier && Call->Arguments.size() == 3)) &&
        "Extra args for explicitly scoped barrier");
    Register ScopeArg = (Opcode == SPIRV::OpMemoryBarrier) ? Call->Arguments[2]
                                                           : Call->Arguments[1];
    SPIRV::CLMemoryScope CLScope =
        static_cast<SPIRV::CLMemoryScope>(getIConstVal(ScopeArg, MRI));
    MemScope = getSPIRVScope(CLScope);
    if (!(MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE) ||
        (Opcode == SPIRV::OpMemoryBarrier))
      Scope = MemScope;
    if (CLScope == static_cast<unsigned>(Scope))
      ScopeReg = Call->Arguments[1];
  }

  if (!ScopeReg.isValid())
    ScopeReg = buildConstantIntReg32(Scope, MIRBuilder, GR);

  auto MIB = MIRBuilder.buildInstr(Opcode).addUse(ScopeReg);
  if (Opcode != SPIRV::OpMemoryBarrier)
    MIB.addUse(buildConstantIntReg32(MemScope, MIRBuilder, GR));
  MIB.addUse(MemSemanticsReg);
  return true;
}

/// Helper function for building extended bit operations.
static bool buildExtendedBitOpsInst(const SPIRV::IncomingCall *Call,
                                    unsigned Opcode,
                                    MachineIRBuilder &MIRBuilder,
                                    SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  const auto *ST =
      static_cast<const SPIRVSubtarget *>(&MIRBuilder.getMF().getSubtarget());
  if ((Opcode == SPIRV::OpBitFieldInsert ||
       Opcode == SPIRV::OpBitFieldSExtract ||
       Opcode == SPIRV::OpBitFieldUExtract || Opcode == SPIRV::OpBitReverse) &&
      !ST->canUseExtension(SPIRV::Extension::SPV_KHR_bit_instructions)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_KHR_bit_instructions";
    report_fatal_error(DiagMsg.c_str(), false);
  }

  // Generate the SPIR-V instruction accordingly.
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  auto MIB = MIRBuilder.buildInstr(Opcode)
                 .addDef(Call->ReturnRegister)
                 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
  for (unsigned i = 0; i < Call->Arguments.size(); ++i)
    MIB.addUse(Call->Arguments[i]);

  return true;
}

/// Helper function for building Intel's bindless image instructions.
static bool buildBindlessImageINTELInst(const SPIRV::IncomingCall *Call,
                                        unsigned Opcode,
                                        MachineIRBuilder &MIRBuilder,
                                        SPIRVGlobalRegistry *GR) {
  // Generate the SPIR-V instruction accordingly.
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  MIRBuilder.buildInstr(Opcode)
      .addDef(Call->ReturnRegister)
      .addUse(GR->getSPIRVTypeID(Call->ReturnType))
      .addUse(Call->Arguments[0]);

  return true;
}

/// Helper function for building Intel's OpBitwiseFunctionINTEL instruction.
static bool buildTernaryBitwiseFunctionINTELInst(
    const SPIRV::IncomingCall *Call, unsigned Opcode,
    MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR) {
  // Generate the SPIR-V instruction accordingly.
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  auto MIB = MIRBuilder.buildInstr(Opcode)
                 .addDef(Call->ReturnRegister)
                 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
  for (unsigned i = 0; i < Call->Arguments.size(); ++i)
    MIB.addUse(Call->Arguments[i]);

  return true;
}

/// Helper function for building Intel's 2D block I/O instructions.
static bool build2DBlockIOINTELInst(const SPIRV::IncomingCall *Call,
                                    unsigned Opcode,
                                    MachineIRBuilder &MIRBuilder,
                                    SPIRVGlobalRegistry *GR) {
  // Generate the SPIR-V instruction accordingly.
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0));

  auto MIB = MIRBuilder.buildInstr(Opcode)
                 .addDef(Call->ReturnRegister)
                 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
  for (unsigned i = 0; i < Call->Arguments.size(); ++i)
    MIB.addUse(Call->Arguments[i]);

  return true;
}

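/// Helper function for building OpenCL pipe instructions. The group forms
/// (commit/reserve) additionally take a constant \p Scope operand.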
static bool buildPipeInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
                          unsigned Scope, MachineIRBuilder &MIRBuilder,
                          SPIRVGlobalRegistry *GR) {
  switch (Opcode) {
  case SPIRV::OpCommitReadPipe:
  case SPIRV::OpCommitWritePipe:
    return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0));
  case SPIRV::OpGroupCommitReadPipe:
  case SPIRV::OpGroupCommitWritePipe:
  case SPIRV::OpGroupReserveReadPipePackets:
  case SPIRV::OpGroupReserveWritePipePackets: {
    Register ScopeConstReg =
        MIRBuilder.buildConstant(LLT::scalar(32), Scope).getReg(0);
    MachineRegisterInfo *MRI = MIRBuilder.getMRI();
    MRI->setRegClass(ScopeConstReg, &SPIRV::iIDRegClass);
    MachineInstrBuilder MIB;
    MIB = MIRBuilder.buildInstr(Opcode);
    // Add the return register and type.
    if (Opcode == SPIRV::OpGroupReserveReadPipePackets ||
        Opcode == SPIRV::OpGroupReserveWritePipePackets)
      MIB.addDef(Call->ReturnRegister)
          .addUse(GR->getSPIRVTypeID(Call->ReturnType));

    MIB.addUse(ScopeConstReg);
    for (unsigned i = 0; i < Call->Arguments.size(); ++i)
      MIB.addUse(Call->Arguments[i]);

    return true;
  }
  default:
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));
  }
}

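/// Returns the number of coordinate components used to address a SPIR-V image
/// of dimensionality \p dim.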
static unsigned getNumComponentsForDim(SPIRV::Dim::Dim dim) {
  switch (dim) {
  case SPIRV::Dim::DIM_1D:
  case SPIRV::Dim::DIM_Buffer:
    return 1;
  case SPIRV::Dim::DIM_2D:
  case SPIRV::Dim::DIM_Cube:
  case SPIRV::Dim::DIM_Rect:
    return 2;
  case SPIRV::Dim::DIM_3D:
    return 3;
  default:
    report_fatal_error("Cannot get num components for given Dim");
  }
}

/// Helper function for obtaining the number of size components.
static unsigned getNumSizeComponents(SPIRVType *imgType) {
  assert(imgType->getOpcode() == SPIRV::OpTypeImage);
  auto dim = static_cast<SPIRV::Dim::Dim>(imgType->getOperand(2).getImm());
  unsigned numComps = getNumComponentsForDim(dim);
  bool arrayed = imgType->getOperand(4).getImm() == 1;
  return arrayed ? numComps + 1 : numComps;
}

//===----------------------------------------------------------------------===//
// Implementation functions for each builtin group
//===----------------------------------------------------------------------===//

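/// Lowers a call to an OpenCL extended-instruction-set builtin to OpExtInst.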
static bool generateExtInst(const SPIRV::IncomingCall *Call,
                            MachineIRBuilder &MIRBuilder,
                            SPIRVGlobalRegistry *GR, const CallBase &CB) {
  // Lookup the extended instruction number in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  uint32_t Number =
      SPIRV::lookupExtendedBuiltin(Builtin->Name, Builtin->Set)->Number;
  // fmin_common and fmax_common are now deprecated; fmin and fmax with the
  // NotInf and NotNaN flags should be used instead. Keep the original number
  // so the NoNans and NoInfs flags can be added later.
  uint32_t OrigNumber = Number;
  const SPIRVSubtarget &ST =
      cast<SPIRVSubtarget>(MIRBuilder.getMF().getSubtarget());
  if (ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2) &&
      (Number == SPIRV::OpenCLExtInst::fmin_common ||
       Number == SPIRV::OpenCLExtInst::fmax_common)) {
    Number = (Number == SPIRV::OpenCLExtInst::fmin_common)
                 ? SPIRV::OpenCLExtInst::fmin
                 : SPIRV::OpenCLExtInst::fmax;
  }

  // Build the extended instruction.
  auto MIB =
      MIRBuilder.buildInstr(SPIRV::OpExtInst)
          .addDef(Call->ReturnRegister)
          .addUse(GR->getSPIRVTypeID(Call->ReturnType))
          .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
          .addImm(Number);

  for (auto Argument : Call->Arguments)
    MIB.addUse(Argument);
  MIB.getInstr()->copyIRFlags(CB);
  if (OrigNumber == SPIRV::OpenCLExtInst::fmin_common ||
      OrigNumber == SPIRV::OpenCLExtInst::fmax_common) {
    // Add the NoNans and NoInfs flags to the fmin/fmax instruction.
    MIB.getInstr()->setFlag(MachineInstr::MIFlag::FmNoNans);
    MIB.getInstr()->setFlag(MachineInstr::MIFlag::FmNoInfs);
  }
  return true;
}

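/// Lowers a relational builtin: the comparison result is produced in a bool
/// (or bool vector) register and then widened to the expected return type with
/// a select.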
static bool generateRelationalInst(const SPIRV::IncomingCall *Call,
                                   MachineIRBuilder &MIRBuilder,
                                   SPIRVGlobalRegistry *GR) {
  // Lookup the instruction opcode in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;

  Register CompareRegister;
  SPIRVType *RelationType;
  std::tie(CompareRegister, RelationType) =
      buildBoolRegister(MIRBuilder, Call->ReturnType, GR);

  // Build the relational instruction.
  auto MIB = MIRBuilder.buildInstr(Opcode)
                 .addDef(CompareRegister)
                 .addUse(GR->getSPIRVTypeID(RelationType));

  for (auto Argument : Call->Arguments)
    MIB.addUse(Argument);

  // Build the select instruction.
  return buildSelectInst(MIRBuilder, Call->ReturnRegister, CompareRegister,
                         Call->ReturnType, GR);
}

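/// Lowers work-group and sub-group builtins, handling boolean argument and
/// result conversions as well as the vector LocalId form of OpGroupBroadcast.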
static bool generateGroupInst(const SPIRV::IncomingCall *Call,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  const SPIRV::GroupBuiltin *GroupBuiltin =
      SPIRV::lookupGroupBuiltin(Builtin->Name);

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  if (Call->isSpirvOp()) {
    if (GroupBuiltin->NoGroupOperation) {
      SmallVector<uint32_t, 1> ImmArgs;
      if (GroupBuiltin->Opcode ==
              SPIRV::OpSubgroupMatrixMultiplyAccumulateINTEL &&
          Call->Arguments.size() > 4)
        ImmArgs.push_back(getConstFromIntrinsic(Call->Arguments[4], MRI));
      return buildOpFromWrapper(MIRBuilder, GroupBuiltin->Opcode, Call,
                                GR->getSPIRVTypeID(Call->ReturnType), ImmArgs);
    }

    // The group operation is a literal.
    Register GroupOpReg = Call->Arguments[1];
    const MachineInstr *MI = getDefInstrMaybeConstant(GroupOpReg, MRI);
    if (!MI || MI->getOpcode() != TargetOpcode::G_CONSTANT)
      report_fatal_error(
          "Group Operation parameter must be an integer constant");
    uint64_t GrpOp = MI->getOperand(1).getCImm()->getValue().getZExtValue();
    Register ScopeReg = Call->Arguments[0];
    auto MIB = MIRBuilder.buildInstr(GroupBuiltin->Opcode)
                   .addDef(Call->ReturnRegister)
                   .addUse(GR->getSPIRVTypeID(Call->ReturnType))
                   .addUse(ScopeReg)
                   .addImm(GrpOp);
    for (unsigned i = 2; i < Call->Arguments.size(); ++i)
      MIB.addUse(Call->Arguments[i]);
    return true;
  }

  Register Arg0;
  if (GroupBuiltin->HasBoolArg) {
    SPIRVType *BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder, true);
    Register BoolReg = Call->Arguments[0];
    SPIRVType *BoolRegType = GR->getSPIRVTypeForVReg(BoolReg);
    if (!BoolRegType)
      report_fatal_error("Can't find a register's type definition");
    MachineInstr *ArgInstruction = getDefInstrMaybeConstant(BoolReg, MRI);
    if (ArgInstruction->getOpcode() == TargetOpcode::G_CONSTANT) {
      if (BoolRegType->getOpcode() != SPIRV::OpTypeBool)
        Arg0 = GR->buildConstantInt(getIConstVal(BoolReg, MRI), MIRBuilder,
                                    BoolType, true);
    } else {
      if (BoolRegType->getOpcode() == SPIRV::OpTypeInt) {
        Arg0 = MRI->createGenericVirtualRegister(LLT::scalar(1));
        MRI->setRegClass(Arg0, &SPIRV::iIDRegClass);
        GR->assignSPIRVTypeToVReg(BoolType, Arg0, MIRBuilder.getMF());
        MIRBuilder.buildICmp(
            CmpInst::ICMP_NE, Arg0, BoolReg,
            GR->buildConstantInt(0, MIRBuilder, BoolRegType, true));
        updateRegType(Arg0, nullptr, BoolType, GR, MIRBuilder,
                      MIRBuilder.getMF().getRegInfo());
      } else if (BoolRegType->getOpcode() != SPIRV::OpTypeBool) {
        report_fatal_error("Expect a boolean argument");
      }
      // If BoolReg is already a boolean register, nothing needs to be done.
    }
  }

  Register GroupResultRegister = Call->ReturnRegister;
  SPIRVType *GroupResultType = Call->ReturnType;

  // TODO: maybe we need to check whether the result type is already boolean
  // and in this case do not insert the select instruction.
  const bool HasBoolReturnTy =
      GroupBuiltin->IsElect || GroupBuiltin->IsAllOrAny ||
      GroupBuiltin->IsAllEqual || GroupBuiltin->IsLogical ||
      GroupBuiltin->IsInverseBallot || GroupBuiltin->IsBallotBitExtract;

  if (HasBoolReturnTy)
    std::tie(GroupResultRegister, GroupResultType) =
        buildBoolRegister(MIRBuilder, Call->ReturnType, GR);

  auto Scope = Builtin->Name.starts_with("sub_group") ? SPIRV::Scope::Subgroup
                                                      : SPIRV::Scope::Workgroup;
  Register ScopeRegister = buildConstantIntReg32(Scope, MIRBuilder, GR);

  Register VecReg;
  if (GroupBuiltin->Opcode == SPIRV::OpGroupBroadcast &&
      Call->Arguments.size() > 2) {
    // For OpGroupBroadcast, "LocalId must be an integer datatype. It must be a
    // scalar, a vector with 2 components, or a vector with 3 components.",
    // meaning that we must create a vector from the function arguments if
    // it's a work_group_broadcast(val, local_id_x, local_id_y) or
    // work_group_broadcast(val, local_id_x, local_id_y, local_id_z) call.
    Register ElemReg = Call->Arguments[1];
    SPIRVType *ElemType = GR->getSPIRVTypeForVReg(ElemReg);
    if (!ElemType || ElemType->getOpcode() != SPIRV::OpTypeInt)
      report_fatal_error("Expect an integer <LocalId> argument");
    unsigned VecLen = Call->Arguments.size() - 1;
    VecReg = MRI->createGenericVirtualRegister(
        LLT::fixed_vector(VecLen, MRI->getType(ElemReg)));
    MRI->setRegClass(VecReg, &SPIRV::vIDRegClass);
    SPIRVType *VecType =
        GR->getOrCreateSPIRVVectorType(ElemType, VecLen, MIRBuilder, true);
    GR->assignSPIRVTypeToVReg(VecType, VecReg, MIRBuilder.getMF());
    auto MIB =
        MIRBuilder.buildInstr(TargetOpcode::G_BUILD_VECTOR).addDef(VecReg);
    for (unsigned i = 1; i < Call->Arguments.size(); i++) {
      MIB.addUse(Call->Arguments[i]);
      setRegClassIfNull(Call->Arguments[i], MRI, GR);
    }
    updateRegType(VecReg, nullptr, VecType, GR, MIRBuilder,
                  MIRBuilder.getMF().getRegInfo());
  }

  // Build the work/sub group instruction.
  auto MIB = MIRBuilder.buildInstr(GroupBuiltin->Opcode)
                 .addDef(GroupResultRegister)
                 .addUse(GR->getSPIRVTypeID(GroupResultType))
                 .addUse(ScopeRegister);

  if (!GroupBuiltin->NoGroupOperation)
    MIB.addImm(GroupBuiltin->GroupOperation);
  if (Call->Arguments.size() > 0) {
    MIB.addUse(Arg0.isValid() ? Arg0 : Call->Arguments[0]);
    setRegClassIfNull(Call->Arguments[0], MRI, GR);
    if (VecReg.isValid())
      MIB.addUse(VecReg);
    else
      for (unsigned i = 1; i < Call->Arguments.size(); i++)
        MIB.addUse(Call->Arguments[i]);
  }

  // Build the select instruction.
  if (HasBoolReturnTy)
    buildSelectInst(MIRBuilder, Call->ReturnRegister, GroupResultRegister,
                    Call->ReturnType, GR);
  return true;
}

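/// Lowers Intel subgroup builtins (SPV_INTEL_subgroups and
/// SPV_INTEL_media_block_io), switching block reads/writes to their image
/// forms when the first argument is an image.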
static bool generateIntelSubgroupsInst(const SPIRV::IncomingCall *Call,
                                       MachineIRBuilder &MIRBuilder,
                                       SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  MachineFunction &MF = MIRBuilder.getMF();
  const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
  const SPIRV::IntelSubgroupsBuiltin *IntelSubgroups =
      SPIRV::lookupIntelSubgroupsBuiltin(Builtin->Name);

  if (IntelSubgroups->IsMedia &&
      !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_media_block_io)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_INTEL_media_block_io";
    report_fatal_error(DiagMsg.c_str(), false);
  } else if (!IntelSubgroups->IsMedia &&
             !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_subgroups)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_INTEL_subgroups";
    report_fatal_error(DiagMsg.c_str(), false);
  }

  uint32_t OpCode = IntelSubgroups->Opcode;
  if (Call->isSpirvOp()) {
    bool IsSet = OpCode != SPIRV::OpSubgroupBlockWriteINTEL &&
                 OpCode != SPIRV::OpSubgroupImageBlockWriteINTEL &&
                 OpCode != SPIRV::OpSubgroupImageMediaBlockWriteINTEL;
    return buildOpFromWrapper(MIRBuilder, OpCode, Call,
                              IsSet ? GR->getSPIRVTypeID(Call->ReturnType)
                                    : Register(0));
  }

  if (IntelSubgroups->IsBlock) {
    // The minimal number of arguments set in the TableGen records is 1.
    if (SPIRVType *Arg0Type = GR->getSPIRVTypeForVReg(Call->Arguments[0])) {
      if (Arg0Type->getOpcode() == SPIRV::OpTypeImage) {
        // TODO: add the required validation from the specification:
        // "'Image' must be an object whose type is OpTypeImage with a
        // 'Sampled' operand of 0 or 2. If the 'Sampled' operand is 2, then
        // some dimensions require a capability."
        switch (OpCode) {
        case SPIRV::OpSubgroupBlockReadINTEL:
          OpCode = SPIRV::OpSubgroupImageBlockReadINTEL;
          break;
        case SPIRV::OpSubgroupBlockWriteINTEL:
          OpCode = SPIRV::OpSubgroupImageBlockWriteINTEL;
          break;
        }
      }
    }
  }

  // TODO: opaque pointer types should eventually be resolved in such a way
  // that validation of block reads is enabled with respect to the following
  // specification requirement:
  // "'Result Type' may be a scalar or vector type, and its component type must
  // be equal to the type pointed to by 'Ptr'."
  // For example, the function parameter type should not be the default i8
  // pointer, but should depend on the result type of the instruction where it
  // is used as a pointer argument of OpSubgroupBlockReadINTEL.

  // Build the Intel subgroups instruction.
  MachineInstrBuilder MIB =
      IntelSubgroups->IsWrite
          ? MIRBuilder.buildInstr(OpCode)
          : MIRBuilder.buildInstr(OpCode)
                .addDef(Call->ReturnRegister)
                .addUse(GR->getSPIRVTypeID(Call->ReturnType));
  for (size_t i = 0; i < Call->Arguments.size(); ++i)
    MIB.addUse(Call->Arguments[i]);
  return true;
}

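/// Lowers SPV_KHR_uniform_group_instructions builtins; the group operation
/// argument must be an integer constant and is emitted as a literal operand.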
static bool generateGroupUniformInst(const SPIRV::IncomingCall *Call,
                                     MachineIRBuilder &MIRBuilder,
                                     SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  MachineFunction &MF = MIRBuilder.getMF();
  const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
  if (!ST->canUseExtension(
          SPIRV::Extension::SPV_KHR_uniform_group_instructions)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_KHR_uniform_group_instructions";
    report_fatal_error(DiagMsg.c_str(), false);
  }
  const SPIRV::GroupUniformBuiltin *GroupUniform =
      SPIRV::lookupGroupUniformBuiltin(Builtin->Name);
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  Register GroupResultReg = Call->ReturnRegister;
  Register ScopeReg = Call->Arguments[0];
  Register ValueReg = Call->Arguments[2];

  // Group Operation
  Register ConstGroupOpReg = Call->Arguments[1];
  const MachineInstr *Const = getDefInstrMaybeConstant(ConstGroupOpReg, MRI);
  if (!Const || Const->getOpcode() != TargetOpcode::G_CONSTANT)
    report_fatal_error(
        "expect a constant group operation for a uniform group instruction",
        false);
  const MachineOperand &ConstOperand = Const->getOperand(1);
  if (!ConstOperand.isCImm())
    report_fatal_error("uniform group instructions: group operation must be an "
                       "integer constant",
                       false);

  auto MIB = MIRBuilder.buildInstr(GroupUniform->Opcode)
                 .addDef(GroupResultReg)
                 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
                 .addUse(ScopeReg);
  addNumImm(ConstOperand.getCImm()->getValue(), MIB);
  MIB.addUse(ValueReg);

  return true;
}

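/// Lowers shader clock builtins (SPV_KHR_shader_clock) to OpReadClockKHR,
/// deducing the scope operand from the builtin name when it is not passed
/// explicitly.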
static bool generateKernelClockInst(const SPIRV::IncomingCall *Call,
                                    MachineIRBuilder &MIRBuilder,
                                    SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  MachineFunction &MF = MIRBuilder.getMF();
  const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
  if (!ST->canUseExtension(SPIRV::Extension::SPV_KHR_shader_clock)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_KHR_shader_clock";
    report_fatal_error(DiagMsg.c_str(), false);
  }

  Register ResultReg = Call->ReturnRegister;

  if (Builtin->Name == "__spirv_ReadClockKHR") {
    MIRBuilder.buildInstr(SPIRV::OpReadClockKHR)
        .addDef(ResultReg)
        .addUse(GR->getSPIRVTypeID(Call->ReturnType))
        .addUse(Call->Arguments[0]);
  } else {
    // Deduce the `Scope` operand from the builtin function name.
    SPIRV::Scope::Scope ScopeArg =
        StringSwitch<SPIRV::Scope::Scope>(Builtin->Name)
            .EndsWith("device", SPIRV::Scope::Scope::Device)
            .EndsWith("work_group", SPIRV::Scope::Scope::Workgroup)
            .EndsWith("sub_group", SPIRV::Scope::Scope::Subgroup);
    Register ScopeReg = buildConstantIntReg32(ScopeArg, MIRBuilder, GR);

    MIRBuilder.buildInstr(SPIRV::OpReadClockKHR)
        .addDef(ResultReg)
        .addUse(GR->getSPIRVTypeID(Call->ReturnType))
        .addUse(ScopeReg);
  }

  return true;
}

// These queries ask for a single size_t result for a given dimension index,
// e.g. size_t get_global_id(uint dimindex). In SPIR-V, the builtins
// corresponding to these values are all vec3 types, so we need to extract the
// correct index or return DefaultValue (0 or 1 depending on the query). We
// also handle extending or truncating in case size_t does not match the
// expected result type's bitwidth.
//
// For a constant index >= 3 we generate:
//   %res = OpConstant %SizeT DefaultValue
//
// For other indices we generate:
//   %g = OpVariable %ptr_V3_SizeT Input
//   OpDecorate %g BuiltIn XXX
//   OpDecorate %g LinkageAttributes "__spirv_BuiltInXXX"
//   OpDecorate %g Constant
//   %loadedVec = OpLoad %V3_SizeT %g
//
// Then, if the index is constant < 3, we generate:
//   %res = OpCompositeExtract %SizeT %loadedVec idx
// If the index is dynamic, we generate:
//   %tmp = OpVectorExtractDynamic %SizeT %loadedVec %idx
//   %cmp = OpULessThan %bool %idx %const_3
//   %res = OpSelect %SizeT %cmp %tmp %const_<DefaultValue>
//
// If the bitwidth of %res does not match the expected return type, we add an
// extend or truncate.
1549 MachineIRBuilder &MIRBuilder,
1551 SPIRV::BuiltIn::BuiltIn BuiltinValue,
1552 uint64_t DefaultValue) {
1553 Register IndexRegister = Call->Arguments[0];
1554 const unsigned ResultWidth = Call->ReturnType->getOperand(1).getImm();
1555 const unsigned PointerSize = GR->getPointerSize();
1556 const SPIRVType *PointerSizeType =
1557 GR->getOrCreateSPIRVIntegerType(PointerSize, MIRBuilder);
1558 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1559 auto IndexInstruction = getDefInstrMaybeConstant(IndexRegister, MRI);
1560
1561 // Set up the final register to do truncation or extension on at the end.
1562 Register ToTruncate = Call->ReturnRegister;
1563
1564 // If the index is constant, we can statically determine if it is in range.
1565 bool IsConstantIndex =
1566 IndexInstruction->getOpcode() == TargetOpcode::G_CONSTANT;
1567
1568 // If it's out of range (max dimension is 3), we can just return the constant
1569 // default value (0 or 1 depending on which query function).
1570 if (IsConstantIndex && getIConstVal(IndexRegister, MRI) >= 3) {
1571 Register DefaultReg = Call->ReturnRegister;
1572 if (PointerSize != ResultWidth) {
1573 DefaultReg = MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
1574 MRI->setRegClass(DefaultReg, &SPIRV::iIDRegClass);
1575 GR->assignSPIRVTypeToVReg(PointerSizeType, DefaultReg,
1576 MIRBuilder.getMF());
1577 ToTruncate = DefaultReg;
1578 }
1579 auto NewRegister =
1580 GR->buildConstantInt(DefaultValue, MIRBuilder, PointerSizeType, true);
1581 MIRBuilder.buildCopy(DefaultReg, NewRegister);
1582 } else { // If it could be in range, we need to load from the given builtin.
1583 auto Vec3Ty =
1584 GR->getOrCreateSPIRVVectorType(PointerSizeType, 3, MIRBuilder, true);
1585 Register LoadedVector =
1586 buildBuiltinVariableLoad(MIRBuilder, Vec3Ty, GR, BuiltinValue,
1587 LLT::fixed_vector(3, PointerSize));
1588 // Set up the vreg to extract the result to (possibly a new temporary one).
1589 Register Extracted = Call->ReturnRegister;
1590 if (!IsConstantIndex || PointerSize != ResultWidth) {
1591 Extracted = MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
1592 MRI->setRegClass(Extracted, &SPIRV::iIDRegClass);
1593 GR->assignSPIRVTypeToVReg(PointerSizeType, Extracted, MIRBuilder.getMF());
1594 }
1595 // Use Intrinsic::spv_extractelt so dynamic vs static extraction is
1596 // handled later: extr = spv_extractelt LoadedVector, IndexRegister.
1597 MachineInstrBuilder ExtractInst = MIRBuilder.buildIntrinsic(
1598 Intrinsic::spv_extractelt, ArrayRef<Register>{Extracted}, true, false);
1599 ExtractInst.addUse(LoadedVector).addUse(IndexRegister);
1600
1601 // If the index is dynamic, we need to check whether it is < 3 and then use a select.
1602 if (!IsConstantIndex) {
1603 updateRegType(Extracted, nullptr, PointerSizeType, GR, MIRBuilder, *MRI);
1604
1605 auto IndexType = GR->getSPIRVTypeForVReg(IndexRegister);
1606 auto BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder, true);
1607
1608 Register CompareRegister =
1609 MRI->createGenericVirtualRegister(LLT::scalar(1));
1610 MRI->setRegClass(CompareRegister, &SPIRV::iIDRegClass);
1611 GR->assignSPIRVTypeToVReg(BoolType, CompareRegister, MIRBuilder.getMF());
1612
1613 // Use G_ICMP to check if idxVReg < 3.
1614 MIRBuilder.buildICmp(
1615 CmpInst::ICMP_ULT, CompareRegister, IndexRegister,
1616 GR->buildConstantInt(3, MIRBuilder, IndexType, true));
1617
1618 // Get constant for the default value (0 or 1 depending on which
1619 // function).
1620 Register DefaultRegister =
1621 GR->buildConstantInt(DefaultValue, MIRBuilder, PointerSizeType, true);
1622
1623 // Get a register for the selection result (possibly a new temporary one).
1624 Register SelectionResult = Call->ReturnRegister;
1625 if (PointerSize != ResultWidth) {
1626 SelectionResult =
1627 MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
1628 MRI->setRegClass(SelectionResult, &SPIRV::iIDRegClass);
1629 GR->assignSPIRVTypeToVReg(PointerSizeType, SelectionResult,
1630 MIRBuilder.getMF());
1631 }
1632 // Create the final G_SELECT to return the extracted value or the default.
1633 MIRBuilder.buildSelect(SelectionResult, CompareRegister, Extracted,
1634 DefaultRegister);
1635 ToTruncate = SelectionResult;
1636 } else {
1637 ToTruncate = Extracted;
1638 }
1639 }
1640 // Alter the result's bitwidth if it does not match the SizeT value extracted.
1641 if (PointerSize != ResultWidth)
1642 MIRBuilder.buildZExtOrTrunc(Call->ReturnRegister, ToTruncate);
1643 return true;
1644}
1645
1646static bool generateBuiltinVar(const SPIRV::IncomingCall *Call,
1647 MachineIRBuilder &MIRBuilder,
1648 SPIRVGlobalRegistry *GR) {
1649 // Lookup the builtin variable record.
1650 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1651 SPIRV::BuiltIn::BuiltIn Value =
1652 SPIRV::lookupGetBuiltin(Builtin->Name, Builtin->Set)->Value;
1653
1654 if (Value == SPIRV::BuiltIn::GlobalInvocationId)
1655 return genWorkgroupQuery(Call, MIRBuilder, GR, Value, 0);
1656
1657 // Build a load instruction for the builtin variable.
1658 unsigned BitWidth = GR->getScalarOrVectorBitWidth(Call->ReturnType);
1659 LLT LLType;
1660 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeVector)
1661 LLType =
1662 LLT::fixed_vector(Call->ReturnType->getOperand(2).getImm(), BitWidth);
1663 else
1664 LLType = LLT::scalar(BitWidth);
1665
1666 return buildBuiltinVariableLoad(MIRBuilder, Call->ReturnType, GR, Value,
1667 LLType, Call->ReturnRegister);
1668}
1669
1670static bool generateAtomicInst(const SPIRV::IncomingCall *Call,
1671 MachineIRBuilder &MIRBuilder,
1672 SPIRVGlobalRegistry *GR) {
1673 // Lookup the instruction opcode in the TableGen records.
1674 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1675 unsigned Opcode =
1676 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1677
1678 switch (Opcode) {
1679 case SPIRV::OpStore:
1680 return buildAtomicInitInst(Call, MIRBuilder);
1681 case SPIRV::OpAtomicLoad:
1682 return buildAtomicLoadInst(Call, MIRBuilder, GR);
1683 case SPIRV::OpAtomicStore:
1684 return buildAtomicStoreInst(Call, MIRBuilder, GR);
1685 case SPIRV::OpAtomicCompareExchange:
1686 case SPIRV::OpAtomicCompareExchangeWeak:
1687 return buildAtomicCompareExchangeInst(Call, Builtin, Opcode, MIRBuilder,
1688 GR);
1689 case SPIRV::OpAtomicIAdd:
1690 case SPIRV::OpAtomicISub:
1691 case SPIRV::OpAtomicOr:
1692 case SPIRV::OpAtomicXor:
1693 case SPIRV::OpAtomicAnd:
1694 case SPIRV::OpAtomicExchange:
1695 return buildAtomicRMWInst(Call, Opcode, MIRBuilder, GR);
1696 case SPIRV::OpMemoryBarrier:
1697 return buildBarrierInst(Call, SPIRV::OpMemoryBarrier, MIRBuilder, GR);
1698 case SPIRV::OpAtomicFlagTestAndSet:
1699 case SPIRV::OpAtomicFlagClear:
1700 return buildAtomicFlagInst(Call, Opcode, MIRBuilder, GR);
1701 default:
1702 if (Call->isSpirvOp())
1703 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
1704 GR->getSPIRVTypeID(Call->ReturnType));
1705 return false;
1706 }
1707}
1708
1709static bool generateAtomicFloatingInst(const SPIRV::IncomingCall *Call,
1710 MachineIRBuilder &MIRBuilder,
1711 SPIRVGlobalRegistry *GR) {
1712 // Lookup the instruction opcode in the TableGen records.
1713 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1714 unsigned Opcode = SPIRV::lookupAtomicFloatingBuiltin(Builtin->Name)->Opcode;
1715
1716 switch (Opcode) {
1717 case SPIRV::OpAtomicFAddEXT:
1718 case SPIRV::OpAtomicFMinEXT:
1719 case SPIRV::OpAtomicFMaxEXT:
1720 return buildAtomicFloatingRMWInst(Call, Opcode, MIRBuilder, GR);
1721 default:
1722 return false;
1723 }
1724}
1725
1726static bool generateBarrierInst(const SPIRV::IncomingCall *Call,
1727 MachineIRBuilder &MIRBuilder,
1728 SPIRVGlobalRegistry *GR) {
1729 // Lookup the instruction opcode in the TableGen records.
1730 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1731 unsigned Opcode =
1732 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1733
1734 return buildBarrierInst(Call, Opcode, MIRBuilder, GR);
1735}
1736
1737static bool generateCastToPtrInst(const SPIRV::IncomingCall *Call,
1738 MachineIRBuilder &MIRBuilder,
1739 SPIRVGlobalRegistry *GR) {
1740 // Lookup the instruction opcode in the TableGen records.
1741 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1742 unsigned Opcode =
1743 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1744
1745 if (Opcode == SPIRV::OpGenericCastToPtrExplicit) {
1746 SPIRV::StorageClass::StorageClass ResSC =
1747 GR->getPointerStorageClass(Call->ReturnRegister);
1748 if (!isGenericCastablePtr(ResSC))
1749 return false;
1750
1751 MIRBuilder.buildInstr(Opcode)
1752 .addDef(Call->ReturnRegister)
1753 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1754 .addUse(Call->Arguments[0])
1755 .addImm(ResSC);
1756 } else {
1757 MIRBuilder.buildInstr(TargetOpcode::G_ADDRSPACE_CAST)
1758 .addDef(Call->ReturnRegister)
1759 .addUse(Call->Arguments[0]);
1760 }
1761 return true;
1762}
1763
1764static bool generateDotOrFMulInst(const StringRef DemangledCall,
1765 const SPIRV::IncomingCall *Call,
1766 MachineIRBuilder &MIRBuilder,
1767 SPIRVGlobalRegistry *GR) {
1768 if (Call->isSpirvOp())
1769 return buildOpFromWrapper(MIRBuilder, SPIRV::OpDot, Call,
1770 GR->getSPIRVTypeID(Call->ReturnType));
1771
1772 bool IsVec = GR->getSPIRVTypeForVReg(Call->Arguments[0])->getOpcode() ==
1773 SPIRV::OpTypeVector;
1774 // Use OpDot only in case of vector args and OpFMul in case of scalar args.
1775 uint32_t OC = IsVec ? SPIRV::OpDot : SPIRV::OpFMulS;
1776 bool IsSwapReq = false;
1777
1778 const auto *ST =
1779 static_cast<const SPIRVSubtarget *>(&MIRBuilder.getMF().getSubtarget());
1780 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt) &&
1781 (ST->canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
1782 ST->isAtLeastSPIRVVer(VersionTuple(1, 6)))) {
1783 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1784 const SPIRV::IntegerDotProductBuiltin *IntDot =
1785 SPIRV::lookupIntegerDotProductBuiltin(Builtin->Name);
1786 if (IntDot) {
1787 OC = IntDot->Opcode;
1788 IsSwapReq = IntDot->IsSwapReq;
1789 } else if (IsVec) {
1790 // Handling "dot" and "dot_acc_sat" builtins which use vectors of
1791 // integers.
1792 LLVMContext &Ctx = MIRBuilder.getContext();
1793 SmallVector<StringRef, 10> TypeStrs;
1794 SPIRV::parseBuiltinTypeStr(TypeStrs, DemangledCall, Ctx);
1795 bool IsFirstSigned = TypeStrs[0].trim()[0] != 'u';
1796 bool IsSecondSigned = TypeStrs[1].trim()[0] != 'u';
1797
1798 if (Call->BuiltinName == "dot") {
1799 if (IsFirstSigned && IsSecondSigned)
1800 OC = SPIRV::OpSDot;
1801 else if (!IsFirstSigned && !IsSecondSigned)
1802 OC = SPIRV::OpUDot;
1803 else {
1804 OC = SPIRV::OpSUDot;
1805 if (!IsFirstSigned)
1806 IsSwapReq = true;
1807 }
1808 } else if (Call->BuiltinName == "dot_acc_sat") {
1809 if (IsFirstSigned && IsSecondSigned)
1810 OC = SPIRV::OpSDotAccSat;
1811 else if (!IsFirstSigned && !IsSecondSigned)
1812 OC = SPIRV::OpUDotAccSat;
1813 else {
1814 OC = SPIRV::OpSUDotAccSat;
1815 if (!IsFirstSigned)
1816 IsSwapReq = true;
1817 }
1818 }
1819 }
1820 }
1821
1822 MachineInstrBuilder MIB = MIRBuilder.buildInstr(OC)
1823 .addDef(Call->ReturnRegister)
1824 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
1825
1826 if (IsSwapReq) {
1827 MIB.addUse(Call->Arguments[1]);
1828 MIB.addUse(Call->Arguments[0]);
1829 // needed for dot_acc_sat* builtins
1830 for (size_t i = 2; i < Call->Arguments.size(); ++i)
1831 MIB.addUse(Call->Arguments[i]);
1832 } else {
1833 for (size_t i = 0; i < Call->Arguments.size(); ++i)
1834 MIB.addUse(Call->Arguments[i]);
1835 }
1836
1837 // Add Packed Vector Format for Integer dot product builtins if arguments are
1838 // scalar
1839 if (!IsVec && OC != SPIRV::OpFMulS)
1840 MIB.addImm(SPIRV::PackedVectorFormat4x8Bit);
1841
1842 return true;
1843}
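// Illustrative sketch (not from the source): for a mixed-signedness call such
// as
//   int r = dot_acc_sat(a, b, acc); // a: uchar4, b: char4
// the logic above selects OpSUDotAccSat and, because the first operand is
// unsigned, swaps the operands so the signed vector comes first:
//   %r = OpSUDotAccSat %int %b %a %acc
// Scalar (packed 4x8-bit) variants additionally get the
// PackedVectorFormat4x8Bit literal appended.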
1844
1845static bool generateWaveInst(const SPIRV::IncomingCall *Call,
1846 MachineIRBuilder &MIRBuilder,
1847 SPIRVGlobalRegistry *GR) {
1848 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1849 SPIRV::BuiltIn::BuiltIn Value =
1850 SPIRV::lookupGetBuiltin(Builtin->Name, Builtin->Set)->Value;
1851
1852 // For now, we only support a single Wave intrinsic with a single return type.
1853 assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt);
1854 LLT LLType = LLT::scalar(GR->getScalarOrVectorBitWidth(Call->ReturnType));
1855
1856 return buildBuiltinVariableLoad(
1857 MIRBuilder, Call->ReturnType, GR, Value, LLType, Call->ReturnRegister,
1858 /* isConst= */ false, /* LinkageType= */ std::nullopt);
1859}
1860
1861// We expect a builtin
1862// Name(ptr sret([RetType]) %result, Type %operand1, Type %operand2)
1863// where %result is a pointer to where the result of the builtin execution
1864// is to be stored, and generate the following instructions:
1865// Res = Opcode RetType Operand1 Operand2
1866// OpStore RetVariable Res
1867static bool generateICarryBorrowInst(const SPIRV::IncomingCall *Call,
1868 MachineIRBuilder &MIRBuilder,
1869 SPIRVGlobalRegistry *GR) {
1870 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1871 unsigned Opcode =
1872 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1873
1874 Register SRetReg = Call->Arguments[0];
1875 SPIRVType *PtrRetType = GR->getSPIRVTypeForVReg(SRetReg);
1876 SPIRVType *RetType = GR->getPointeeType(PtrRetType);
1877 if (!RetType)
1878 report_fatal_error("The first parameter must be a pointer");
1879 if (RetType->getOpcode() != SPIRV::OpTypeStruct)
1880 report_fatal_error("Expected struct type result for the arithmetic with "
1881 "overflow builtins");
1882
1883 SPIRVType *OpType1 = GR->getSPIRVTypeForVReg(Call->Arguments[1]);
1884 SPIRVType *OpType2 = GR->getSPIRVTypeForVReg(Call->Arguments[2]);
1885 if (!OpType1 || !OpType2 || OpType1 != OpType2)
1886 report_fatal_error("Operands must have the same type");
1887 if (OpType1->getOpcode() == SPIRV::OpTypeVector)
1888 switch (Opcode) {
1889 case SPIRV::OpIAddCarryS:
1890 Opcode = SPIRV::OpIAddCarryV;
1891 break;
1892 case SPIRV::OpISubBorrowS:
1893 Opcode = SPIRV::OpISubBorrowV;
1894 break;
1895 }
1896
1897 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1898 Register ResReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1899 if (const TargetRegisterClass *DstRC =
1900 MRI->getRegClassOrNull(Call->Arguments[1])) {
1901 MRI->setRegClass(ResReg, DstRC);
1902 MRI->setType(ResReg, MRI->getType(Call->Arguments[1]));
1903 } else {
1904 MRI->setType(ResReg, LLT::scalar(64));
1905 }
1906 GR->assignSPIRVTypeToVReg(RetType, ResReg, MIRBuilder.getMF());
1907 MIRBuilder.buildInstr(Opcode)
1908 .addDef(ResReg)
1909 .addUse(GR->getSPIRVTypeID(RetType))
1910 .addUse(Call->Arguments[1])
1911 .addUse(Call->Arguments[2]);
1912 MIRBuilder.buildInstr(SPIRV::OpStore).addUse(SRetReg).addUse(ResReg);
1913 return true;
1914}
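// Illustrative sketch (argument shapes assumed): a call such as
//   __spirv_IAddCarry(ptr sret(%S) %res, i32 %a, i32 %b)
// lowers roughly to
//   %tmp = OpIAddCarryS %S %a %b
//   OpStore %res %tmp
// with the scalar opcode swapped for its vector form when the operands are
// vectors.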
1915
1916static bool generateGetQueryInst(const SPIRV::IncomingCall *Call,
1917 MachineIRBuilder &MIRBuilder,
1918 SPIRVGlobalRegistry *GR) {
1919 // Lookup the builtin record.
1920 SPIRV::BuiltIn::BuiltIn Value =
1921 SPIRV::lookupGetBuiltin(Call->Builtin->Name, Call->Builtin->Set)->Value;
1922 const bool IsDefaultOne = (Value == SPIRV::BuiltIn::GlobalSize ||
1923 Value == SPIRV::BuiltIn::NumWorkgroups ||
1924 Value == SPIRV::BuiltIn::WorkgroupSize ||
1925 Value == SPIRV::BuiltIn::EnqueuedWorkgroupSize);
1926 return genWorkgroupQuery(Call, MIRBuilder, GR, Value, IsDefaultOne ? 1 : 0);
1927}
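// Illustrative consequence of the defaults chosen above: for a constant
// out-of-range dimension index, size-style queries (e.g. get_global_size)
// fold to the constant 1, while id/offset-style queries (e.g. get_global_id)
// fold to 0.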
1928
1929static bool generateImageSizeQueryInst(const SPIRV::IncomingCall *Call,
1930 MachineIRBuilder &MIRBuilder,
1931 SPIRVGlobalRegistry *GR) {
1932 // Lookup the image size query component number in the TableGen records.
1933 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1934 uint32_t Component =
1935 SPIRV::lookupImageQueryBuiltin(Builtin->Name, Builtin->Set)->Component;
1936 // Query result may either be a vector or a scalar. If return type is not a
1937 // vector, expect only a single size component. Otherwise get the number of
1938 // expected components.
1939 unsigned NumExpectedRetComponents =
1940 Call->ReturnType->getOpcode() == SPIRV::OpTypeVector
1941 ? Call->ReturnType->getOperand(2).getImm()
1942 : 1;
1943 // Get the actual number of query result/size components.
1944 SPIRVType *ImgType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
1945 unsigned NumActualRetComponents = getNumSizeComponents(ImgType);
1946 Register QueryResult = Call->ReturnRegister;
1947 SPIRVType *QueryResultType = Call->ReturnType;
1948 if (NumExpectedRetComponents != NumActualRetComponents) {
1949 unsigned Bitwidth = Call->ReturnType->getOpcode() == SPIRV::OpTypeInt
1950 ? Call->ReturnType->getOperand(1).getImm()
1951 : 32;
1952 QueryResult = MIRBuilder.getMRI()->createGenericVirtualRegister(
1953 LLT::fixed_vector(NumActualRetComponents, Bitwidth));
1954 MIRBuilder.getMRI()->setRegClass(QueryResult, &SPIRV::vIDRegClass);
1955 SPIRVType *IntTy = GR->getOrCreateSPIRVIntegerType(Bitwidth, MIRBuilder);
1956 QueryResultType = GR->getOrCreateSPIRVVectorType(
1957 IntTy, NumActualRetComponents, MIRBuilder, true);
1958 GR->assignSPIRVTypeToVReg(QueryResultType, QueryResult, MIRBuilder.getMF());
1959 }
1960 bool IsDimBuf = ImgType->getOperand(2).getImm() == SPIRV::Dim::DIM_Buffer;
1961 unsigned Opcode =
1962 IsDimBuf ? SPIRV::OpImageQuerySize : SPIRV::OpImageQuerySizeLod;
1963 auto MIB = MIRBuilder.buildInstr(Opcode)
1964 .addDef(QueryResult)
1965 .addUse(GR->getSPIRVTypeID(QueryResultType))
1966 .addUse(Call->Arguments[0]);
1967 if (!IsDimBuf)
1968 MIB.addUse(buildConstantIntReg32(0, MIRBuilder, GR)); // Lod id.
1969 if (NumExpectedRetComponents == NumActualRetComponents)
1970 return true;
1971 if (NumExpectedRetComponents == 1) {
1972 // Only 1 component is expected, build OpCompositeExtract instruction.
1973 unsigned ExtractedComposite =
1974 Component == 3 ? NumActualRetComponents - 1 : Component;
1975 assert(ExtractedComposite < NumActualRetComponents &&
1976 "Invalid composite index!");
1977 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
1978 SPIRVType *NewType = nullptr;
1979 if (QueryResultType->getOpcode() == SPIRV::OpTypeVector) {
1980 Register NewTypeReg = QueryResultType->getOperand(1).getReg();
1981 if (TypeReg != NewTypeReg &&
1982 (NewType = GR->getSPIRVTypeForVReg(NewTypeReg)) != nullptr)
1983 TypeReg = NewTypeReg;
1984 }
1985 MIRBuilder.buildInstr(SPIRV::OpCompositeExtract)
1986 .addDef(Call->ReturnRegister)
1987 .addUse(TypeReg)
1988 .addUse(QueryResult)
1989 .addImm(ExtractedComposite);
1990 if (NewType != nullptr)
1991 updateRegType(Call->ReturnRegister, nullptr, NewType, GR, MIRBuilder,
1992 MIRBuilder.getMF().getRegInfo());
1993 } else {
1994 // More than 1 component is expected, fill a new vector.
1995 auto MIB = MIRBuilder.buildInstr(SPIRV::OpVectorShuffle)
1996 .addDef(Call->ReturnRegister)
1997 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1998 .addUse(QueryResult)
1999 .addUse(QueryResult);
2000 for (unsigned i = 0; i < NumExpectedRetComponents; ++i)
2001 MIB.addImm(i < NumActualRetComponents ? i : 0xffffffff);
2002 }
2003 return true;
2004}
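// Illustrative sketch (not from the source): get_image_width on an image2d_t
// expects one component while the size query yields two, so the generated
// code is roughly
//   %sz = OpImageQuerySizeLod %v2int %img %lod0
//   %w = OpCompositeExtract %int %sz 0
// whereas a query whose return type already matches the component count
// returns the loaded vector directly.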
2005
2006static bool generateImageMiscQueryInst(const SPIRV::IncomingCall *Call,
2007 MachineIRBuilder &MIRBuilder,
2008 SPIRVGlobalRegistry *GR) {
2009 assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt &&
2010 "Image samples query result must be of int type!");
2011
2012 // Lookup the instruction opcode in the TableGen records.
2013 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2014 unsigned Opcode =
2015 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2016
2017 Register Image = Call->Arguments[0];
2018 SPIRV::Dim::Dim ImageDimensionality = static_cast<SPIRV::Dim::Dim>(
2019 GR->getSPIRVTypeForVReg(Image)->getOperand(2).getImm());
2020 (void)ImageDimensionality;
2021
2022 switch (Opcode) {
2023 case SPIRV::OpImageQuerySamples:
2024 assert(ImageDimensionality == SPIRV::Dim::DIM_2D &&
2025 "Image must be of 2D dimensionality");
2026 break;
2027 case SPIRV::OpImageQueryLevels:
2028 assert((ImageDimensionality == SPIRV::Dim::DIM_1D ||
2029 ImageDimensionality == SPIRV::Dim::DIM_2D ||
2030 ImageDimensionality == SPIRV::Dim::DIM_3D ||
2031 ImageDimensionality == SPIRV::Dim::DIM_Cube) &&
2032 "Image must be of 1D/2D/3D/Cube dimensionality");
2033 break;
2034 }
2035
2036 MIRBuilder.buildInstr(Opcode)
2037 .addDef(Call->ReturnRegister)
2038 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2039 .addUse(Image);
2040 return true;
2041}
2042
2043// TODO: Move to TableGen.
2044static SPIRV::SamplerAddressingMode::SamplerAddressingMode
2045getSamplerAddressingModeFromBitmask(unsigned Bitmask) {
2046 switch (Bitmask & SPIRV::CLK_ADDRESS_MODE_MASK) {
2047 case SPIRV::CLK_ADDRESS_CLAMP:
2048 return SPIRV::SamplerAddressingMode::Clamp;
2049 case SPIRV::CLK_ADDRESS_CLAMP_TO_EDGE:
2050 return SPIRV::SamplerAddressingMode::ClampToEdge;
2051 case SPIRV::CLK_ADDRESS_REPEAT:
2052 return SPIRV::SamplerAddressingMode::Repeat;
2053 case SPIRV::CLK_ADDRESS_MIRRORED_REPEAT:
2054 return SPIRV::SamplerAddressingMode::RepeatMirrored;
2055 case SPIRV::CLK_ADDRESS_NONE:
2056 return SPIRV::SamplerAddressingMode::None;
2057 default:
2058 report_fatal_error("Unknown CL address mode");
2059 }
2060}
2061
2062static unsigned getSamplerParamFromBitmask(unsigned Bitmask) {
2063 return (Bitmask & SPIRV::CLK_NORMALIZED_COORDS_TRUE) ? 1 : 0;
2064}
2065
2066static SPIRV::SamplerFilterMode::SamplerFilterMode
2067getSamplerFilterModeFromBitmask(unsigned Bitmask) {
2068 if (Bitmask & SPIRV::CLK_FILTER_LINEAR)
2069 return SPIRV::SamplerFilterMode::Linear;
2070 if (Bitmask & SPIRV::CLK_FILTER_NEAREST)
2071 return SPIRV::SamplerFilterMode::Nearest;
2072 return SPIRV::SamplerFilterMode::Nearest;
2073}
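// Illustrative decomposition (standard OpenCL sampler bits assumed): a mask
// such as
//   CLK_NORMALIZED_COORDS_TRUE | CLK_ADDRESS_CLAMP | CLK_FILTER_LINEAR
// maps through the three helpers above to
//   (AddressingMode = Clamp, Param = 1, FilterMode = Linear).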
2074
2075static bool generateReadImageInst(const StringRef DemangledCall,
2076 const SPIRV::IncomingCall *Call,
2077 MachineIRBuilder &MIRBuilder,
2078 SPIRVGlobalRegistry *GR) {
2079 if (Call->isSpirvOp())
2080 return buildOpFromWrapper(MIRBuilder, SPIRV::OpImageRead, Call,
2081 GR->getSPIRVTypeID(Call->ReturnType));
2082 Register Image = Call->Arguments[0];
2083 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2084 bool HasOclSampler = DemangledCall.contains_insensitive("ocl_sampler");
2085 bool HasMsaa = DemangledCall.contains_insensitive("msaa");
2086 if (HasOclSampler) {
2087 Register Sampler = Call->Arguments[1];
2088
2089 if (!GR->isScalarOfType(Sampler, SPIRV::OpTypeSampler) &&
2090 getDefInstrMaybeConstant(Sampler, MRI)->getOperand(1).isCImm()) {
2091 uint64_t SamplerMask = getIConstVal(Sampler, MRI);
2092 Sampler = GR->buildConstantSampler(
2093 Register(), getSamplerAddressingModeFromBitmask(SamplerMask),
2094 getSamplerParamFromBitmask(SamplerMask),
2095 getSamplerFilterModeFromBitmask(SamplerMask), MIRBuilder);
2096 }
2097 SPIRVType *ImageType = GR->getSPIRVTypeForVReg(Image);
2098 SPIRVType *SampledImageType =
2099 GR->getOrCreateOpTypeSampledImage(ImageType, MIRBuilder);
2100 Register SampledImage = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2101
2102 MIRBuilder.buildInstr(SPIRV::OpSampledImage)
2103 .addDef(SampledImage)
2104 .addUse(GR->getSPIRVTypeID(SampledImageType))
2105 .addUse(Image)
2106 .addUse(Sampler);
2107
2108 Register Lod = GR->buildConstantFP(APFloat::getZero(APFloat::IEEEsingle()),
2109 MIRBuilder);
2110
2111 if (Call->ReturnType->getOpcode() != SPIRV::OpTypeVector) {
2112 SPIRVType *TempType =
2113 GR->getOrCreateSPIRVVectorType(Call->ReturnType, 4, MIRBuilder, true);
2114 Register TempRegister =
2115 MRI->createGenericVirtualRegister(GR->getRegType(TempType));
2116 MRI->setRegClass(TempRegister, GR->getRegClass(TempType));
2117 GR->assignSPIRVTypeToVReg(TempType, TempRegister, MIRBuilder.getMF());
2118 MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
2119 .addDef(TempRegister)
2120 .addUse(GR->getSPIRVTypeID(TempType))
2121 .addUse(SampledImage)
2122 .addUse(Call->Arguments[2]) // Coordinate.
2123 .addImm(SPIRV::ImageOperand::Lod)
2124 .addUse(Lod);
2125 MIRBuilder.buildInstr(SPIRV::OpCompositeExtract)
2126 .addDef(Call->ReturnRegister)
2127 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2128 .addUse(TempRegister)
2129 .addImm(0);
2130 } else {
2131 MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
2132 .addDef(Call->ReturnRegister)
2133 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2134 .addUse(SampledImage)
2135 .addUse(Call->Arguments[2]) // Coordinate.
2136 .addImm(SPIRV::ImageOperand::Lod)
2137 .addUse(Lod);
2138 }
2139 } else if (HasMsaa) {
2140 MIRBuilder.buildInstr(SPIRV::OpImageRead)
2141 .addDef(Call->ReturnRegister)
2142 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2143 .addUse(Image)
2144 .addUse(Call->Arguments[1]) // Coordinate.
2145 .addImm(SPIRV::ImageOperand::Sample)
2146 .addUse(Call->Arguments[2]);
2147 } else {
2148 MIRBuilder.buildInstr(SPIRV::OpImageRead)
2149 .addDef(Call->ReturnRegister)
2150 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2151 .addUse(Image)
2152 .addUse(Call->Arguments[1]); // Coordinate.
2153 }
2154 return true;
2155}
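// Illustrative sketch (not from the source): a sampled read such as
//   float4 p = read_imagef(img, smp, coord);
// produces roughly
//   %si = OpSampledImage %SampledImageTy %img %smp
//   %p = OpImageSampleExplicitLod %v4float %si %coord Lod %lod0
// plus a trailing OpCompositeExtract when the builtin returns a scalar.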
2156
2157static bool generateWriteImageInst(const SPIRV::IncomingCall *Call,
2158 MachineIRBuilder &MIRBuilder,
2159 SPIRVGlobalRegistry *GR) {
2160 if (Call->isSpirvOp())
2161 return buildOpFromWrapper(MIRBuilder, SPIRV::OpImageWrite, Call,
2162 Register(0));
2163 MIRBuilder.buildInstr(SPIRV::OpImageWrite)
2164 .addUse(Call->Arguments[0]) // Image.
2165 .addUse(Call->Arguments[1]) // Coordinate.
2166 .addUse(Call->Arguments[2]); // Texel.
2167 return true;
2168}
2169
2170static bool generateSampleImageInst(const StringRef DemangledCall,
2171 const SPIRV::IncomingCall *Call,
2172 MachineIRBuilder &MIRBuilder,
2173 SPIRVGlobalRegistry *GR) {
2174 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2175 if (Call->Builtin->Name.contains_insensitive(
2176 "__translate_sampler_initializer")) {
2177 // Build sampler literal.
2178 uint64_t Bitmask = getIConstVal(Call->Arguments[0], MRI);
2179 Register Sampler = GR->buildConstantSampler(
2180 Call->ReturnRegister, getSamplerAddressingModeFromBitmask(Bitmask),
2181 getSamplerParamFromBitmask(Bitmask),
2182 getSamplerFilterModeFromBitmask(Bitmask), MIRBuilder);
2183 return Sampler.isValid();
2184 } else if (Call->Builtin->Name.contains_insensitive("__spirv_SampledImage")) {
2185 // Create OpSampledImage.
2186 Register Image = Call->Arguments[0];
2187 SPIRVType *ImageType = GR->getSPIRVTypeForVReg(Image);
2188 SPIRVType *SampledImageType =
2189 GR->getOrCreateOpTypeSampledImage(ImageType, MIRBuilder);
2190 Register SampledImage =
2191 Call->ReturnRegister.isValid()
2192 ? Call->ReturnRegister
2193 : MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2194 MIRBuilder.buildInstr(SPIRV::OpSampledImage)
2195 .addDef(SampledImage)
2196 .addUse(GR->getSPIRVTypeID(SampledImageType))
2197 .addUse(Image)
2198 .addUse(Call->Arguments[1]); // Sampler.
2199 return true;
2200 } else if (Call->Builtin->Name.contains_insensitive(
2201 "__spirv_ImageSampleExplicitLod")) {
2202 // Sample an image using an explicit level of detail.
2203 std::string ReturnType = DemangledCall.str();
2204 if (DemangledCall.contains("_R")) {
2205 ReturnType = ReturnType.substr(ReturnType.find("_R") + 2);
2206 ReturnType = ReturnType.substr(0, ReturnType.find('('));
2207 }
2208 SPIRVType *Type =
2209 Call->ReturnType
2210 ? Call->ReturnType
2211 : GR->getOrCreateSPIRVTypeByName(ReturnType, MIRBuilder, true);
2212 if (!Type) {
2213 std::string DiagMsg =
2214 "Unable to recognize SPIRV type name: " + ReturnType;
2215 report_fatal_error(DiagMsg.c_str());
2216 }
2217 MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
2218 .addDef(Call->ReturnRegister)
2219 .addUse(GR->getSPIRVTypeID(Type))
2220 .addUse(Call->Arguments[0]) // Image.
2221 .addUse(Call->Arguments[1]) // Coordinate.
2222 .addImm(SPIRV::ImageOperand::Lod)
2223 .addUse(Call->Arguments[3]);
2224 return true;
2225 }
2226 return false;
2227}
2228
2229static bool generateSelectInst(const SPIRV::IncomingCall *Call,
2230 MachineIRBuilder &MIRBuilder) {
2231 MIRBuilder.buildSelect(Call->ReturnRegister, Call->Arguments[0],
2232 Call->Arguments[1], Call->Arguments[2]);
2233 return true;
2234}
2235
2236static bool generateConstructInst(const SPIRV::IncomingCall *Call,
2237 MachineIRBuilder &MIRBuilder,
2238 SPIRVGlobalRegistry *GR) {
2239 createContinuedInstructions(MIRBuilder, SPIRV::OpCompositeConstruct, 3,
2240 SPIRV::OpCompositeConstructContinuedINTEL,
2241 Call->Arguments, Call->ReturnRegister,
2242 GR->getSPIRVTypeID(Call->ReturnType));
2243 return true;
2244}
2245
2246static bool generateCoopMatrInst(const SPIRV::IncomingCall *Call,
2247 MachineIRBuilder &MIRBuilder,
2248 SPIRVGlobalRegistry *GR) {
2249 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2250 unsigned Opcode =
2251 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2252 bool IsSet = Opcode != SPIRV::OpCooperativeMatrixStoreKHR &&
2253 Opcode != SPIRV::OpCooperativeMatrixStoreCheckedINTEL &&
2254 Opcode != SPIRV::OpCooperativeMatrixPrefetchINTEL;
2255 unsigned ArgSz = Call->Arguments.size();
2256 unsigned LiteralIdx = 0;
2257 switch (Opcode) {
2258 // The memory operand is optional and is a literal.
2259 case SPIRV::OpCooperativeMatrixLoadKHR:
2260 LiteralIdx = ArgSz > 3 ? 3 : 0;
2261 break;
2262 case SPIRV::OpCooperativeMatrixStoreKHR:
2263 LiteralIdx = ArgSz > 4 ? 4 : 0;
2264 break;
2265 case SPIRV::OpCooperativeMatrixLoadCheckedINTEL:
2266 LiteralIdx = ArgSz > 7 ? 7 : 0;
2267 break;
2268 case SPIRV::OpCooperativeMatrixStoreCheckedINTEL:
2269 LiteralIdx = ArgSz > 8 ? 8 : 0;
2270 break;
2271 // The Cooperative Matrix Operands operand is optional and is a literal.
2272 case SPIRV::OpCooperativeMatrixMulAddKHR:
2273 LiteralIdx = ArgSz > 3 ? 3 : 0;
2274 break;
2275 };
2276
2277 SmallVector<uint32_t, 1> ImmArgs;
2278 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2279 if (Opcode == SPIRV::OpCooperativeMatrixPrefetchINTEL) {
2280 const uint32_t CacheLevel = getConstFromIntrinsic(Call->Arguments[3], MRI);
2281 auto MIB = MIRBuilder.buildInstr(SPIRV::OpCooperativeMatrixPrefetchINTEL)
2282 .addUse(Call->Arguments[0]) // pointer
2283 .addUse(Call->Arguments[1]) // rows
2284 .addUse(Call->Arguments[2]) // columns
2285 .addImm(CacheLevel) // cache level
2286 .addUse(Call->Arguments[4]); // memory layout
2287 if (ArgSz > 5)
2288 MIB.addUse(Call->Arguments[5]); // stride
2289 if (ArgSz > 6) {
2290 const uint32_t MemOp = getConstFromIntrinsic(Call->Arguments[6], MRI);
2291 MIB.addImm(MemOp); // memory operand
2292 }
2293 return true;
2294 }
2295 if (LiteralIdx > 0)
2296 ImmArgs.push_back(getConstFromIntrinsic(Call->Arguments[LiteralIdx], MRI));
2297 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
2298 if (Opcode == SPIRV::OpCooperativeMatrixLengthKHR) {
2299 SPIRVType *CoopMatrType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
2300 if (!CoopMatrType)
2301 report_fatal_error("Can't find a register's type definition");
2302 MIRBuilder.buildInstr(Opcode)
2303 .addDef(Call->ReturnRegister)
2304 .addUse(TypeReg)
2305 .addUse(CoopMatrType->getOperand(0).getReg());
2306 return true;
2307 }
2308 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2309 IsSet ? TypeReg : Register(0), ImmArgs);
2310}
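// Illustrative note (sketch): for OpCooperativeMatrixLoadKHR the optional
// memory operand arrives as a fourth IR argument holding a constant; the
// code above re-emits it as a trailing immediate literal rather than an id
// operand, giving roughly
//   %m = OpCooperativeMatrixLoadKHR %matTy %ptr %layout %stride MemOperand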
2311
2312static bool generateSpecConstantInst(const SPIRV::IncomingCall *Call,
2313 MachineIRBuilder &MIRBuilder,
2314 SPIRVGlobalRegistry *GR) {
2315 // Lookup the instruction opcode in the TableGen records.
2316 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2317 unsigned Opcode =
2318 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2319 const MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2320
2321 switch (Opcode) {
2322 case SPIRV::OpSpecConstant: {
2323 // Build the SpecID decoration.
2324 unsigned SpecId =
2325 static_cast<unsigned>(getIConstVal(Call->Arguments[0], MRI));
2326 buildOpDecorate(Call->ReturnRegister, MIRBuilder, SPIRV::Decoration::SpecId,
2327 {SpecId});
2328 // Determine the constant MI.
2329 Register ConstRegister = Call->Arguments[1];
2330 const MachineInstr *Const = getDefInstrMaybeConstant(ConstRegister, MRI);
2331 assert(Const &&
2332 (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
2333 Const->getOpcode() == TargetOpcode::G_FCONSTANT) &&
2334 "Argument should be either an int or floating-point constant");
2335 // Determine the opcode and built the OpSpec MI.
2336 const MachineOperand &ConstOperand = Const->getOperand(1);
2337 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeBool) {
2338 assert(ConstOperand.isCImm() && "Int constant operand is expected");
2339 Opcode = ConstOperand.getCImm()->getValue().getZExtValue()
2340 ? SPIRV::OpSpecConstantTrue
2341 : SPIRV::OpSpecConstantFalse;
2342 }
2343 auto MIB = MIRBuilder.buildInstr(Opcode)
2344 .addDef(Call->ReturnRegister)
2345 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
2346
2347 if (Call->ReturnType->getOpcode() != SPIRV::OpTypeBool) {
2348 if (Const->getOpcode() == TargetOpcode::G_CONSTANT)
2349 addNumImm(ConstOperand.getCImm()->getValue(), MIB);
2350 else
2351 addNumImm(ConstOperand.getFPImm()->getValueAPF().bitcastToAPInt(), MIB);
2352 }
2353 return true;
2354 }
2355 case SPIRV::OpSpecConstantComposite: {
2356 createContinuedInstructions(MIRBuilder, Opcode, 3,
2357 SPIRV::OpSpecConstantCompositeContinuedINTEL,
2358 Call->Arguments, Call->ReturnRegister,
2359 GR->getSPIRVTypeID(Call->ReturnType));
2360 return true;
2361 }
2362 default:
2363 return false;
2364 }
2365}
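// Illustrative sketch (not from the source): a spec-constant builtin call
// with SpecId 42 and a boolean default of true lowers roughly to
//   OpDecorate %r SpecId 42
//   %r = OpSpecConstantTrue %bool
// while a non-bool default is appended to OpSpecConstant as an immediate.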
2366
2367static bool generateExtendedBitOpsInst(const SPIRV::IncomingCall *Call,
2368 MachineIRBuilder &MIRBuilder,
2369 SPIRVGlobalRegistry *GR) {
2370 // Lookup the instruction opcode in the TableGen records.
2371 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2372 unsigned Opcode =
2373 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2374
2375 return buildExtendedBitOpsInst(Call, Opcode, MIRBuilder, GR);
2376}
2377
2378static bool generateBindlessImageINTELInst(const SPIRV::IncomingCall *Call,
2379 MachineIRBuilder &MIRBuilder,
2380 SPIRVGlobalRegistry *GR) {
2381 // Lookup the instruction opcode in the TableGen records.
2382 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2383 unsigned Opcode =
2384 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2385
2386 return buildBindlessImageINTELInst(Call, Opcode, MIRBuilder, GR);
2387}
2388
2389static bool generateBlockingPipesInst(const SPIRV::IncomingCall *Call,
2390 MachineIRBuilder &MIRBuilder,
2391 SPIRVGlobalRegistry *GR) {
2392 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2393 unsigned Opcode =
2394 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2395 return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0));
2396}
2397
2398static bool buildAPFixedPointInst(const SPIRV::IncomingCall *Call,
2399 unsigned Opcode, MachineIRBuilder &MIRBuilder,
2400 SPIRVGlobalRegistry *GR) {
2401 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2402 SmallVector<uint32_t, 5> ImmArgs;
2403 Register InputReg = Call->Arguments[0];
2404 const Type *RetTy = GR->getTypeForSPIRVType(Call->ReturnType);
2405 bool IsSRet = RetTy->isVoidTy();
2406
2407 if (IsSRet) {
2408 const LLT ValTy = MRI->getType(InputReg);
2409 Register ActualRetValReg = MRI->createGenericVirtualRegister(ValTy);
2410 SPIRVType *InstructionType =
2411 GR->getPointeeType(GR->getSPIRVTypeForVReg(InputReg));
2412 InputReg = Call->Arguments[1];
2413 auto InputType = GR->getTypeForSPIRVType(GR->getSPIRVTypeForVReg(InputReg));
2414 Register PtrInputReg;
2415 if (InputType->getTypeID() == llvm::Type::TypeID::TypedPointerTyID) {
2416 LLT InputLLT = MRI->getType(InputReg);
2417 PtrInputReg = MRI->createGenericVirtualRegister(InputLLT);
2418 SPIRVType *PtrType =
2419 GR->getPointeeType(GR->getSPIRVTypeForVReg(InputReg));
2420 MachineMemOperand *MMO1 = MIRBuilder.getMF().getMachineMemOperand(
2421 MachinePointerInfo(), MachineMemOperand::MOLoad,
2422 InputLLT.getSizeInBytes(), Align(4));
2423 MIRBuilder.buildLoad(PtrInputReg, InputReg, *MMO1);
2424 MRI->setRegClass(PtrInputReg, &SPIRV::iIDRegClass);
2425 GR->assignSPIRVTypeToVReg(PtrType, PtrInputReg, MIRBuilder.getMF());
2426 }
2427
2428 for (unsigned index = 2; index < 7; index++) {
2429 ImmArgs.push_back(getConstFromIntrinsic(Call->Arguments[index], MRI));
2430 }
2431
2432 // Emit the instruction
2433 auto MIB = MIRBuilder.buildInstr(Opcode)
2434 .addDef(ActualRetValReg)
2435 .addUse(GR->getSPIRVTypeID(InstructionType));
2436 if (PtrInputReg)
2437 MIB.addUse(PtrInputReg);
2438 else
2439 MIB.addUse(InputReg);
2440
2441 for (uint32_t Imm : ImmArgs)
2442 MIB.addImm(Imm);
2443 unsigned Size = ValTy.getSizeInBytes();
2444 // Store result to the pointer passed in Arg[0]
2445 MachineMemOperand *MMO = MIRBuilder.getMF().getMachineMemOperand(
2446 MachinePointerInfo(), MachineMemOperand::MOStore, Size, Align(4));
2447 MRI->setRegClass(ActualRetValReg, &SPIRV::pIDRegClass);
2448 MIRBuilder.buildStore(ActualRetValReg, Call->Arguments[0], *MMO);
2449 return true;
2450 } else {
2451 for (unsigned index = 1; index < 6; index++)
2452 ImmArgs.push_back(getConstFromIntrinsic(Call->Arguments[index], MRI));
2453
2454 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2455 GR->getSPIRVTypeID(Call->ReturnType), ImmArgs);
2456 }
2457}
2458
2459static bool generateAPFixedPointInst(const SPIRV::IncomingCall *Call,
2460 MachineIRBuilder &MIRBuilder,
2461 SPIRVGlobalRegistry *GR) {
2462 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2463 unsigned Opcode =
2464 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2465
2466 return buildAPFixedPointInst(Call, Opcode, MIRBuilder, GR);
2467}
2468
2469static bool
2470generateTernaryBitwiseFunctionINTELInst(const SPIRV::IncomingCall *Call,
2471 MachineIRBuilder &MIRBuilder,
2472 SPIRVGlobalRegistry *GR) {
2473 // Lookup the instruction opcode in the TableGen records.
2474 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2475 unsigned Opcode =
2476 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2477
2478 return buildTernaryBitwiseFunctionINTELInst(Call, Opcode, MIRBuilder, GR);
2479}
2480
2481static bool generate2DBlockIOINTELInst(const SPIRV::IncomingCall *Call,
2482 MachineIRBuilder &MIRBuilder,
2483 SPIRVGlobalRegistry *GR) {
2484 // Lookup the instruction opcode in the TableGen records.
2485 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2486 unsigned Opcode =
2487 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2488
2489 return build2DBlockIOINTELInst(Call, Opcode, MIRBuilder, GR);
2490}
2491
2492static bool generatePipeInst(const SPIRV::IncomingCall *Call,
2493 MachineIRBuilder &MIRBuilder,
2494 SPIRVGlobalRegistry *GR) {
2495 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2496 unsigned Opcode =
2497 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2498
2499 unsigned Scope = SPIRV::Scope::Workgroup;
2500 if (Builtin->Name.contains("sub_group"))
2501 Scope = SPIRV::Scope::Subgroup;
2502
2503 return buildPipeInst(Call, Opcode, Scope, MIRBuilder, GR);
2504}
2505
2506static bool generatePredicatedLoadStoreInst(const SPIRV::IncomingCall *Call,
2507 MachineIRBuilder &MIRBuilder,
2508 SPIRVGlobalRegistry *GR) {
2509 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2510 unsigned Opcode =
2511 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2512
2513 bool IsSet = Opcode != SPIRV::OpPredicatedStoreINTEL;
2514 unsigned ArgSz = Call->Arguments.size();
2515 SmallVector<uint32_t, 1> ImmArgs;
2516 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2517 // The memory operand is optional and is a literal.
2518 if (ArgSz > 3)
2519 ImmArgs.push_back(
2520 getConstFromIntrinsic(Call->Arguments[/*Literal index*/ 3], MRI));
2521
2522 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
2523 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2524 IsSet ? TypeReg : Register(0), ImmArgs);
2525}
2526
2527static bool buildNDRange(const SPIRV::IncomingCall *Call,
2528 MachineIRBuilder &MIRBuilder,
2529 SPIRVGlobalRegistry *GR) {
2530 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2531 SPIRVType *PtrType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
2532 assert(PtrType->getOpcode() == SPIRV::OpTypePointer &&
2533 PtrType->getOperand(2).isReg());
2534 Register TypeReg = PtrType->getOperand(2).getReg();
2535 SPIRVType *StructType = GR->getSPIRVTypeForVReg(TypeReg);
2536 MachineFunction &MF = MIRBuilder.getMF();
2537 Register TmpReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2538 GR->assignSPIRVTypeToVReg(StructType, TmpReg, MF);
2539 // Skip the first arg, it's the destination pointer. OpBuildNDRange takes
2540 // three other arguments, so pass a zero constant for any that are absent.
2541 unsigned NumArgs = Call->Arguments.size();
2542 assert(NumArgs >= 2);
2543 Register GlobalWorkSize = Call->Arguments[NumArgs < 4 ? 1 : 2];
2544 Register LocalWorkSize =
2545 NumArgs == 2 ? Register(0) : Call->Arguments[NumArgs < 4 ? 2 : 3];
2546 Register GlobalWorkOffset = NumArgs <= 3 ? Register(0) : Call->Arguments[1];
2547 if (NumArgs < 4) {
2548 Register Const;
2549 SPIRVType *SpvTy = GR->getSPIRVTypeForVReg(GlobalWorkSize);
2550 if (SpvTy->getOpcode() == SPIRV::OpTypePointer) {
2551 MachineInstr *DefInstr = MRI->getUniqueVRegDef(GlobalWorkSize);
2552 assert(DefInstr && isSpvIntrinsic(*DefInstr, Intrinsic::spv_gep) &&
2553 DefInstr->getOperand(3).isReg());
2554 Register GWSPtr = DefInstr->getOperand(3).getReg();
2555 // TODO: Maybe simplify generation of the type of the fields.
2556 unsigned Size = Call->Builtin->Name == "ndrange_3D" ? 3 : 2;
2557 unsigned BitWidth = GR->getPointerSize() == 64 ? 64 : 32;
2558 Type *BaseTy = IntegerType::get(MF.getFunction().getContext(), BitWidth);
2559 Type *FieldTy = ArrayType::get(BaseTy, Size);
2560 SPIRVType *SpvFieldTy = GR->getOrCreateSPIRVType(
2561 FieldTy, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, true);
2562 GlobalWorkSize = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2563 GR->assignSPIRVTypeToVReg(SpvFieldTy, GlobalWorkSize, MF);
2564 MIRBuilder.buildInstr(SPIRV::OpLoad)
2565 .addDef(GlobalWorkSize)
2566 .addUse(GR->getSPIRVTypeID(SpvFieldTy))
2567 .addUse(GWSPtr);
2568 const SPIRVSubtarget &ST =
2569 cast<SPIRVSubtarget>(MIRBuilder.getMF().getSubtarget());
2570 Const = GR->getOrCreateConstIntArray(0, Size, *MIRBuilder.getInsertPt(),
2571 SpvFieldTy, *ST.getInstrInfo());
2572 } else {
2573 Const = GR->buildConstantInt(0, MIRBuilder, SpvTy, true);
2574 }
2575 if (!LocalWorkSize.isValid())
2576 LocalWorkSize = Const;
2577 if (!GlobalWorkOffset.isValid())
2578 GlobalWorkOffset = Const;
2579 }
2580 assert(LocalWorkSize.isValid() && GlobalWorkOffset.isValid());
2581 MIRBuilder.buildInstr(SPIRV::OpBuildNDRange)
2582 .addDef(TmpReg)
2583 .addUse(TypeReg)
2584 .addUse(GlobalWorkSize)
2585 .addUse(LocalWorkSize)
2586 .addUse(GlobalWorkOffset);
2587 return MIRBuilder.buildInstr(SPIRV::OpStore)
2588 .addUse(Call->Arguments[0])
2589 .addUse(TmpReg);
2590}
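// Illustrative sketch (not from the source): the two-argument form
//   ndrange_t r = ndrange_1D(gws);
// has no local size or offset, so both collapse to a zero constant:
//   %tmp = OpBuildNDRange %ndrangeTy %gws %zero %zero
//   OpStore %rptr %tmp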
2591
2592// TODO: maybe move to the global registry.
2593static SPIRVType *
2594getOrCreateSPIRVDeviceEventPointer(MachineIRBuilder &MIRBuilder,
2595 SPIRVGlobalRegistry *GR) {
2596 LLVMContext &Context = MIRBuilder.getMF().getFunction().getContext();
2597 unsigned SC1 = storageClassToAddressSpace(SPIRV::StorageClass::Generic);
2598 Type *PtrType = PointerType::get(Context, SC1);
2599 return GR->getOrCreateSPIRVType(PtrType, MIRBuilder,
2600 SPIRV::AccessQualifier::ReadWrite, true);
2601}
2602
2603static bool buildEnqueueKernel(const SPIRV::IncomingCall *Call,
2604 MachineIRBuilder &MIRBuilder,
2605 SPIRVGlobalRegistry *GR) {
2606 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2607 const DataLayout &DL = MIRBuilder.getDataLayout();
2608 bool IsSpirvOp = Call->isSpirvOp();
2609 bool HasEvents = Call->Builtin->Name.contains("events") || IsSpirvOp;
2610 const SPIRVType *Int32Ty = GR->getOrCreateSPIRVIntegerType(32, MIRBuilder);
2611
2612 // Build the vararg operands before emitting OpEnqueueKernel.
2613 // Local sizes arguments: Sizes of block invoke arguments. Clang generates
2614 // local size operands as an array, so we need to unpack them.
2615 SmallVector<Register, 16> LocalSizes;
2616 if (Call->Builtin->Name.contains("_varargs") || IsSpirvOp) {
2617 const unsigned LocalSizeArrayIdx = HasEvents ? 9 : 6;
2618 Register GepReg = Call->Arguments[LocalSizeArrayIdx];
2619 MachineInstr *GepMI = MRI->getUniqueVRegDef(GepReg);
2620 assert(isSpvIntrinsic(*GepMI, Intrinsic::spv_gep) &&
2621 GepMI->getOperand(3).isReg());
2622 Register ArrayReg = GepMI->getOperand(3).getReg();
2623 MachineInstr *ArrayMI = MRI->getUniqueVRegDef(ArrayReg);
2624 const Type *LocalSizeTy = getMachineInstrType(ArrayMI);
2625 assert(LocalSizeTy && "Local size type is expected");
2626 const uint64_t LocalSizeNum =
2627 cast<ArrayType>(LocalSizeTy)->getNumElements();
2628 unsigned SC = storageClassToAddressSpace(SPIRV::StorageClass::Generic);
2629 const LLT LLType = LLT::pointer(SC, GR->getPointerSize());
2630 const SPIRVType *PointerSizeTy = GR->getOrCreateSPIRVPointerType(
2631 Int32Ty, MIRBuilder, SPIRV::StorageClass::Function);
2632 for (unsigned I = 0; I < LocalSizeNum; ++I) {
2633 Register Reg = MRI->createVirtualRegister(&SPIRV::pIDRegClass);
2634 MRI->setType(Reg, LLType);
2635 GR->assignSPIRVTypeToVReg(PointerSizeTy, Reg, MIRBuilder.getMF());
2636 auto GEPInst = MIRBuilder.buildIntrinsic(
2637 Intrinsic::spv_gep, ArrayRef<Register>{Reg}, true, false);
2638 GEPInst
2639 .addImm(GepMI->getOperand(2).getImm()) // In bound.
2640 .addUse(ArrayMI->getOperand(0).getReg()) // Alloca.
2641 .addUse(buildConstantIntReg32(0, MIRBuilder, GR)) // Indices.
2642 .addUse(buildConstantIntReg32(I, MIRBuilder, GR));
2643 LocalSizes.push_back(Reg);
2644 }
2645 }
2646
2647 // SPIRV OpEnqueueKernel instruction has 10+ arguments.
2648 auto MIB = MIRBuilder.buildInstr(SPIRV::OpEnqueueKernel)
2649 .addDef(Call->ReturnRegister)
2650 .addUse(GR->getSPIRVTypeID(Int32Ty));
2651
2652 // Copy all arguments before block invoke function pointer.
2653 const unsigned BlockFIdx = HasEvents ? 6 : 3;
2654 for (unsigned i = 0; i < BlockFIdx; i++)
2655 MIB.addUse(Call->Arguments[i]);
2656
2657 // If there are no event arguments in the original call, add dummy ones.
2658 if (!HasEvents) {
2659 MIB.addUse(buildConstantIntReg32(0, MIRBuilder, GR)); // Dummy num events.
2660 Register NullPtr = GR->getOrCreateConstNullPtr(
2661 MIRBuilder, getOrCreateSPIRVDeviceEventPointer(MIRBuilder, GR));
2662 MIB.addUse(NullPtr); // Dummy wait events.
2663 MIB.addUse(NullPtr); // Dummy ret event.
2664 }
2665
2666 MachineInstr *BlockMI = getBlockStructInstr(Call->Arguments[BlockFIdx], MRI);
2667 assert(BlockMI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
2668 // Invoke: Pointer to invoke function.
2669 MIB.addGlobalAddress(BlockMI->getOperand(1).getGlobal());
2670
2671 Register BlockLiteralReg = Call->Arguments[BlockFIdx + 1];
2672 // Param: Pointer to block literal.
2673 MIB.addUse(BlockLiteralReg);
2674
2675 Type *PType = const_cast<Type *>(getBlockStructType(BlockLiteralReg, MRI));
2676 // TODO: these numbers should be obtained from block literal structure.
2677 // Param Size: Size of block literal structure.
2678 MIB.addUse(buildConstantIntReg32(DL.getTypeStoreSize(PType), MIRBuilder, GR));
2679 // Param Alignment: Alignment of block literal structure.
2680 MIB.addUse(buildConstantIntReg32(DL.getPrefTypeAlign(PType).value(),
2681 MIRBuilder, GR));
2682
2683 for (unsigned i = 0; i < LocalSizes.size(); i++)
2684 MIB.addUse(LocalSizes[i]);
2685 return true;
2686}
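// Illustrative operand layout of the emitted OpEnqueueKernel (sketch): even
// without event arguments in the source call it receives a zero event count
// and null wait/ret event pointers, then the invoke function, the block
// literal pointer, its size and alignment, and one pointer per unpacked
// local size.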
2687
2688static bool generateEnqueueInst(const SPIRV::IncomingCall *Call,
2689 MachineIRBuilder &MIRBuilder,
2690 SPIRVGlobalRegistry *GR) {
2691 // Lookup the instruction opcode in the TableGen records.
2692 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2693 unsigned Opcode =
2694 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2695
2696 switch (Opcode) {
2697 case SPIRV::OpRetainEvent:
2698 case SPIRV::OpReleaseEvent:
2699 return MIRBuilder.buildInstr(Opcode).addUse(Call->Arguments[0]);
2700 case SPIRV::OpCreateUserEvent:
2701 case SPIRV::OpGetDefaultQueue:
2702 return MIRBuilder.buildInstr(Opcode)
2703 .addDef(Call->ReturnRegister)
2704 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
2705 case SPIRV::OpIsValidEvent:
2706 return MIRBuilder.buildInstr(Opcode)
2707 .addDef(Call->ReturnRegister)
2708 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2709 .addUse(Call->Arguments[0]);
2710 case SPIRV::OpSetUserEventStatus:
2711 return MIRBuilder.buildInstr(Opcode)
2712 .addUse(Call->Arguments[0])
2713 .addUse(Call->Arguments[1]);
2714 case SPIRV::OpCaptureEventProfilingInfo:
2715 return MIRBuilder.buildInstr(Opcode)
2716 .addUse(Call->Arguments[0])
2717 .addUse(Call->Arguments[1])
2718 .addUse(Call->Arguments[2]);
2719 case SPIRV::OpBuildNDRange:
2720 return buildNDRange(Call, MIRBuilder, GR);
2721 case SPIRV::OpEnqueueKernel:
2722 return buildEnqueueKernel(Call, MIRBuilder, GR);
2723 default:
2724 return false;
2725 }
2726}
2727
2728static bool generateAsyncCopy(const SPIRV::IncomingCall *Call,
2729 MachineIRBuilder &MIRBuilder,
2730 SPIRVGlobalRegistry *GR) {
2731 // Lookup the instruction opcode in the TableGen records.
2732 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2733 unsigned Opcode =
2734 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2735
2736 bool IsSet = Opcode == SPIRV::OpGroupAsyncCopy;
2737 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
2738 if (Call->isSpirvOp())
2739 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2740 IsSet ? TypeReg : Register(0));
2741
2742 auto Scope = buildConstantIntReg32(SPIRV::Scope::Workgroup, MIRBuilder, GR);
2743
2744 switch (Opcode) {
2745 case SPIRV::OpGroupAsyncCopy: {
2746 SPIRVType *NewType =
2747 Call->ReturnType->getOpcode() == SPIRV::OpTypeEvent
2748 ? nullptr
2749 : GR->getOrCreateSPIRVTypeByName("spirv.Event", MIRBuilder, true);
2750 Register TypeReg = GR->getSPIRVTypeID(NewType ? NewType : Call->ReturnType);
2751 unsigned NumArgs = Call->Arguments.size();
2752 Register EventReg = Call->Arguments[NumArgs - 1];
2753 bool Res = MIRBuilder.buildInstr(Opcode)
2754 .addDef(Call->ReturnRegister)
2755 .addUse(TypeReg)
2756 .addUse(Scope)
2757 .addUse(Call->Arguments[0])
2758 .addUse(Call->Arguments[1])
2759 .addUse(Call->Arguments[2])
2760 .addUse(Call->Arguments.size() > 4
2761 ? Call->Arguments[3]
2762 : buildConstantIntReg32(1, MIRBuilder, GR))
2763 .addUse(EventReg);
2764 if (NewType != nullptr)
2765 updateRegType(Call->ReturnRegister, nullptr, NewType, GR, MIRBuilder,
2766 MIRBuilder.getMF().getRegInfo());
2767 return Res;
2768 }
2769 case SPIRV::OpGroupWaitEvents:
2770 return MIRBuilder.buildInstr(Opcode)
2771 .addUse(Scope)
2772 .addUse(Call->Arguments[0])
2773 .addUse(Call->Arguments[1]);
2774 default:
2775 return false;
2776 }
2777}
2778
2779static bool generateConvertInst(const StringRef DemangledCall,
2780 const SPIRV::IncomingCall *Call,
2781 MachineIRBuilder &MIRBuilder,
2782 SPIRVGlobalRegistry *GR) {
2783 // Lookup the conversion builtin in the TableGen records.
2784 const SPIRV::ConvertBuiltin *Builtin =
2785 SPIRV::lookupConvertBuiltin(Call->Builtin->Name, Call->Builtin->Set);
2786
2787 if (!Builtin && Call->isSpirvOp()) {
2788 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2789 unsigned Opcode =
2790 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2791 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2792 GR->getSPIRVTypeID(Call->ReturnType));
2793 }
2794
2795 assert(Builtin && "Conversion builtin not found.");
2796 if (Builtin->IsSaturated)
2797 buildOpDecorate(Call->ReturnRegister, MIRBuilder,
2798 SPIRV::Decoration::SaturatedConversion, {});
2799 if (Builtin->IsRounded)
2800 buildOpDecorate(Call->ReturnRegister, MIRBuilder,
2801 SPIRV::Decoration::FPRoundingMode,
2802 {(unsigned)Builtin->RoundingMode});
2803
2804 std::string NeedExtMsg; // no errors if empty
2805 bool IsRightComponentsNumber = true; // input/output component counts must match
2806 unsigned Opcode = SPIRV::OpNop;
2807 if (GR->isScalarOrVectorOfType(Call->Arguments[0], SPIRV::OpTypeInt)) {
2808 // Int -> ...
2809 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) {
2810 // Int -> Int
2811 if (Builtin->IsSaturated)
2812 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpSatConvertUToS
2813 : SPIRV::OpSatConvertSToU;
2814 else
2815 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpUConvert
2816 : SPIRV::OpSConvert;
2817 } else if (GR->isScalarOrVectorOfType(Call->ReturnRegister,
2818 SPIRV::OpTypeFloat)) {
2819 // Int -> Float
2820 if (Builtin->IsBfloat16) {
2821 const auto *ST = static_cast<const SPIRVSubtarget *>(
2822 &MIRBuilder.getMF().getSubtarget());
2823 if (!ST->canUseExtension(
2824 SPIRV::Extension::SPV_INTEL_bfloat16_conversion))
2825 NeedExtMsg = "SPV_INTEL_bfloat16_conversion";
2826 IsRightComponentsNumber =
2827 GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
2828 GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
2829 Opcode = SPIRV::OpConvertBF16ToFINTEL;
2830 } else {
2831 bool IsSourceSigned =
2832 DemangledCall[DemangledCall.find_first_of('(') + 1] != 'u';
2833 Opcode = IsSourceSigned ? SPIRV::OpConvertSToF : SPIRV::OpConvertUToF;
2834 }
2835 }
2836 } else if (GR->isScalarOrVectorOfType(Call->Arguments[0],
2837 SPIRV::OpTypeFloat)) {
2838 // Float -> ...
2839 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) {
2840 // Float -> Int
2841 if (Builtin->IsBfloat16) {
2842 const auto *ST = static_cast<const SPIRVSubtarget *>(
2843 &MIRBuilder.getMF().getSubtarget());
2844 if (!ST->canUseExtension(
2845 SPIRV::Extension::SPV_INTEL_bfloat16_conversion))
2846 NeedExtMsg = "SPV_INTEL_bfloat16_conversion";
2847 IsRightComponentsNumber =
2848 GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
2849 GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
2850 Opcode = SPIRV::OpConvertFToBF16INTEL;
2851 } else {
2852 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpConvertFToS
2853 : SPIRV::OpConvertFToU;
2854 }
2855 } else if (GR->isScalarOrVectorOfType(Call->ReturnRegister,
2856 SPIRV::OpTypeFloat)) {
2857 if (Builtin->IsTF32) {
2858 const auto *ST = static_cast<const SPIRVSubtarget *>(
2859 &MIRBuilder.getMF().getSubtarget());
2860 if (!ST->canUseExtension(
2861 SPIRV::Extension::SPV_INTEL_tensor_float32_conversion))
2862 NeedExtMsg = "SPV_INTEL_tensor_float32_conversion";
2863 IsRightComponentsNumber =
2864 GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
2865 GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
2866 Opcode = SPIRV::OpRoundFToTF32INTEL;
2867 } else {
2868 // Float -> Float
2869 Opcode = SPIRV::OpFConvert;
2870 }
2871 }
2872 }
2873
2874 if (!NeedExtMsg.empty()) {
2875 std::string DiagMsg = std::string(Builtin->Name) +
2876 ": the builtin requires the following SPIR-V "
2877 "extension: " +
2878 NeedExtMsg;
2879 report_fatal_error(DiagMsg.c_str(), false);
2880 }
2881 if (!IsRightComponentsNumber) {
2882 std::string DiagMsg =
2883 std::string(Builtin->Name) +
2884 ": result and argument must have the same number of components";
2885 report_fatal_error(DiagMsg.c_str(), false);
2886 }
2887 assert(Opcode != SPIRV::OpNop &&
2888 "Conversion between the types not implemented!");
2889
2890 MIRBuilder.buildInstr(Opcode)
2891 .addDef(Call->ReturnRegister)
2892 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2893 .addUse(Call->Arguments[0]);
2894 return true;
2895}
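// Illustrative sketch (not from the source): a saturated, rounded conversion
// such as
//   int4 r = convert_int4_sat_rte(f); // f: float4
// first decorates the result, then emits a single conversion op:
//   OpDecorate %r SaturatedConversion
//   OpDecorate %r FPRoundingMode RTE
//   %r = OpConvertFToS %v4int %f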
2896
2897static bool generateVectorLoadStoreInst(const SPIRV::IncomingCall *Call,
2898 MachineIRBuilder &MIRBuilder,
2899 SPIRVGlobalRegistry *GR) {
2900 // Lookup the vector load/store builtin in the TableGen records.
2901 const SPIRV::VectorLoadStoreBuiltin *Builtin =
2902 SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name,
2903 Call->Builtin->Set);
2904 // Build extended instruction.
2905 auto MIB =
2906 MIRBuilder.buildInstr(SPIRV::OpExtInst)
2907 .addDef(Call->ReturnRegister)
2908 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2909 .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
2910 .addImm(Builtin->Number);
2911 for (auto Argument : Call->Arguments)
2912 MIB.addUse(Argument);
2913 if (Builtin->Name.contains("load") && Builtin->ElementCount > 1)
2914 MIB.addImm(Builtin->ElementCount);
2915
2916 // The rounding mode should be passed as the last argument in the MI for builtins
2917 // like "vstorea_halfn_r".
2918 if (Builtin->IsRounded)
2919 MIB.addImm(static_cast<uint32_t>(Builtin->RoundingMode));
2920 return true;
2921}
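// Illustrative sketch (not from the source): vload4(offset, p) maps onto the
// OpenCL extended instruction set, roughly
//   %r = OpExtInst %v4float %opencl_std vloadn %offset %p 4
// and a rounded store such as vstorea_half4_rtz additionally appends its
// rounding-mode literal as the last operand.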
2922
2923static bool generateLoadStoreInst(const SPIRV::IncomingCall *Call,
2924 MachineIRBuilder &MIRBuilder,
2925 SPIRVGlobalRegistry *GR) {
2926 // Lookup the instruction opcode in the TableGen records.
2927 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2928 unsigned Opcode =
2929 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2930 bool IsLoad = Opcode == SPIRV::OpLoad;
2931 // Build the instruction.
2932 auto MIB = MIRBuilder.buildInstr(Opcode);
2933 if (IsLoad) {
2934 MIB.addDef(Call->ReturnRegister);
2935 MIB.addUse(GR->getSPIRVTypeID(Call->ReturnType));
2936 }
2937 // Add a pointer to the value to load/store.
2938 MIB.addUse(Call->Arguments[0]);
2939 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2940 // Add a value to store.
2941 if (!IsLoad)
2942 MIB.addUse(Call->Arguments[1]);
2943 // Add optional memory attributes and an alignment.
2944 unsigned NumArgs = Call->Arguments.size();
2945 if ((IsLoad && NumArgs >= 2) || NumArgs >= 3)
2946 MIB.addImm(getConstFromIntrinsic(Call->Arguments[IsLoad ? 1 : 2], MRI));
2947 if ((IsLoad && NumArgs >= 3) || NumArgs >= 4)
2948 MIB.addImm(getConstFromIntrinsic(Call->Arguments[IsLoad ? 2 : 3], MRI));
2949 return true;
2950}
2951
2952namespace SPIRV {
2953// Try to find a builtin function's attributes by its demangled name and
2954// return a tuple <builtin group, opcode, ext instruction number>, or the
2955// special tuple value <-1, 0, 0> if the builtin function is not found.
2956// Not all builtin functions are supported; only those with a ready-to-use
2957// opcode or instruction number defined in TableGen.
2958// TODO: consider a major rework of mapping demangled calls to builtin
2959// functions to unify the search and decrease the number of individual cases.
2960std::tuple<int, unsigned, unsigned>
2961mapBuiltinToOpcode(const StringRef DemangledCall,
2962 SPIRV::InstructionSet::InstructionSet Set) {
2963 Register Reg;
2964 SmallVector<Register> Args;
2965 std::unique_ptr<const IncomingCall> Call =
2966 lookupBuiltin(DemangledCall, Set, Reg, nullptr, Args);
2967 if (!Call)
2968 return std::make_tuple(-1, 0, 0);
2969
2970 switch (Call->Builtin->Group) {
2971 case SPIRV::Relational:
2972 case SPIRV::Atomic:
2973 case SPIRV::Barrier:
2974 case SPIRV::CastToPtr:
2975 case SPIRV::ImageMiscQuery:
2976 case SPIRV::SpecConstant:
2977 case SPIRV::Enqueue:
2978 case SPIRV::AsyncCopy:
2979 case SPIRV::LoadStore:
2980 case SPIRV::CoopMatr:
2981 if (const auto *R =
2982 SPIRV::lookupNativeBuiltin(Call->Builtin->Name, Call->Builtin->Set))
2983 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2984 break;
2985 case SPIRV::Extended:
2986 if (const auto *R = SPIRV::lookupExtendedBuiltin(Call->Builtin->Name,
2987 Call->Builtin->Set))
2988 return std::make_tuple(Call->Builtin->Group, 0, R->Number);
2989 break;
2990 case SPIRV::VectorLoadStore:
2991 if (const auto *R = SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name,
2992 Call->Builtin->Set))
2993 return std::make_tuple(SPIRV::Extended, 0, R->Number);
2994 break;
2995 case SPIRV::Group:
2996 if (const auto *R = SPIRV::lookupGroupBuiltin(Call->Builtin->Name))
2997 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2998 break;
2999 case SPIRV::AtomicFloating:
3000 if (const auto *R = SPIRV::lookupAtomicFloatingBuiltin(Call->Builtin->Name))
3001 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
3002 break;
3003 case SPIRV::IntelSubgroups:
3004 if (const auto *R = SPIRV::lookupIntelSubgroupsBuiltin(Call->Builtin->Name))
3005 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
3006 break;
3007 case SPIRV::GroupUniform:
3008 if (const auto *R = SPIRV::lookupGroupUniformBuiltin(Call->Builtin->Name))
3009 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
3010 break;
3011 case SPIRV::IntegerDot:
3012 if (const auto *R =
3013 SPIRV::lookupIntegerDotProductBuiltin(Call->Builtin->Name))
3014 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
3015 break;
3016 case SPIRV::WriteImage:
3017 return std::make_tuple(Call->Builtin->Group, SPIRV::OpImageWrite, 0);
3018 case SPIRV::Select:
3019 return std::make_tuple(Call->Builtin->Group, TargetOpcode::G_SELECT, 0);
3020 case SPIRV::Construct:
3021 return std::make_tuple(Call->Builtin->Group, SPIRV::OpCompositeConstruct,
3022 0);
3023 case SPIRV::KernelClock:
3024 return std::make_tuple(Call->Builtin->Group, SPIRV::OpReadClockKHR, 0);
3025 default:
3026 return std::make_tuple(-1, 0, 0);
3027 }
3028 return std::make_tuple(-1, 0, 0);
3029}
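// Illustrative use (hypothetical caller): the tuple can be unpacked as
//   auto [Group, Opcode, ExtNo] = mapBuiltinToOpcode(Demangled, Set);
//   if (Group < 0) { /* not a recognized builtin */ }
// where Opcode or ExtNo is meaningful depending on the returned group.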
3030
3031std::optional<bool> lowerBuiltin(const StringRef DemangledCall,
3032 SPIRV::InstructionSet::InstructionSet Set,
3033 MachineIRBuilder &MIRBuilder,
3034 const Register OrigRet, const Type *OrigRetTy,
3035 const SmallVectorImpl<Register> &Args,
3036 SPIRVGlobalRegistry *GR, const CallBase &CB) {
3037 LLVM_DEBUG(dbgs() << "Lowering builtin call: " << DemangledCall << "\n");
3038
3039 // Lookup the builtin in the TableGen records.
3040 SPIRVType *SpvType = GR->getSPIRVTypeForVReg(OrigRet);
3041 assert(SpvType && "Inconsistent return register: expected valid type info");
3042 std::unique_ptr<const IncomingCall> Call =
3043 lookupBuiltin(DemangledCall, Set, OrigRet, SpvType, Args);
3044
3045 if (!Call) {
3046 LLVM_DEBUG(dbgs() << "Builtin record was not found!\n");
3047 return std::nullopt;
3048 }
3049
3050 // TODO: check if the provided args meet the builtin requirements.
3051 assert(Args.size() >= Call->Builtin->MinNumArgs &&
3052 "Too few arguments to generate the builtin");
3053 if (Call->Builtin->MaxNumArgs && Args.size() > Call->Builtin->MaxNumArgs)
3054 LLVM_DEBUG(dbgs() << "More arguments provided than required!\n");
3055
3056 // Match the builtin with implementation based on the grouping.
3057 switch (Call->Builtin->Group) {
3058 case SPIRV::Extended:
3059 return generateExtInst(Call.get(), MIRBuilder, GR, CB);
3060 case SPIRV::Relational:
3061 return generateRelationalInst(Call.get(), MIRBuilder, GR);
3062 case SPIRV::Group:
3063 return generateGroupInst(Call.get(), MIRBuilder, GR);
3064 case SPIRV::Variable:
3065 return generateBuiltinVar(Call.get(), MIRBuilder, GR);
3066 case SPIRV::Atomic:
3067 return generateAtomicInst(Call.get(), MIRBuilder, GR);
3068 case SPIRV::AtomicFloating:
3069 return generateAtomicFloatingInst(Call.get(), MIRBuilder, GR);
3070 case SPIRV::Barrier:
3071 return generateBarrierInst(Call.get(), MIRBuilder, GR);
3072 case SPIRV::CastToPtr:
3073 return generateCastToPtrInst(Call.get(), MIRBuilder, GR);
3074 case SPIRV::Dot:
3075 case SPIRV::IntegerDot:
3076 return generateDotOrFMulInst(DemangledCall, Call.get(), MIRBuilder, GR);
3077 case SPIRV::Wave:
3078 return generateWaveInst(Call.get(), MIRBuilder, GR);
3079 case SPIRV::ICarryBorrow:
3080 return generateICarryBorrowInst(Call.get(), MIRBuilder, GR);
3081 case SPIRV::GetQuery:
3082 return generateGetQueryInst(Call.get(), MIRBuilder, GR);
3083 case SPIRV::ImageSizeQuery:
3084 return generateImageSizeQueryInst(Call.get(), MIRBuilder, GR);
3085 case SPIRV::ImageMiscQuery:
3086 return generateImageMiscQueryInst(Call.get(), MIRBuilder, GR);
3087 case SPIRV::ReadImage:
3088 return generateReadImageInst(DemangledCall, Call.get(), MIRBuilder, GR);
3089 case SPIRV::WriteImage:
3090 return generateWriteImageInst(Call.get(), MIRBuilder, GR);
3091 case SPIRV::SampleImage:
3092 return generateSampleImageInst(DemangledCall, Call.get(), MIRBuilder, GR);
3093 case SPIRV::Select:
3094 return generateSelectInst(Call.get(), MIRBuilder);
3095 case SPIRV::Construct:
3096 return generateConstructInst(Call.get(), MIRBuilder, GR);
3097 case SPIRV::SpecConstant:
3098 return generateSpecConstantInst(Call.get(), MIRBuilder, GR);
3099 case SPIRV::Enqueue:
3100 return generateEnqueueInst(Call.get(), MIRBuilder, GR);
3101 case SPIRV::AsyncCopy:
3102 return generateAsyncCopy(Call.get(), MIRBuilder, GR);
3103 case SPIRV::Convert:
3104 return generateConvertInst(DemangledCall, Call.get(), MIRBuilder, GR);
3105 case SPIRV::VectorLoadStore:
3106 return generateVectorLoadStoreInst(Call.get(), MIRBuilder, GR);
3107 case SPIRV::LoadStore:
3108 return generateLoadStoreInst(Call.get(), MIRBuilder, GR);
3109 case SPIRV::IntelSubgroups:
3110 return generateIntelSubgroupsInst(Call.get(), MIRBuilder, GR);
3111 case SPIRV::GroupUniform:
3112 return generateGroupUniformInst(Call.get(), MIRBuilder, GR);
3113 case SPIRV::KernelClock:
3114 return generateKernelClockInst(Call.get(), MIRBuilder, GR);
3115 case SPIRV::CoopMatr:
3116 return generateCoopMatrInst(Call.get(), MIRBuilder, GR);
3117 case SPIRV::ExtendedBitOps:
3118 return generateExtendedBitOpsInst(Call.get(), MIRBuilder, GR);
3119 case SPIRV::BindlessINTEL:
3120 return generateBindlessImageINTELInst(Call.get(), MIRBuilder, GR);
3121 case SPIRV::TernaryBitwiseINTEL:
3122 return generateTernaryBitwiseFunctionINTELInst(Call.get(), MIRBuilder, GR);
3123 case SPIRV::Block2DLoadStore:
3124 return generate2DBlockIOINTELInst(Call.get(), MIRBuilder, GR);
3125 case SPIRV::Pipe:
3126 return generatePipeInst(Call.get(), MIRBuilder, GR);
3127 case SPIRV::PredicatedLoadStore:
3128 return generatePredicatedLoadStoreInst(Call.get(), MIRBuilder, GR);
3129 case SPIRV::BlockingPipes:
3130 return generateBlockingPipesInst(Call.get(), MIRBuilder, GR);
3131 case SPIRV::ArbitraryPrecisionFixedPoint:
3132 return generateAPFixedPointInst(Call.get(), MIRBuilder, GR);
3133 }
3134 return false;
3135}
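
// Illustrative sketch (editor's addition): lowerBuiltin returns a tri-state
// result, and a hypothetical call site would distinguish the cases like this:
// std::nullopt means no TableGen record matched (not a recognized builtin),
// while true/false report whether the matched builtin was lowered
// successfully.
static bool tryLowerBuiltinSketch(StringRef DemangledCall,
                                  SPIRV::InstructionSet::InstructionSet Set,
                                  MachineIRBuilder &MIRBuilder,
                                  Register OrigRet, const Type *OrigRetTy,
                                  const SmallVectorImpl<Register> &Args,
                                  SPIRVGlobalRegistry *GR, const CallBase &CB) {
  std::optional<bool> Res = SPIRV::lowerBuiltin(
      DemangledCall, Set, MIRBuilder, OrigRet, OrigRetTy, Args, GR, CB);
  if (!Res.has_value())
    return false; // Unknown builtin: fall back to a plain function call.
  return *Res;    // Known builtin: lowering succeeded (true) or failed (false).
}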
3136
3137Type *parseBuiltinCallArgumentType(StringRef TypeStr, LLVMContext &Ctx) {
3138 // Parse strings representing OpenCL builtin types.
3139 if (hasBuiltinTypePrefix(TypeStr)) {
3140 // OpenCL builtin types in demangled call strings have the following format:
3141 // e.g. ocl_image2d_ro
3142 [[maybe_unused]] bool IsOCLBuiltinType = TypeStr.consume_front("ocl_");
3143 assert(IsOCLBuiltinType && "Invalid OpenCL builtin prefix");
3144
3145 // Check if this is a pointer to a builtin type, as opposed to a pointer
3146 // merely representing a builtin type. If it is a pointer to a builtin
3147 // type, the method calling parseBuiltinCallArgumentBaseType(...) will
3148 // require additional handling, as this function only retrieves the base
3149 // types.
3150 if (TypeStr.ends_with("*"))
3151 TypeStr = TypeStr.slice(0, TypeStr.find_first_of(" *"));
3152
3153 return parseBuiltinTypeNameToTargetExtType("opencl." + TypeStr.str() + "_t",
3154 Ctx);
3155 }
3156
3157 // Parse type name in either "typeN" or "type vector[N]" format, where
3158 // N is the number of elements of the vector.
3159 Type *BaseType;
3160 unsigned VecElts = 0;
3161
3162 BaseType = parseBasicTypeName(TypeStr, Ctx);
3163 if (!BaseType)
3164 // Unable to recognize SPIRV type name.
3165 return nullptr;
3166
3167 // Handle "typeN*" or "type vector[N]*".
3168 TypeStr.consume_back("*");
3169
3170 if (TypeStr.consume_front(" vector["))
3171 TypeStr = TypeStr.substr(0, TypeStr.find(']'));
3172
3173 TypeStr.getAsInteger(10, VecElts);
3174 if (VecElts > 0)
3175 BaseType = VectorType::get(
3176 BaseType->isVoidTy() ? Type::getInt8Ty(Ctx) : BaseType, VecElts, false);
3177
3178 return BaseType;
3179}
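
// Worked examples (editor's addition; the expected results follow from the
// rules above, assuming the usual TableGen records for OpenCL types exist):
static void argumentTypeExamples(LLVMContext &Ctx) {
  // "typeN" form: a 4-element float vector.
  assert(SPIRV::parseBuiltinCallArgumentType("float4", Ctx) ==
         FixedVectorType::get(Type::getFloatTy(Ctx), 4));
  // "ocl_" prefix: routed through parseBuiltinTypeNameToTargetExtType and
  // returned as a target extension type.
  assert(isa<TargetExtType>(
      SPIRV::parseBuiltinCallArgumentType("ocl_sampler", Ctx)));
}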
3180
3181bool parseBuiltinTypeStr(SmallVector<StringRef, 10> &BuiltinArgsTypeStrs,
3182 const StringRef DemangledCall, LLVMContext &Ctx) {
3183 auto Pos1 = DemangledCall.find('(');
3184 if (Pos1 == StringRef::npos)
3185 return false;
3186 auto Pos2 = DemangledCall.find(')');
3187 if (Pos2 == StringRef::npos || Pos1 > Pos2)
3188 return false;
3189 DemangledCall.slice(Pos1 + 1, Pos2)
3190 .split(BuiltinArgsTypeStrs, ',', -1, false);
3191 return true;
3192}
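
// Illustrative usage (editor's addition): for the call skeleton
// "fract(float, float*)" this fills BuiltinArgsTypeStrs with {"float",
// " float*"}; callers trim each entry before parsing it, as done below.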
3193
3194Type *parseBuiltinCallArgumentBaseType(const StringRef DemangledCall,
3195 unsigned ArgIdx, LLVMContext &Ctx) {
3196 SmallVector<StringRef, 10> BuiltinArgsTypeStrs;
3197 parseBuiltinTypeStr(BuiltinArgsTypeStrs, DemangledCall, Ctx);
3198 if (ArgIdx >= BuiltinArgsTypeStrs.size())
3199 return nullptr;
3200 StringRef TypeStr = BuiltinArgsTypeStrs[ArgIdx].trim();
3201 return parseBuiltinCallArgumentType(TypeStr, Ctx);
3202}
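
// Illustrative usage (editor's addition): extract the base type of the second
// argument (ArgIdx == 1) from a demangled call skeleton. For the OpenCL
// builtin "fract(float, float*)" this yields float, since the trailing '*' is
// stripped and only the base type is returned.
static Type *secondArgBaseTypeExample(LLVMContext &Ctx) {
  return SPIRV::parseBuiltinCallArgumentBaseType("fract(float, float*)", 1,
                                                 Ctx);
}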
3203
3204struct BuiltinType {
3205 StringRef Name;
3206 uint32_t Opcode;
3207};
3208
3209#define GET_BuiltinTypes_DECL
3210#define GET_BuiltinTypes_IMPL
3211
3212struct OpenCLType {
3213 StringRef Name;
3214 StringRef SpirvTypeLiteral;
3215};
3216
3217#define GET_OpenCLTypes_DECL
3218#define GET_OpenCLTypes_IMPL
3219
3220#include "SPIRVGenTables.inc"
3221} // namespace SPIRV
3222
3223//===----------------------------------------------------------------------===//
3224// Misc functions for parsing builtin types.
3225//===----------------------------------------------------------------------===//
3226
3227static Type *parseTypeString(const StringRef Name, LLVMContext &Context) {
3228 if (Name.starts_with("void"))
3229 return Type::getVoidTy(Context);
3230 else if (Name.starts_with("int") || Name.starts_with("uint"))
3231 return Type::getInt32Ty(Context);
3232 else if (Name.starts_with("float"))
3233 return Type::getFloatTy(Context);
3234 else if (Name.starts_with("half"))
3235 return Type::getHalfTy(Context);
3236 report_fatal_error("Unable to recognize type!");
3237}
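
// Worked examples (editor's addition): signedness is dropped, so both "int"
// and "uint" map to i32; any unrecognized name aborts via report_fatal_error.
static void typeStringExamples(LLVMContext &Ctx) {
  assert(parseTypeString("uint", Ctx) == Type::getInt32Ty(Ctx));
  assert(parseTypeString("half", Ctx) == Type::getHalfTy(Ctx));
}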
3238
3239//===----------------------------------------------------------------------===//
3240// Implementation functions for builtin types.
3241//===----------------------------------------------------------------------===//
3242
3243static SPIRVType *getNonParameterizedType(const TargetExtType *ExtensionType,
3244 const SPIRV::BuiltinType *TypeRecord,
3245 MachineIRBuilder &MIRBuilder,
3246 SPIRVGlobalRegistry *GR) {
3247 unsigned Opcode = TypeRecord->Opcode;
3248 // Create or get an existing type from GlobalRegistry.
3249 return GR->getOrCreateOpTypeByOpcode(ExtensionType, MIRBuilder, Opcode);
3250}
3251
3252static SPIRVType *getSamplerType(MachineIRBuilder &MIRBuilder,
3253 SPIRVGlobalRegistry *GR) {
3254 // Create or get an existing type from GlobalRegistry.
3255 return GR->getOrCreateOpTypeSampler(MIRBuilder);
3256}
3257
3258static SPIRVType *getPipeType(const TargetExtType *ExtensionType,
3259 MachineIRBuilder &MIRBuilder,
3260 SPIRVGlobalRegistry *GR) {
3261 assert(ExtensionType->getNumIntParameters() == 1 &&
3262 "Invalid number of parameters for SPIR-V pipe builtin!");
3263 // Create or get an existing type from GlobalRegistry.
3264 return GR->getOrCreateOpTypePipe(MIRBuilder,
3265 SPIRV::AccessQualifier::AccessQualifier(
3266 ExtensionType->getIntParameter(0)));
3267}
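
// Illustrative IR (editor's assumption about a typical input):
// target("spirv.Pipe", 0) lowers to an OpTypePipe with access qualifier 0
// (ReadOnly); 1 and 2 would select WriteOnly and ReadWrite respectively.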
3268
3269static SPIRVType *getCoopMatrType(const TargetExtType *ExtensionType,
3270 MachineIRBuilder &MIRBuilder,
3271 SPIRVGlobalRegistry *GR) {
3272 assert(ExtensionType->getNumIntParameters() == 4 &&
3273 "Invalid number of parameters for SPIR-V coop matrices builtin!");
3274 assert(ExtensionType->getNumTypeParameters() == 1 &&
3275 "SPIR-V coop matrices builtin type must have a type parameter!");
3276 const SPIRVType *ElemType =
3277 GR->getOrCreateSPIRVType(ExtensionType->getTypeParameter(0), MIRBuilder,
3278 SPIRV::AccessQualifier::ReadWrite, true);
3279 // Create or get an existing type from GlobalRegistry.
3280 return GR->getOrCreateOpTypeCoopMatr(
3281 MIRBuilder, ExtensionType, ElemType, ExtensionType->getIntParameter(0),
3282 ExtensionType->getIntParameter(1), ExtensionType->getIntParameter(2),
3283 ExtensionType->getIntParameter(3), true);
3284}
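
// Illustrative IR (editor's assumption, not taken from this file): the single
// type parameter is the element type and the four integer parameters map, in
// order, to Scope, Rows, Columns and Use, so
//
//   target("spirv.CooperativeMatrixKHR", float, 3, 8, 16, 0)
//
// lowers to an OpTypeCooperativeMatrixKHR with float components, scope 3
// (Subgroup), 8 rows, 16 columns and use 0 (MatrixAKHR).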
3285
3286static SPIRVType *getSampledImageType(const TargetExtType *OpaqueType,
3287 MachineIRBuilder &MIRBuilder,
3288 SPIRVGlobalRegistry *GR) {
3289 SPIRVType *OpaqueImageType = GR->getImageType(
3290 OpaqueType, SPIRV::AccessQualifier::ReadOnly, MIRBuilder);
3291 // Create or get an existing type from GlobalRegistry.
3292 return GR->getOrCreateOpTypeSampledImage(OpaqueImageType, MIRBuilder);
3293}
3294
3295static SPIRVType *getInlineSpirvType(const TargetExtType *ExtensionType,
3296 MachineIRBuilder &MIRBuilder,
3297 SPIRVGlobalRegistry *GR) {
3298 assert(ExtensionType->getNumIntParameters() == 3 &&
3299 "Inline SPIR-V type builtin takes an opcode, size, and alignment "
3300 "parameter");
3301 auto Opcode = ExtensionType->getIntParameter(0);
3302
3303 SmallVector<MCOperand> Operands;
3304 for (Type *Param : ExtensionType->type_params()) {
3305 if (const TargetExtType *ParamEType = dyn_cast<TargetExtType>(Param)) {
3306 if (ParamEType->getName() == "spirv.IntegralConstant") {
3307 assert(ParamEType->getNumTypeParameters() == 1 &&
3308 "Inline SPIR-V integral constant builtin must have a type "
3309 "parameter");
3310 assert(ParamEType->getNumIntParameters() == 1 &&
3311 "Inline SPIR-V integral constant builtin must have a "
3312 "value parameter");
3313
3314 auto OperandValue = ParamEType->getIntParameter(0);
3315 auto *OperandType = ParamEType->getTypeParameter(0);
3316
3317 const SPIRVType *OperandSPIRVType = GR->getOrCreateSPIRVType(
3318 OperandType, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, true);
3319
3320 Operands.push_back(MCOperand::createReg(GR->buildConstantInt(
3321 OperandValue, MIRBuilder, OperandSPIRVType, true)));
3322 continue;
3323 } else if (ParamEType->getName() == "spirv.Literal") {
3324 assert(ParamEType->getNumTypeParameters() == 0 &&
3325 "Inline SPIR-V literal builtin does not take type "
3326 "parameters");
3327 assert(ParamEType->getNumIntParameters() == 1 &&
3328 "Inline SPIR-V literal builtin must have an integer "
3329 "parameter");
3330
3331 auto OperandValue = ParamEType->getIntParameter(0);
3332
3333 Operands.push_back(MCOperand::createImm(OperandValue));
3334 continue;
3335 }
3336 }
3337 const SPIRVType *TypeOperand = GR->getOrCreateSPIRVType(
3338 Param, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, true);
3339 Operands.push_back(MCOperand::createReg(GR->getSPIRVTypeID(TypeOperand)));
3340 }
3341
3342 return GR->getOrCreateUnknownType(ExtensionType, MIRBuilder, Opcode,
3343 Operands);
3344}
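
// Illustrative IR (editor's assumption): the three integer parameters of
// "spirv.Type" are the opcode, size and alignment, and each type parameter
// becomes one operand of the emitted type instruction. For example,
//
//   target("spirv.Type", target("spirv.Literal", 32),
//          target("spirv.Literal", 0), 21, 4, 32)
//
// describes OpTypeInt 32 0 (opcode 21): the two spirv.Literal parameters are
// emitted as immediate operands rather than as type operands.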
3345
3346static SPIRVType *getVulkanBufferType(const TargetExtType *ExtensionType,
3347 MachineIRBuilder &MIRBuilder,
3348 SPIRVGlobalRegistry *GR) {
3349 assert(ExtensionType->getNumTypeParameters() == 1 &&
3350 "Vulkan buffers have exactly one type for the type of the buffer.");
3351 assert(ExtensionType->getNumIntParameters() == 2 &&
3352 "Vulkan buffer have 2 integer parameters: storage class and is "
3353 "writable.");
3354
3355 auto *T = ExtensionType->getTypeParameter(0);
3356 auto SC = static_cast<SPIRV::StorageClass::StorageClass>(
3357 ExtensionType->getIntParameter(0));
3358 bool IsWritable = ExtensionType->getIntParameter(1);
3359 return GR->getOrCreateVulkanBufferType(MIRBuilder, T, SC, IsWritable);
3360}
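
// Illustrative IR (editor's assumption): a non-writable storage buffer of i32
// would arrive as
//
//   target("spirv.VulkanBuffer", i32, 12, 0)
//
// where 12 is the StorageBuffer storage class and the trailing 0 means the
// buffer is not writable.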
3361
3362static SPIRVType *getLayoutType(const TargetExtType *ExtensionType,
3363 MachineIRBuilder &MIRBuilder,
3364 SPIRVGlobalRegistry *GR) {
3365 return GR->getOrCreateLayoutType(MIRBuilder, ExtensionType);
3366}
3367
3368namespace SPIRV {
3369TargetExtType *parseBuiltinTypeNameToTargetExtType(std::string TypeName,
3370 LLVMContext &Context) {
3371 StringRef NameWithParameters = TypeName;
3372
3373 // Pointers-to-opaque-structs representing OpenCL types are first translated
3374 // to equivalent SPIR-V types. OpenCL builtin type names should have the
3375 // following format: e.g. %opencl.event_t
3376 if (NameWithParameters.starts_with("opencl.")) {
3377 const SPIRV::OpenCLType *OCLTypeRecord =
3378 SPIRV::lookupOpenCLType(NameWithParameters);
3379 if (!OCLTypeRecord)
3380 report_fatal_error("Missing TableGen record for OpenCL type: " +
3381 NameWithParameters);
3382 NameWithParameters = OCLTypeRecord->SpirvTypeLiteral;
3383 // Continue with the SPIR-V builtin type...
3384 }
3385
3386 // Names of the opaque structs representing SPIR-V builtins without
3387 // parameters should have the following format: e.g. %spirv.Event
3388 assert(NameWithParameters.starts_with("spirv.") &&
3389 "Unknown builtin opaque type!");
3390
3391 // Parameterized SPIR-V builtin names follow this format:
3392 // e.g. %spirv.Image._void_1_0_0_0_0_0_0, %spirv.Pipe._0
3393 if (!NameWithParameters.contains('_'))
3394 return TargetExtType::get(Context, NameWithParameters);
3395
3396 SmallVector<StringRef> Parameters;
3397 unsigned BaseNameLength = NameWithParameters.find('_') - 1;
3398 SplitString(NameWithParameters.substr(BaseNameLength + 1), Parameters, "_");
3399
3400 SmallVector<Type *, 1> TypeParameters;
3401 bool HasTypeParameter = !isDigit(Parameters[0][0]);
3402 if (HasTypeParameter)
3403 TypeParameters.push_back(parseTypeString(Parameters[0], Context));
3404 SmallVector<unsigned> IntParameters;
3405 for (unsigned i = HasTypeParameter ? 1 : 0; i < Parameters.size(); i++) {
3406 unsigned IntParameter = 0;
3407 bool ValidLiteral = !Parameters[i].getAsInteger(10, IntParameter);
3408 (void)ValidLiteral;
3409 assert(ValidLiteral &&
3410 "Invalid format of SPIR-V builtin parameter literal!");
3411 IntParameters.push_back(IntParameter);
3412 }
3413 return TargetExtType::get(Context,
3414 NameWithParameters.substr(0, BaseNameLength),
3415 TypeParameters, IntParameters);
3416}
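
// Worked example (editor's addition, following the format described above):
// for the struct name "spirv.Image._void_1_0_0_0_0_0_0", the base name
// "spirv.Image" is the text before the "._" separator; SplitString then
// yields the parameters {"void", "1", "0", "0", "0", "0", "0", "0"}. Since
// "void" does not start with a digit, it is parsed by parseTypeString into
// the single type parameter, and the remaining seven literals become the
// integer parameters of the resulting TargetExtType.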
3417
3418SPIRVType *lowerBuiltinType(const Type *OpaqueType,
3419 SPIRV::AccessQualifier::AccessQualifier AccessQual,
3420 MachineIRBuilder &MIRBuilder,
3421 SPIRVGlobalRegistry *GR) {
3422 // In LLVM IR, SPIR-V and OpenCL builtin types are represented as either
3423 // target(...) target extension types or pointers-to-opaque-structs. The
3424 // approach relying on structs is deprecated and works only in the non-opaque
3425 // pointer mode (-opaque-pointers=0).
3426 // In order to maintain compatibility with LLVM IR generated by older versions
3427 // of Clang and LLVM/SPIR-V Translator, the pointers-to-opaque-structs are
3428 // "translated" to target extension types. This translation is temporary and
3429 // will be removed in a future release of LLVM.
3430 const TargetExtType *BuiltinType = dyn_cast<TargetExtType>(OpaqueType);
3431 if (!BuiltinType)
3432 BuiltinType = parseBuiltinTypeNameToTargetExtType(
3433 OpaqueType->getStructName().str(), MIRBuilder.getContext());
3434
3435 unsigned NumStartingVRegs = MIRBuilder.getMRI()->getNumVirtRegs();
3436
3437 const StringRef Name = BuiltinType->getName();
3438 LLVM_DEBUG(dbgs() << "Lowering builtin type: " << Name << "\n");
3439
3440 SPIRVType *TargetType;
3441 if (Name == "spirv.Type") {
3442 TargetType = getInlineSpirvType(BuiltinType, MIRBuilder, GR);
3443 } else if (Name == "spirv.VulkanBuffer") {
3444 TargetType = getVulkanBufferType(BuiltinType, MIRBuilder, GR);
3445 } else if (Name == "spirv.Padding") {
3446 TargetType = GR->getOrCreatePaddingType(MIRBuilder);
3447 } else if (Name == "spirv.Layout") {
3448 TargetType = getLayoutType(BuiltinType, MIRBuilder, GR);
3449 } else {
3450 // Lookup the demangled builtin type in the TableGen records.
3451 const SPIRV::BuiltinType *TypeRecord = SPIRV::lookupBuiltinType(Name);
3452 if (!TypeRecord)
3453 report_fatal_error("Missing TableGen record for builtin type: " + Name);
3454
3455 // "Lower" the BuiltinType into TargetType. The following get<...>Type
3456 // methods use the implementation details from TableGen records or
3457 // TargetExtType parameters to either create a new OpType<...> machine
3458 // instruction or get an existing equivalent SPIRVType from
3459 // GlobalRegistry.
3460
3461 switch (TypeRecord->Opcode) {
3462 case SPIRV::OpTypeImage:
3463 TargetType = GR->getImageType(BuiltinType, AccessQual, MIRBuilder);
3464 break;
3465 case SPIRV::OpTypePipe:
3466 TargetType = getPipeType(BuiltinType, MIRBuilder, GR);
3467 break;
3468 case SPIRV::OpTypeDeviceEvent:
3469 TargetType = GR->getOrCreateOpTypeDeviceEvent(MIRBuilder);
3470 break;
3471 case SPIRV::OpTypeSampler:
3472 TargetType = getSamplerType(MIRBuilder, GR);
3473 break;
3474 case SPIRV::OpTypeSampledImage:
3475 TargetType = getSampledImageType(BuiltinType, MIRBuilder, GR);
3476 break;
3477 case SPIRV::OpTypeCooperativeMatrixKHR:
3478 TargetType = getCoopMatrType(BuiltinType, MIRBuilder, GR);
3479 break;
3480 default:
3481 TargetType =
3482 getNonParameterizedType(BuiltinType, TypeRecord, MIRBuilder, GR);
3483 break;
3484 }
3485 }
3486
3487 // Emit OpName instruction if a new OpType<...> instruction was added
3488 // (equivalent type was not found in GlobalRegistry).
3489 if (NumStartingVRegs < MIRBuilder.getMRI()->getNumVirtRegs())
3490 buildOpName(GR->getSPIRVTypeID(TargetType), Name, MIRBuilder);
3491
3492 return TargetType;
3493}
3494} // namespace SPIRV
3495} // namespace llvm