//===- SPIRVBuiltins.cpp - SPIR-V Built-in Functions ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements lowering builtin function calls and types using their
// demangled names and TableGen records.
//
//===----------------------------------------------------------------------===//

#include "SPIRVBuiltins.h"
#include "SPIRV.h"
#include "SPIRVSubtarget.h"
#include "SPIRVUtils.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IntrinsicsSPIRV.h"
#include <string>
#include <tuple>

#define DEBUG_TYPE "spirv-builtins"

namespace llvm {
namespace SPIRV {
#define GET_BuiltinGroup_DECL
#include "SPIRVGenTables.inc"

struct DemangledBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  BuiltinGroup Group;
  uint8_t MinNumArgs;
  uint8_t MaxNumArgs;
};

#define GET_DemangledBuiltins_DECL
#define GET_DemangledBuiltins_IMPL

struct IncomingCall {
  const std::string BuiltinName;
  const DemangledBuiltin *Builtin;

  const Register ReturnRegister;
  const SPIRVType *ReturnType;
  const SmallVectorImpl<Register> &Arguments;

  IncomingCall(const std::string BuiltinName, const DemangledBuiltin *Builtin,
               const Register ReturnRegister, const SPIRVType *ReturnType,
               const SmallVectorImpl<Register> &Arguments)
      : BuiltinName(BuiltinName), Builtin(Builtin),
        ReturnRegister(ReturnRegister), ReturnType(ReturnType),
        Arguments(Arguments) {}
};

struct NativeBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  uint32_t Opcode;
};

#define GET_NativeBuiltins_DECL
#define GET_NativeBuiltins_IMPL

struct GroupBuiltin {
  StringRef Name;
  uint32_t Opcode;
  uint32_t GroupOperation;
  bool IsElect;
  bool IsAllOrAny;
  bool IsAllEqual;
  bool IsBallot;
  bool IsInverseBallot;
  bool IsBallotBitExtract;
  bool IsBallotFindBit;
  bool IsLogical;
  bool NoGroupOperation;
  bool HasBoolArg;
};

#define GET_GroupBuiltins_DECL
#define GET_GroupBuiltins_IMPL

struct IntelSubgroupsBuiltin {
  StringRef Name;
  uint32_t Opcode;
  bool IsBlock;
  bool IsWrite;
};

#define GET_IntelSubgroupsBuiltins_DECL
#define GET_IntelSubgroupsBuiltins_IMPL

struct AtomicFloatingBuiltin {
  StringRef Name;
  uint32_t Opcode;
};

#define GET_AtomicFloatingBuiltins_DECL
#define GET_AtomicFloatingBuiltins_IMPL
struct GroupUniformBuiltin {
  StringRef Name;
  uint32_t Opcode;
  bool IsLogical;
};

#define GET_GroupUniformBuiltins_DECL
#define GET_GroupUniformBuiltins_IMPL

struct GetBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  BuiltIn::BuiltIn Value;
};

using namespace BuiltIn;
#define GET_GetBuiltins_DECL
#define GET_GetBuiltins_IMPL

struct ImageQueryBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  uint32_t Component;
};

#define GET_ImageQueryBuiltins_DECL
#define GET_ImageQueryBuiltins_IMPL

struct ConvertBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  bool IsDestinationSigned;
  bool IsSaturated;
  bool IsRounded;
  FPRoundingMode::FPRoundingMode RoundingMode;
};

struct VectorLoadStoreBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  uint32_t Number;
  uint32_t ElementCount;
  bool IsRounded;
  FPRoundingMode::FPRoundingMode RoundingMode;
};

using namespace FPRoundingMode;
#define GET_ConvertBuiltins_DECL
#define GET_ConvertBuiltins_IMPL

using namespace InstructionSet;
#define GET_VectorLoadStoreBuiltins_DECL
#define GET_VectorLoadStoreBuiltins_IMPL

#define GET_CLMemoryScope_DECL
#define GET_CLSamplerAddressingMode_DECL
#define GET_CLMemoryFenceFlags_DECL
#define GET_ExtendedBuiltins_DECL
#include "SPIRVGenTables.inc"
} // namespace SPIRV

//===----------------------------------------------------------------------===//
// Misc functions for looking up builtins and verifying requirements using
// TableGen records
//===----------------------------------------------------------------------===//

/// Looks up the demangled builtin call in the SPIRVBuiltins.td records using
/// the provided \p DemangledCall and specified \p Set.
///
/// The lookup follows the following algorithm, returning the first successful
/// match:
/// 1. Search with the plain demangled name (expecting a 1:1 match).
/// 2. Search with the prefix before or suffix after the demangled name
/// signifying the type of the first argument.
///
/// \returns Wrapper around the demangled call and found builtin definition.
static std::unique_ptr<const SPIRV::IncomingCall>
lookupBuiltin(StringRef DemangledCall,
              SPIRV::InstructionSet::InstructionSet Set,
              Register ReturnRegister, const SPIRVType *ReturnType,
              const SmallVectorImpl<Register> &Arguments) {
  // Extract the builtin function name and types of arguments from the call
  // skeleton.
  std::string BuiltinName =
      DemangledCall.substr(0, DemangledCall.find('(')).str();

  // Check if the extracted name contains type information between angle
  // brackets. If so, the builtin is an instantiated template and the
  // information after the angle brackets and the return type must be removed.
  if (BuiltinName.find('<') != std::string::npos &&
      BuiltinName.back() == '>') {
    BuiltinName = BuiltinName.substr(0, BuiltinName.find('<'));
    BuiltinName = BuiltinName.substr(BuiltinName.find_last_of(' ') + 1);
  }

  // Check if the extracted name begins with "__spirv_ImageSampleExplicitLod"
  // and contains return type information at the end ("_R<type>"). If so,
  // extract the plain builtin name without the type information.
  if (StringRef(BuiltinName).contains("__spirv_ImageSampleExplicitLod") &&
      StringRef(BuiltinName).contains("_R")) {
    BuiltinName = BuiltinName.substr(0, BuiltinName.find("_R"));
  }


  SmallVector<StringRef, 10> BuiltinArgumentTypes;
  StringRef BuiltinArgs =
      DemangledCall.slice(DemangledCall.find('(') + 1, DemangledCall.find(')'));
  BuiltinArgs.split(BuiltinArgumentTypes, ',', -1, false);

  // Look up the builtin in the defined set. Start with the plain demangled
  // name, expecting a 1:1 match in the defined builtin set.
  const SPIRV::DemangledBuiltin *Builtin;
  if ((Builtin = SPIRV::lookupBuiltin(BuiltinName, Set)))
    return std::make_unique<SPIRV::IncomingCall>(
        BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);

  // If the initial look up was unsuccessful and the demangled call takes at
  // least 1 argument, add a prefix or suffix signifying the type of the first
  // argument and repeat the search.
  if (BuiltinArgumentTypes.size() >= 1) {
    char FirstArgumentType = BuiltinArgumentTypes[0][0];
    // Prefix to be added to the builtin's name for lookup.
    // For example, OpenCL "abs" taking an unsigned value has a prefix "u_".
    std::string Prefix;

    switch (FirstArgumentType) {
    // Unsigned:
    case 'u':
      if (Set == SPIRV::InstructionSet::OpenCL_std)
        Prefix = "u_";
      else if (Set == SPIRV::InstructionSet::GLSL_std_450)
        Prefix = "u";
      break;
    // Signed:
    case 'c':
    case 's':
    case 'i':
    case 'l':
      if (Set == SPIRV::InstructionSet::OpenCL_std)
        Prefix = "s_";
      else if (Set == SPIRV::InstructionSet::GLSL_std_450)
        Prefix = "s";
      break;
    // Floating-point:
    case 'f':
    case 'd':
    case 'h':
      if (Set == SPIRV::InstructionSet::OpenCL_std ||
          Set == SPIRV::InstructionSet::GLSL_std_450)
        Prefix = "f";
      break;
    }

    // If argument-type name prefix was added, look up the builtin again.
    if (!Prefix.empty() &&
        (Builtin = SPIRV::lookupBuiltin(Prefix + BuiltinName, Set)))
      return std::make_unique<SPIRV::IncomingCall>(
          BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);

    // If lookup with a prefix failed, find a suffix to be added to the
    // builtin's name for lookup. For example, OpenCL "group_reduce_max" taking
    // an unsigned value has a suffix "u".
    std::string Suffix;

    switch (FirstArgumentType) {
    // Unsigned:
    case 'u':
      Suffix = "u";
      break;
    // Signed:
    case 'c':
    case 's':
    case 'i':
    case 'l':
      Suffix = "s";
      break;
    // Floating-point:
    case 'f':
    case 'd':
    case 'h':
      Suffix = "f";
      break;
    }

    // If argument-type name suffix was added, look up the builtin again.
    if (!Suffix.empty() &&
        (Builtin = SPIRV::lookupBuiltin(BuiltinName + Suffix, Set)))
      return std::make_unique<SPIRV::IncomingCall>(
          BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);
  }

  // No builtin with such a name was found in the set.
  return nullptr;
}

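// For illustration only (not part of the lookup itself): given the OpenCL_std
// set, a demangled call "fmax(float, float)" matches the "fmax" record on the
// plain-name search, while "abs(uint)" misses on the plain name and is only
// found after the first argument's 'u' adds the "u_" prefix, i.e. as "u_abs".
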
//===----------------------------------------------------------------------===//
// Helper functions for building misc instructions
//===----------------------------------------------------------------------===//

/// Helper function building either a resulting scalar or vector bool register
/// depending on the expected \p ResultType.
///
/// \returns Tuple of the resulting register and its type.
static std::tuple<Register, SPIRVType *>
buildBoolRegister(MachineIRBuilder &MIRBuilder, const SPIRVType *ResultType,
                  SPIRVGlobalRegistry *GR) {
  LLT Type;
  SPIRVType *BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder);

  if (ResultType->getOpcode() == SPIRV::OpTypeVector) {
    unsigned VectorElements = ResultType->getOperand(2).getImm();
    BoolType =
        GR->getOrCreateSPIRVVectorType(BoolType, VectorElements, MIRBuilder);
    const FixedVectorType *LLVMVectorType =
        cast<FixedVectorType>(GR->getTypeForSPIRVType(BoolType));
    Type = LLT::vector(LLVMVectorType->getElementCount(), 1);
  } else {
    Type = LLT::scalar(1);
  }

  Register ResultRegister =
      MIRBuilder.getMRI()->createGenericVirtualRegister(Type);
  MIRBuilder.getMRI()->setRegClass(ResultRegister, &SPIRV::IDRegClass);
  GR->assignSPIRVTypeToVReg(BoolType, ResultRegister, MIRBuilder.getMF());
  return std::make_tuple(ResultRegister, BoolType);
}

/// Helper function for building either a vector or scalar select instruction
/// depending on the expected \p ResultType.
static bool buildSelectInst(MachineIRBuilder &MIRBuilder,
                            Register ReturnRegister, Register SourceRegister,
                            const SPIRVType *ReturnType,
                            SPIRVGlobalRegistry *GR) {
  Register TrueConst, FalseConst;

  if (ReturnType->getOpcode() == SPIRV::OpTypeVector) {
    unsigned Bits = GR->getScalarOrVectorBitWidth(ReturnType);
    uint64_t AllOnes = APInt::getAllOnes(Bits).getZExtValue();
    TrueConst = GR->getOrCreateConsIntVector(AllOnes, MIRBuilder, ReturnType);
    FalseConst = GR->getOrCreateConsIntVector(0, MIRBuilder, ReturnType);
  } else {
    TrueConst = GR->buildConstantInt(1, MIRBuilder, ReturnType);
    FalseConst = GR->buildConstantInt(0, MIRBuilder, ReturnType);
  }
  return MIRBuilder.buildSelect(ReturnRegister, SourceRegister, TrueConst,
                                FalseConst);
}

/// Helper function for building a load instruction loading into the
/// \p DestinationReg.
static Register buildLoadInst(SPIRVType *BaseType, Register PtrRegister,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR, LLT LowLevelType,
                              Register DestinationReg = Register(0)) {
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  if (!DestinationReg.isValid()) {
    DestinationReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    MRI->setType(DestinationReg, LLT::scalar(32));
    GR->assignSPIRVTypeToVReg(BaseType, DestinationReg, MIRBuilder.getMF());
  }
  // TODO: consider using correct address space and alignment (p0 is canonical
  // type for selection though).
  MachinePointerInfo PtrInfo = MachinePointerInfo();
  MIRBuilder.buildLoad(DestinationReg, PtrRegister, PtrInfo, Align());
  return DestinationReg;
}

/// Helper function for building a load instruction for loading a builtin
/// global variable of \p BuiltinValue value.
static Register buildBuiltinVariableLoad(MachineIRBuilder &MIRBuilder,
                                         SPIRVType *VariableType,
                                         SPIRVGlobalRegistry *GR,
                                         SPIRV::BuiltIn::BuiltIn BuiltinValue,
                                         LLT LLType,
                                         Register Reg = Register(0)) {
  Register NewRegister =
      MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::IDRegClass);
  MIRBuilder.getMRI()->setType(NewRegister,
                               LLT::pointer(0, GR->getPointerSize()));
  SPIRVType *PtrType = GR->getOrCreateSPIRVPointerType(
      VariableType, MIRBuilder, SPIRV::StorageClass::Input);
  GR->assignSPIRVTypeToVReg(PtrType, NewRegister, MIRBuilder.getMF());

  // Set up the global OpVariable with the necessary builtin decorations.
  Register Variable = GR->buildGlobalVariable(
      NewRegister, PtrType, getLinkStringForBuiltIn(BuiltinValue), nullptr,
      SPIRV::StorageClass::Input, nullptr, true, true,
      SPIRV::LinkageType::Import, MIRBuilder, false);

  // Load the value from the global variable.
  Register LoadedRegister =
      buildLoadInst(VariableType, Variable, MIRBuilder, GR, LLType, Reg);
  MIRBuilder.getMRI()->setType(LoadedRegister, LLType);
  return LoadedRegister;
}

/// Helper external function for inserting ASSIGN_TYPE instruction between
/// \p Reg and its definition, set the new register as a destination of the
/// definition, assign SPIRVType to both registers. If SpirvTy is provided,
/// use it as SPIRVType in ASSIGN_TYPE, otherwise create it from \p Ty.
/// Defined in SPIRVPreLegalizer.cpp.
extern Register insertAssignInstr(Register Reg, Type *Ty, SPIRVType *SpirvTy,
                                  SPIRVGlobalRegistry *GR,
                                  MachineIRBuilder &MIB,
                                  MachineRegisterInfo &MRI);

// TODO: Move to TableGen.
static SPIRV::MemorySemantics::MemorySemantics
getSPIRVMemSemantics(std::memory_order MemOrder) {
  switch (MemOrder) {
  case std::memory_order::memory_order_relaxed:
    return SPIRV::MemorySemantics::None;
  case std::memory_order::memory_order_acquire:
    return SPIRV::MemorySemantics::Acquire;
  case std::memory_order::memory_order_release:
    return SPIRV::MemorySemantics::Release;
  case std::memory_order::memory_order_acq_rel:
    return SPIRV::MemorySemantics::AcquireRelease;
  case std::memory_order::memory_order_seq_cst:
    return SPIRV::MemorySemantics::SequentiallyConsistent;
  default:
    report_fatal_error("Unknown CL memory order");
  }
}

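// Illustrative mapping (a sketch, not used by the lowering itself): an OpenCL
// call such as atomic_load_explicit(p, memory_order_acquire) carries the
// 'acquire' order, which the switch above turns into
// SPIRV::MemorySemantics::Acquire.
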
static SPIRV::Scope::Scope getSPIRVScope(SPIRV::CLMemoryScope ClScope) {
  switch (ClScope) {
  case SPIRV::CLMemoryScope::memory_scope_work_item:
    return SPIRV::Scope::Invocation;
  case SPIRV::CLMemoryScope::memory_scope_work_group:
    return SPIRV::Scope::Workgroup;
  case SPIRV::CLMemoryScope::memory_scope_device:
    return SPIRV::Scope::Device;
  case SPIRV::CLMemoryScope::memory_scope_all_svm_devices:
    return SPIRV::Scope::CrossDevice;
  case SPIRV::CLMemoryScope::memory_scope_sub_group:
    return SPIRV::Scope::Subgroup;
  }
  report_fatal_error("Unknown CL memory scope");
}

static Register buildConstantIntReg(uint64_t Val, MachineIRBuilder &MIRBuilder,
                                    SPIRVGlobalRegistry *GR,
                                    unsigned BitWidth = 32) {
  SPIRVType *IntType = GR->getOrCreateSPIRVIntegerType(BitWidth, MIRBuilder);
  return GR->buildConstantInt(Val, MIRBuilder, IntType);
}

static Register buildScopeReg(Register CLScopeRegister,
                              SPIRV::Scope::Scope Scope,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR,
                              MachineRegisterInfo *MRI) {
  if (CLScopeRegister.isValid()) {
    auto CLScope =
        static_cast<SPIRV::CLMemoryScope>(getIConstVal(CLScopeRegister, MRI));
    Scope = getSPIRVScope(CLScope);

    if (CLScope == static_cast<unsigned>(Scope)) {
      MRI->setRegClass(CLScopeRegister, &SPIRV::IDRegClass);
      return CLScopeRegister;
    }
  }
  return buildConstantIntReg(Scope, MIRBuilder, GR);
}

static Register buildMemSemanticsReg(Register SemanticsRegister,
                                     Register PtrRegister, unsigned &Semantics,
                                     MachineIRBuilder &MIRBuilder,
                                     SPIRVGlobalRegistry *GR) {
  if (SemanticsRegister.isValid()) {
    MachineRegisterInfo *MRI = MIRBuilder.getMRI();
    std::memory_order Order =
        static_cast<std::memory_order>(getIConstVal(SemanticsRegister, MRI));
    Semantics =
        getSPIRVMemSemantics(Order) |
        getMemSemanticsForStorageClass(GR->getPointerStorageClass(PtrRegister));

    if (Order == Semantics) {
      MRI->setRegClass(SemanticsRegister, &SPIRV::IDRegClass);
      return SemanticsRegister;
    }
  }
  return buildConstantIntReg(Semantics, MIRBuilder, GR);
}

/// Helper function for translating atomic init to OpStore.
static bool buildAtomicInitInst(const SPIRV::IncomingCall *Call,
                                MachineIRBuilder &MIRBuilder) {
  assert(Call->Arguments.size() == 2 &&
         "Need 2 arguments for atomic init translation");
  MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
  MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
  MIRBuilder.buildInstr(SPIRV::OpStore)
      .addUse(Call->Arguments[0])
      .addUse(Call->Arguments[1]);
  return true;
}

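// A sketch of the resulting lowering: OpenCL's atomic_init(obj, value) has no
// ordering semantics in SPIR-V, so it becomes a plain store:
//   OpStore %obj %value
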
/// Helper function for building an atomic load instruction.
static bool buildAtomicLoadInst(const SPIRV::IncomingCall *Call,
                                MachineIRBuilder &MIRBuilder,
                                SPIRVGlobalRegistry *GR) {
  Register PtrRegister = Call->Arguments[0];
  MIRBuilder.getMRI()->setRegClass(PtrRegister, &SPIRV::IDRegClass);
  // TODO: if true, insert a call to __translate_ocl_memory_scope before
  // OpAtomicLoad and the function implementation. We can use Translator's
  // output for transcoding/atomic_explicit_arguments.cl as an example.
  Register ScopeRegister;
  if (Call->Arguments.size() > 1) {
    ScopeRegister = Call->Arguments[1];
    MIRBuilder.getMRI()->setRegClass(ScopeRegister, &SPIRV::IDRegClass);
  } else
    ScopeRegister = buildConstantIntReg(SPIRV::Scope::Device, MIRBuilder, GR);

  Register MemSemanticsReg;
  if (Call->Arguments.size() > 2) {
    // TODO: Insert call to __translate_ocl_memory_order before OpAtomicLoad.
    MemSemanticsReg = Call->Arguments[2];
    MIRBuilder.getMRI()->setRegClass(MemSemanticsReg, &SPIRV::IDRegClass);
  } else {
    int Semantics =
        SPIRV::MemorySemantics::SequentiallyConsistent |
        getMemSemanticsForStorageClass(GR->getPointerStorageClass(PtrRegister));
    MemSemanticsReg = buildConstantIntReg(Semantics, MIRBuilder, GR);
  }

  MIRBuilder.buildInstr(SPIRV::OpAtomicLoad)
      .addDef(Call->ReturnRegister)
      .addUse(GR->getSPIRVTypeID(Call->ReturnType))
      .addUse(PtrRegister)
      .addUse(ScopeRegister)
      .addUse(MemSemanticsReg);
  return true;
}

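// For illustration: a plain atomic_load(p) call provides neither a scope nor
// an ordering argument, so the defaults above apply and the emitted form is
// roughly
//   %res = OpAtomicLoad %ty %p %Device %(SequentiallyConsistent|storage)
// where the storage-class semantics come from the pointer's storage class.
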
/// Helper function for building an atomic store instruction.
static bool buildAtomicStoreInst(const SPIRV::IncomingCall *Call,
                                 MachineIRBuilder &MIRBuilder,
                                 SPIRVGlobalRegistry *GR) {
  Register ScopeRegister =
      buildConstantIntReg(SPIRV::Scope::Device, MIRBuilder, GR);
  Register PtrRegister = Call->Arguments[0];
  MIRBuilder.getMRI()->setRegClass(PtrRegister, &SPIRV::IDRegClass);
  int Semantics =
      SPIRV::MemorySemantics::SequentiallyConsistent |
      getMemSemanticsForStorageClass(GR->getPointerStorageClass(PtrRegister));
  Register MemSemanticsReg = buildConstantIntReg(Semantics, MIRBuilder, GR);
  MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
  MIRBuilder.buildInstr(SPIRV::OpAtomicStore)
      .addUse(PtrRegister)
      .addUse(ScopeRegister)
      .addUse(MemSemanticsReg)
      .addUse(Call->Arguments[1]);
  return true;
}

/// Helper function for building an atomic compare-exchange instruction.
static bool buildAtomicCompareExchangeInst(const SPIRV::IncomingCall *Call,
                                           MachineIRBuilder &MIRBuilder,
                                           SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
  bool IsCmpxchg = Call->Builtin->Name.contains("cmpxchg");
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  Register ObjectPtr = Call->Arguments[0];   // Pointer (volatile A *object).
  Register ExpectedArg = Call->Arguments[1]; // Comparator (C *expected).
  Register Desired = Call->Arguments[2];     // Value (C desired).
  MRI->setRegClass(ObjectPtr, &SPIRV::IDRegClass);
  MRI->setRegClass(ExpectedArg, &SPIRV::IDRegClass);
  MRI->setRegClass(Desired, &SPIRV::IDRegClass);
  SPIRVType *SpvDesiredTy = GR->getSPIRVTypeForVReg(Desired);
  LLT DesiredLLT = MRI->getType(Desired);

  assert(GR->getSPIRVTypeForVReg(ObjectPtr)->getOpcode() ==
         SPIRV::OpTypePointer);
  unsigned ExpectedType = GR->getSPIRVTypeForVReg(ExpectedArg)->getOpcode();
  (void)ExpectedType;
  assert(IsCmpxchg ? ExpectedType == SPIRV::OpTypeInt
                   : ExpectedType == SPIRV::OpTypePointer);
  assert(GR->isScalarOfType(Desired, SPIRV::OpTypeInt));

  SPIRVType *SpvObjectPtrTy = GR->getSPIRVTypeForVReg(ObjectPtr);
  assert(SpvObjectPtrTy->getOperand(2).isReg() && "SPIRV type is expected");
  auto StorageClass = static_cast<SPIRV::StorageClass::StorageClass>(
      SpvObjectPtrTy->getOperand(1).getImm());
  auto MemSemStorage = getMemSemanticsForStorageClass(StorageClass);

  Register MemSemEqualReg;
  Register MemSemUnequalReg;
  uint64_t MemSemEqual =
      IsCmpxchg
          ? SPIRV::MemorySemantics::None
          : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
  uint64_t MemSemUnequal =
      IsCmpxchg
          ? SPIRV::MemorySemantics::None
          : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
  if (Call->Arguments.size() >= 4) {
    assert(Call->Arguments.size() >= 5 &&
           "Need 5+ args for explicit atomic cmpxchg");
    auto MemOrdEq =
        static_cast<std::memory_order>(getIConstVal(Call->Arguments[3], MRI));
    auto MemOrdNeq =
        static_cast<std::memory_order>(getIConstVal(Call->Arguments[4], MRI));
    MemSemEqual = getSPIRVMemSemantics(MemOrdEq) | MemSemStorage;
    MemSemUnequal = getSPIRVMemSemantics(MemOrdNeq) | MemSemStorage;
    if (MemOrdEq == MemSemEqual)
      MemSemEqualReg = Call->Arguments[3];
    if (MemOrdNeq == MemSemUnequal)
      MemSemUnequalReg = Call->Arguments[4];
    MRI->setRegClass(Call->Arguments[3], &SPIRV::IDRegClass);
    MRI->setRegClass(Call->Arguments[4], &SPIRV::IDRegClass);
  }
  if (!MemSemEqualReg.isValid())
    MemSemEqualReg = buildConstantIntReg(MemSemEqual, MIRBuilder, GR);
  if (!MemSemUnequalReg.isValid())
    MemSemUnequalReg = buildConstantIntReg(MemSemUnequal, MIRBuilder, GR);

  Register ScopeReg;
  auto Scope = IsCmpxchg ? SPIRV::Scope::Workgroup : SPIRV::Scope::Device;
  if (Call->Arguments.size() >= 6) {
    assert(Call->Arguments.size() == 6 &&
           "Extra args for explicit atomic cmpxchg");
    auto ClScope = static_cast<SPIRV::CLMemoryScope>(
        getIConstVal(Call->Arguments[5], MRI));
    Scope = getSPIRVScope(ClScope);
    if (ClScope == static_cast<unsigned>(Scope))
      ScopeReg = Call->Arguments[5];
    MRI->setRegClass(Call->Arguments[5], &SPIRV::IDRegClass);
  }
  if (!ScopeReg.isValid())
    ScopeReg = buildConstantIntReg(Scope, MIRBuilder, GR);

  Register Expected = IsCmpxchg
                          ? ExpectedArg
                          : buildLoadInst(SpvDesiredTy, ExpectedArg, MIRBuilder,
                                          GR, LLT::scalar(32));
  MRI->setType(Expected, DesiredLLT);
  Register Tmp = !IsCmpxchg ? MRI->createGenericVirtualRegister(DesiredLLT)
                            : Call->ReturnRegister;
  if (!MRI->getRegClassOrNull(Tmp))
    MRI->setRegClass(Tmp, &SPIRV::IDRegClass);
  GR->assignSPIRVTypeToVReg(SpvDesiredTy, Tmp, MIRBuilder.getMF());

  SPIRVType *IntTy = GR->getOrCreateSPIRVIntegerType(32, MIRBuilder);
  MIRBuilder.buildInstr(Opcode)
      .addDef(Tmp)
      .addUse(GR->getSPIRVTypeID(IntTy))
      .addUse(ObjectPtr)
      .addUse(ScopeReg)
      .addUse(MemSemEqualReg)
      .addUse(MemSemUnequalReg)
      .addUse(Desired)
      .addUse(Expected);
  if (!IsCmpxchg) {
    MIRBuilder.buildInstr(SPIRV::OpStore).addUse(ExpectedArg).addUse(Tmp);
    MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Call->ReturnRegister, Tmp, Expected);
  }
  return true;
}

/// Helper function for building an atomic read-modify-write instruction.
static bool buildAtomicRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
                               MachineIRBuilder &MIRBuilder,
                               SPIRVGlobalRegistry *GR) {
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  Register ScopeRegister =
      Call->Arguments.size() >= 4 ? Call->Arguments[3] : Register();

  assert(Call->Arguments.size() <= 4 &&
         "Too many args for explicit atomic RMW");
  ScopeRegister = buildScopeReg(ScopeRegister, SPIRV::Scope::Workgroup,
                                MIRBuilder, GR, MRI);

  Register PtrRegister = Call->Arguments[0];
  unsigned Semantics = SPIRV::MemorySemantics::None;
  MRI->setRegClass(PtrRegister, &SPIRV::IDRegClass);
  Register MemSemanticsReg =
      Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register();
  MemSemanticsReg = buildMemSemanticsReg(MemSemanticsReg, PtrRegister,
                                         Semantics, MIRBuilder, GR);
  MRI->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
  MIRBuilder.buildInstr(Opcode)
      .addDef(Call->ReturnRegister)
      .addUse(GR->getSPIRVTypeID(Call->ReturnType))
      .addUse(PtrRegister)
      .addUse(ScopeRegister)
      .addUse(MemSemanticsReg)
      .addUse(Call->Arguments[1]);
  return true;
}

/// Helper function for building an atomic floating-type instruction.
static bool buildAtomicFloatingRMWInst(const SPIRV::IncomingCall *Call,
                                       unsigned Opcode,
                                       MachineIRBuilder &MIRBuilder,
                                       SPIRVGlobalRegistry *GR) {
  assert(Call->Arguments.size() == 4 &&
         "Wrong number of arguments for an atomic floating-type builtin");

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  Register PtrReg = Call->Arguments[0];
  MRI->setRegClass(PtrReg, &SPIRV::IDRegClass);

  Register ScopeReg = Call->Arguments[1];
  MRI->setRegClass(ScopeReg, &SPIRV::IDRegClass);

  Register MemSemanticsReg = Call->Arguments[2];
  MRI->setRegClass(MemSemanticsReg, &SPIRV::IDRegClass);

  Register ValueReg = Call->Arguments[3];
  MRI->setRegClass(ValueReg, &SPIRV::IDRegClass);

  MIRBuilder.buildInstr(Opcode)
      .addDef(Call->ReturnRegister)
      .addUse(GR->getSPIRVTypeID(Call->ReturnType))
      .addUse(PtrReg)
      .addUse(ScopeReg)
      .addUse(MemSemanticsReg)
      .addUse(ValueReg);
  return true;
}

/// Helper function for building atomic flag instructions (e.g.
/// OpAtomicFlagTestAndSet).
static bool buildAtomicFlagInst(const SPIRV::IncomingCall *Call,
                                unsigned Opcode, MachineIRBuilder &MIRBuilder,
                                SPIRVGlobalRegistry *GR) {
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  Register PtrRegister = Call->Arguments[0];
  unsigned Semantics = SPIRV::MemorySemantics::SequentiallyConsistent;
  Register MemSemanticsReg =
      Call->Arguments.size() >= 2 ? Call->Arguments[1] : Register();
  MemSemanticsReg = buildMemSemanticsReg(MemSemanticsReg, PtrRegister,
                                         Semantics, MIRBuilder, GR);

  assert((Opcode != SPIRV::OpAtomicFlagClear ||
          (Semantics != SPIRV::MemorySemantics::Acquire &&
           Semantics != SPIRV::MemorySemantics::AcquireRelease)) &&
         "Invalid memory order argument!");

  Register ScopeRegister =
      Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register();
  ScopeRegister =
      buildScopeReg(ScopeRegister, SPIRV::Scope::Device, MIRBuilder, GR, MRI);

  auto MIB = MIRBuilder.buildInstr(Opcode);
  if (Opcode == SPIRV::OpAtomicFlagTestAndSet)
    MIB.addDef(Call->ReturnRegister)
        .addUse(GR->getSPIRVTypeID(Call->ReturnType));

  MIB.addUse(PtrRegister).addUse(ScopeRegister).addUse(MemSemanticsReg);
  return true;
}

/// Helper function for building barriers, i.e., memory/control ordering
/// operations.
static bool buildBarrierInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
                             MachineIRBuilder &MIRBuilder,
                             SPIRVGlobalRegistry *GR) {
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  unsigned MemFlags = getIConstVal(Call->Arguments[0], MRI);
  unsigned MemSemantics = SPIRV::MemorySemantics::None;

  if (MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE)
    MemSemantics |= SPIRV::MemorySemantics::WorkgroupMemory;

  if (MemFlags & SPIRV::CLK_GLOBAL_MEM_FENCE)
    MemSemantics |= SPIRV::MemorySemantics::CrossWorkgroupMemory;

  if (MemFlags & SPIRV::CLK_IMAGE_MEM_FENCE)
    MemSemantics |= SPIRV::MemorySemantics::ImageMemory;

  if (Opcode == SPIRV::OpMemoryBarrier) {
    std::memory_order MemOrder =
        static_cast<std::memory_order>(getIConstVal(Call->Arguments[1], MRI));
    MemSemantics = getSPIRVMemSemantics(MemOrder) | MemSemantics;
  } else {
    MemSemantics |= SPIRV::MemorySemantics::SequentiallyConsistent;
  }

  Register MemSemanticsReg;
  if (MemFlags == MemSemantics) {
    MemSemanticsReg = Call->Arguments[0];
    MRI->setRegClass(MemSemanticsReg, &SPIRV::IDRegClass);
  } else
    MemSemanticsReg = buildConstantIntReg(MemSemantics, MIRBuilder, GR);

  Register ScopeReg;
  SPIRV::Scope::Scope Scope = SPIRV::Scope::Workgroup;
  SPIRV::Scope::Scope MemScope = Scope;
  if (Call->Arguments.size() >= 2) {
    assert(
        ((Opcode != SPIRV::OpMemoryBarrier && Call->Arguments.size() == 2) ||
         (Opcode == SPIRV::OpMemoryBarrier && Call->Arguments.size() == 3)) &&
        "Extra args for explicitly scoped barrier");
    Register ScopeArg = (Opcode == SPIRV::OpMemoryBarrier) ? Call->Arguments[2]
                                                           : Call->Arguments[1];
    SPIRV::CLMemoryScope CLScope =
        static_cast<SPIRV::CLMemoryScope>(getIConstVal(ScopeArg, MRI));
    MemScope = getSPIRVScope(CLScope);
    if (!(MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE) ||
        (Opcode == SPIRV::OpMemoryBarrier))
      Scope = MemScope;

    if (CLScope == static_cast<unsigned>(Scope)) {
      ScopeReg = Call->Arguments[1];
      MRI->setRegClass(ScopeReg, &SPIRV::IDRegClass);
    }
  }

  if (!ScopeReg.isValid())
    ScopeReg = buildConstantIntReg(Scope, MIRBuilder, GR);

  auto MIB = MIRBuilder.buildInstr(Opcode).addUse(ScopeReg);
  if (Opcode != SPIRV::OpMemoryBarrier)
    MIB.addUse(buildConstantIntReg(MemScope, MIRBuilder, GR));
  MIB.addUse(MemSemanticsReg);
  return true;
}

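// For illustration: an OpenCL barrier(CLK_LOCAL_MEM_FENCE) call reaches this
// helper with the OpControlBarrier opcode and no explicit scope argument, so
// it lowers roughly to
//   OpControlBarrier %Workgroup %Workgroup
//       %(SequentiallyConsistent|WorkgroupMemory)
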
static unsigned getNumComponentsForDim(SPIRV::Dim::Dim dim) {
  switch (dim) {
  case SPIRV::Dim::DIM_1D:
  case SPIRV::Dim::DIM_Buffer:
    return 1;
  case SPIRV::Dim::DIM_2D:
  case SPIRV::Dim::DIM_Cube:
  case SPIRV::Dim::DIM_Rect:
    return 2;
  case SPIRV::Dim::DIM_3D:
    return 3;
  default:
    report_fatal_error("Cannot get num components for given Dim");
  }
}

/// Helper function for obtaining the number of size components.
static unsigned getNumSizeComponents(SPIRVType *imgType) {
  assert(imgType->getOpcode() == SPIRV::OpTypeImage);
  auto dim = static_cast<SPIRV::Dim::Dim>(imgType->getOperand(2).getImm());
  unsigned numComps = getNumComponentsForDim(dim);
  bool arrayed = imgType->getOperand(4).getImm() == 1;
  return arrayed ? numComps + 1 : numComps;
}

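// For example, a 2D image yields 2 size components, while an arrayed 2D image
// (image2d_array_t) yields 2 + 1 = 3: width, height, and the array size.
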
//===----------------------------------------------------------------------===//
// Implementation functions for each builtin group
//===----------------------------------------------------------------------===//

static bool generateExtInst(const SPIRV::IncomingCall *Call,
                            MachineIRBuilder &MIRBuilder,
                            SPIRVGlobalRegistry *GR) {
  // Lookup the extended instruction number in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  uint32_t Number =
      SPIRV::lookupExtendedBuiltin(Builtin->Name, Builtin->Set)->Number;

  // Build extended instruction.
  auto MIB =
      MIRBuilder.buildInstr(SPIRV::OpExtInst)
          .addDef(Call->ReturnRegister)
          .addUse(GR->getSPIRVTypeID(Call->ReturnType))
          .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
          .addImm(Number);

  for (auto Argument : Call->Arguments)
    MIB.addUse(Argument);
  return true;
}

static bool generateRelationalInst(const SPIRV::IncomingCall *Call,
                                   MachineIRBuilder &MIRBuilder,
                                   SPIRVGlobalRegistry *GR) {
  // Lookup the instruction opcode in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;

  Register CompareRegister;
  SPIRVType *RelationType;
  std::tie(CompareRegister, RelationType) =
      buildBoolRegister(MIRBuilder, Call->ReturnType, GR);

  // Build relational instruction.
  auto MIB = MIRBuilder.buildInstr(Opcode)
                 .addDef(CompareRegister)
                 .addUse(GR->getSPIRVTypeID(RelationType));

  for (auto Argument : Call->Arguments)
    MIB.addUse(Argument);

  // Build select instruction.
  return buildSelectInst(MIRBuilder, Call->ReturnRegister, CompareRegister,
                         Call->ReturnType, GR);
}

static bool generateGroupInst(const SPIRV::IncomingCall *Call,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  const SPIRV::GroupBuiltin *GroupBuiltin =
      SPIRV::lookupGroupBuiltin(Builtin->Name);
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  Register Arg0;
  if (GroupBuiltin->HasBoolArg) {
    Register ConstRegister = Call->Arguments[0];
    auto ArgInstruction = getDefInstrMaybeConstant(ConstRegister, MRI);
    (void)ArgInstruction;
    // TODO: support non-constant bool values.
    assert(ArgInstruction->getOpcode() == TargetOpcode::G_CONSTANT &&
           "Only constant bool value args are supported");
    if (GR->getSPIRVTypeForVReg(Call->Arguments[0])->getOpcode() !=
        SPIRV::OpTypeBool)
      Arg0 = GR->buildConstantInt(getIConstVal(ConstRegister, MRI), MIRBuilder,
                                  GR->getOrCreateSPIRVBoolType(MIRBuilder));
  }

  Register GroupResultRegister = Call->ReturnRegister;
  SPIRVType *GroupResultType = Call->ReturnType;

  // TODO: maybe we need to check whether the result type is already boolean
  // and in this case do not insert select instruction.
  const bool HasBoolReturnTy =
      GroupBuiltin->IsElect || GroupBuiltin->IsAllOrAny ||
      GroupBuiltin->IsAllEqual || GroupBuiltin->IsLogical ||
      GroupBuiltin->IsInverseBallot || GroupBuiltin->IsBallotBitExtract;

  if (HasBoolReturnTy)
    std::tie(GroupResultRegister, GroupResultType) =
        buildBoolRegister(MIRBuilder, Call->ReturnType, GR);

  auto Scope = Builtin->Name.starts_with("sub_group") ? SPIRV::Scope::Subgroup
                                                      : SPIRV::Scope::Workgroup;
  Register ScopeRegister = buildConstantIntReg(Scope, MIRBuilder, GR);

  // Build work/sub group instruction.
  auto MIB = MIRBuilder.buildInstr(GroupBuiltin->Opcode)
                 .addDef(GroupResultRegister)
                 .addUse(GR->getSPIRVTypeID(GroupResultType))
                 .addUse(ScopeRegister);

  if (!GroupBuiltin->NoGroupOperation)
    MIB.addImm(GroupBuiltin->GroupOperation);
  if (Call->Arguments.size() > 0) {
    MIB.addUse(Arg0.isValid() ? Arg0 : Call->Arguments[0]);
    MRI->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
    for (unsigned i = 1; i < Call->Arguments.size(); i++) {
      MIB.addUse(Call->Arguments[i]);
      MRI->setRegClass(Call->Arguments[i], &SPIRV::IDRegClass);
    }
  }

  // Build select instruction.
  if (HasBoolReturnTy)
    buildSelectInst(MIRBuilder, Call->ReturnRegister, GroupResultRegister,
                    Call->ReturnType, GR);
  return true;
}

static bool generateIntelSubgroupsInst(const SPIRV::IncomingCall *Call,
                                       MachineIRBuilder &MIRBuilder,
                                       SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  MachineFunction &MF = MIRBuilder.getMF();
  const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
  if (!ST->canUseExtension(SPIRV::Extension::SPV_INTEL_subgroups)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_INTEL_subgroups";
    report_fatal_error(DiagMsg.c_str(), false);
  }
  const SPIRV::IntelSubgroupsBuiltin *IntelSubgroups =
      SPIRV::lookupIntelSubgroupsBuiltin(Builtin->Name);
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  uint32_t OpCode = IntelSubgroups->Opcode;
  if (IntelSubgroups->IsBlock) {
    // The minimal number of arguments set in TableGen records is 1.
    if (SPIRVType *Arg0Type = GR->getSPIRVTypeForVReg(Call->Arguments[0])) {
      if (Arg0Type->getOpcode() == SPIRV::OpTypeImage) {
        // TODO: add required validation from the specification:
        // "'Image' must be an object whose type is OpTypeImage with a 'Sampled'
        // operand of 0 or 2. If the 'Sampled' operand is 2, then some
        // dimensions require a capability."
        switch (OpCode) {
        case SPIRV::OpSubgroupBlockReadINTEL:
          OpCode = SPIRV::OpSubgroupImageBlockReadINTEL;
          break;
        case SPIRV::OpSubgroupBlockWriteINTEL:
          OpCode = SPIRV::OpSubgroupImageBlockWriteINTEL;
          break;
        }
      }
    }
  }

  // TODO: opaque pointers types should be eventually resolved in such a way
  // that validation of block read is enabled with respect to the following
  // specification requirement:
  // "'Result Type' may be a scalar or vector type, and its component type must
  // be equal to the type pointed to by 'Ptr'."
  // For example, function parameter type should not be default i8 pointer, but
  // depend on the result type of the instruction where it is used as a pointer
  // argument of OpSubgroupBlockReadINTEL.

  // Build the Intel subgroups instruction.
  MachineInstrBuilder MIB =
      IntelSubgroups->IsWrite
          ? MIRBuilder.buildInstr(OpCode)
          : MIRBuilder.buildInstr(OpCode)
                .addDef(Call->ReturnRegister)
                .addUse(GR->getSPIRVTypeID(Call->ReturnType));
  for (size_t i = 0; i < Call->Arguments.size(); ++i) {
    MIB.addUse(Call->Arguments[i]);
    MRI->setRegClass(Call->Arguments[i], &SPIRV::IDRegClass);
  }

  return true;
}

static bool generateGroupUniformInst(const SPIRV::IncomingCall *Call,
                                     MachineIRBuilder &MIRBuilder,
                                     SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  MachineFunction &MF = MIRBuilder.getMF();
  const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
  if (!ST->canUseExtension(
          SPIRV::Extension::SPV_KHR_uniform_group_instructions)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_KHR_uniform_group_instructions";
    report_fatal_error(DiagMsg.c_str(), false);
  }
  const SPIRV::GroupUniformBuiltin *GroupUniform =
      SPIRV::lookupGroupUniformBuiltin(Builtin->Name);
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  Register GroupResultReg = Call->ReturnRegister;
  MRI->setRegClass(GroupResultReg, &SPIRV::IDRegClass);

  // Scope
  Register ScopeReg = Call->Arguments[0];
  MRI->setRegClass(ScopeReg, &SPIRV::IDRegClass);

  // Group Operation
  Register ConstGroupOpReg = Call->Arguments[1];
  const MachineInstr *Const = getDefInstrMaybeConstant(ConstGroupOpReg, MRI);
  if (!Const || Const->getOpcode() != TargetOpcode::G_CONSTANT)
    report_fatal_error(
        "expect a constant group operation for a uniform group instruction",
        false);
  const MachineOperand &ConstOperand = Const->getOperand(1);
  if (!ConstOperand.isCImm())
    report_fatal_error("uniform group instructions: group operation must be an "
                       "integer constant",
                       false);

  // Value
  Register ValueReg = Call->Arguments[2];
  MRI->setRegClass(ValueReg, &SPIRV::IDRegClass);

  auto MIB = MIRBuilder.buildInstr(GroupUniform->Opcode)
                 .addDef(GroupResultReg)
                 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
                 .addUse(ScopeReg);
  addNumImm(ConstOperand.getCImm()->getValue(), MIB);
  MIB.addUse(ValueReg);

  return true;
}

// These queries ask for a single size_t result for a given dimension index,
// e.g. size_t get_global_id(uint dimindex). In SPIR-V, the builtins
// corresponding to these values are all vec3 types, so we need to extract the
// correct index or return defaultVal (0 or 1 depending on the query). We also
// handle extending or truncating in case size_t does not match the expected
// result type's bitwidth.
//
// For a constant index >= 3 we generate:
// %res = OpConstant %SizeT 0
//
// For other indices we generate:
// %g = OpVariable %ptr_V3_SizeT Input
// OpDecorate %g BuiltIn XXX
// OpDecorate %g LinkageAttributes "__spirv_BuiltInXXX"
// OpDecorate %g Constant
// %loadedVec = OpLoad %V3_SizeT %g
//
// Then, if the index is constant < 3, we generate:
// %res = OpCompositeExtract %SizeT %loadedVec idx
// If the index is dynamic, we generate:
// %tmp = OpVectorExtractDynamic %SizeT %loadedVec %idx
// %cmp = OpULessThan %bool %idx %const_3
// %res = OpSelect %SizeT %cmp %tmp %const_0
//
// If the bitwidth of %res does not match the expected return type, we add an
// extend or truncate.
static bool genWorkgroupQuery(const SPIRV::IncomingCall *Call,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR,
                              SPIRV::BuiltIn::BuiltIn BuiltinValue,
                              uint64_t DefaultValue) {
  Register IndexRegister = Call->Arguments[0];
  const unsigned ResultWidth = Call->ReturnType->getOperand(1).getImm();
  const unsigned PointerSize = GR->getPointerSize();
  const SPIRVType *PointerSizeType =
      GR->getOrCreateSPIRVIntegerType(PointerSize, MIRBuilder);
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  auto IndexInstruction = getDefInstrMaybeConstant(IndexRegister, MRI);

  // Set up the final register to do truncation or extension on at the end.
  Register ToTruncate = Call->ReturnRegister;

  // If the index is constant, we can statically determine if it is in range.
  bool IsConstantIndex =
      IndexInstruction->getOpcode() == TargetOpcode::G_CONSTANT;

  // If it's out of range (max dimension is 3), we can just return the constant
  // default value (0 or 1 depending on which query function).
  if (IsConstantIndex && getIConstVal(IndexRegister, MRI) >= 3) {
    Register DefaultReg = Call->ReturnRegister;
    if (PointerSize != ResultWidth) {
      DefaultReg = MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
      MRI->setRegClass(DefaultReg, &SPIRV::IDRegClass);
      GR->assignSPIRVTypeToVReg(PointerSizeType, DefaultReg,
                                MIRBuilder.getMF());
      ToTruncate = DefaultReg;
    }
    auto NewRegister =
        GR->buildConstantInt(DefaultValue, MIRBuilder, PointerSizeType);
    MIRBuilder.buildCopy(DefaultReg, NewRegister);
  } else { // If it could be in range, we need to load from the given builtin.
    auto Vec3Ty =
        GR->getOrCreateSPIRVVectorType(PointerSizeType, 3, MIRBuilder);
    Register LoadedVector =
        buildBuiltinVariableLoad(MIRBuilder, Vec3Ty, GR, BuiltinValue,
                                 LLT::fixed_vector(3, PointerSize));
    // Set up the vreg to extract the result to (possibly a new temporary one).
    Register Extracted = Call->ReturnRegister;
    if (!IsConstantIndex || PointerSize != ResultWidth) {
      Extracted = MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
      MRI->setRegClass(Extracted, &SPIRV::IDRegClass);
      GR->assignSPIRVTypeToVReg(PointerSizeType, Extracted, MIRBuilder.getMF());
    }
    // Use Intrinsic::spv_extractelt so dynamic vs static extraction is
    // handled later: extr = spv_extractelt LoadedVector, IndexRegister.
    MachineInstrBuilder ExtractInst = MIRBuilder.buildIntrinsic(
        Intrinsic::spv_extractelt, ArrayRef<Register>{Extracted}, true, false);
    ExtractInst.addUse(LoadedVector).addUse(IndexRegister);

    // If the index is dynamic, we need to check if it's < 3, and then use a
    // select.
    if (!IsConstantIndex) {
      insertAssignInstr(Extracted, nullptr, PointerSizeType, GR, MIRBuilder,
                        *MRI);

      auto IndexType = GR->getSPIRVTypeForVReg(IndexRegister);
      auto BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder);

      Register CompareRegister =
          MRI->createGenericVirtualRegister(LLT::scalar(1));
      MRI->setRegClass(CompareRegister, &SPIRV::IDRegClass);
      GR->assignSPIRVTypeToVReg(BoolType, CompareRegister, MIRBuilder.getMF());

      // Use G_ICMP to check if idxVReg < 3.
      MIRBuilder.buildICmp(CmpInst::ICMP_ULT, CompareRegister, IndexRegister,
                           GR->buildConstantInt(3, MIRBuilder, IndexType));

      // Get constant for the default value (0 or 1 depending on which
      // function).
      Register DefaultRegister =
          GR->buildConstantInt(DefaultValue, MIRBuilder, PointerSizeType);

      // Get a register for the selection result (possibly a new temporary).
      Register SelectionResult = Call->ReturnRegister;
      if (PointerSize != ResultWidth) {
        SelectionResult =
            MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
        MRI->setRegClass(SelectionResult, &SPIRV::IDRegClass);
        GR->assignSPIRVTypeToVReg(PointerSizeType, SelectionResult,
                                  MIRBuilder.getMF());
      }
      // Create the final G_SELECT to return the extracted value or the
      // default.
      MIRBuilder.buildSelect(SelectionResult, CompareRegister, Extracted,
                             DefaultRegister);
      ToTruncate = SelectionResult;
    } else {
      ToTruncate = Extracted;
    }
  }
  // Alter the result's bitwidth if it does not match the SizeT value
  // extracted.
  if (PointerSize != ResultWidth)
    MIRBuilder.buildZExtOrTrunc(Call->ReturnRegister, ToTruncate);
  return true;
}

static bool generateBuiltinVar(const SPIRV::IncomingCall *Call,
                               MachineIRBuilder &MIRBuilder,
                               SPIRVGlobalRegistry *GR) {
  // Lookup the builtin variable record.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  SPIRV::BuiltIn::BuiltIn Value =
      SPIRV::lookupGetBuiltin(Builtin->Name, Builtin->Set)->Value;

  if (Value == SPIRV::BuiltIn::GlobalInvocationId)
    return genWorkgroupQuery(Call, MIRBuilder, GR, Value, 0);

  // Build a load instruction for the builtin variable.
  unsigned BitWidth = GR->getScalarOrVectorBitWidth(Call->ReturnType);
  LLT LLType;
  if (Call->ReturnType->getOpcode() == SPIRV::OpTypeVector)
    LLType =
        LLT::fixed_vector(Call->ReturnType->getOperand(2).getImm(), BitWidth);
  else
    LLType = LLT::scalar(BitWidth);

  return buildBuiltinVariableLoad(MIRBuilder, Call->ReturnType, GR, Value,
                                  LLType, Call->ReturnRegister);
}

static bool generateAtomicInst(const SPIRV::IncomingCall *Call,
                               MachineIRBuilder &MIRBuilder,
                               SPIRVGlobalRegistry *GR) {
  // Lookup the instruction opcode in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;

  switch (Opcode) {
  case SPIRV::OpStore:
    return buildAtomicInitInst(Call, MIRBuilder);
  case SPIRV::OpAtomicLoad:
    return buildAtomicLoadInst(Call, MIRBuilder, GR);
  case SPIRV::OpAtomicStore:
    return buildAtomicStoreInst(Call, MIRBuilder, GR);
  case SPIRV::OpAtomicCompareExchange:
  case SPIRV::OpAtomicCompareExchangeWeak:
    return buildAtomicCompareExchangeInst(Call, MIRBuilder, GR);
  case SPIRV::OpAtomicIAdd:
  case SPIRV::OpAtomicISub:
  case SPIRV::OpAtomicOr:
  case SPIRV::OpAtomicXor:
  case SPIRV::OpAtomicAnd:
  case SPIRV::OpAtomicExchange:
    return buildAtomicRMWInst(Call, Opcode, MIRBuilder, GR);
  case SPIRV::OpMemoryBarrier:
    return buildBarrierInst(Call, SPIRV::OpMemoryBarrier, MIRBuilder, GR);
  case SPIRV::OpAtomicFlagTestAndSet:
  case SPIRV::OpAtomicFlagClear:
    return buildAtomicFlagInst(Call, Opcode, MIRBuilder, GR);
  default:
    return false;
  }
}

static bool generateAtomicFloatingInst(const SPIRV::IncomingCall *Call,
                                       MachineIRBuilder &MIRBuilder,
                                       SPIRVGlobalRegistry *GR) {
  // Lookup the instruction opcode in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  unsigned Opcode = SPIRV::lookupAtomicFloatingBuiltin(Builtin->Name)->Opcode;

  switch (Opcode) {
  case SPIRV::OpAtomicFAddEXT:
  case SPIRV::OpAtomicFMinEXT:
  case SPIRV::OpAtomicFMaxEXT:
    return buildAtomicFloatingRMWInst(Call, Opcode, MIRBuilder, GR);
  default:
    return false;
  }
}

static bool generateBarrierInst(const SPIRV::IncomingCall *Call,
                                MachineIRBuilder &MIRBuilder,
                                SPIRVGlobalRegistry *GR) {
  // Lookup the instruction opcode in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;

  return buildBarrierInst(Call, Opcode, MIRBuilder, GR);
}

static bool generateDotOrFMulInst(const SPIRV::IncomingCall *Call,
                                  MachineIRBuilder &MIRBuilder,
                                  SPIRVGlobalRegistry *GR) {
  unsigned Opcode = GR->getSPIRVTypeForVReg(Call->Arguments[0])->getOpcode();
  bool IsVec = Opcode == SPIRV::OpTypeVector;
  // Use OpDot only in case of vector args and OpFMul in case of scalar args.
  MIRBuilder.buildInstr(IsVec ? SPIRV::OpDot : SPIRV::OpFMulS)
      .addDef(Call->ReturnRegister)
      .addUse(GR->getSPIRVTypeID(Call->ReturnType))
      .addUse(Call->Arguments[0])
      .addUse(Call->Arguments[1]);
  return true;
}

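// For illustration: OpenCL dot(float4 a, float4 b) becomes
//   %r = OpDot %float %a %b
// while the scalar overload dot(float a, float b) degenerates to a multiply
// via OpFMulS (the backend's scalar OpFMul form):
//   %r = OpFMul %float %a %b
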
static bool generateGetQueryInst(const SPIRV::IncomingCall *Call,
                                 MachineIRBuilder &MIRBuilder,
                                 SPIRVGlobalRegistry *GR) {
  // Lookup the builtin record.
  SPIRV::BuiltIn::BuiltIn Value =
      SPIRV::lookupGetBuiltin(Call->Builtin->Name, Call->Builtin->Set)->Value;
  uint64_t IsDefault = (Value == SPIRV::BuiltIn::GlobalSize ||
                        Value == SPIRV::BuiltIn::WorkgroupSize ||
                        Value == SPIRV::BuiltIn::EnqueuedWorkgroupSize);
  return genWorkgroupQuery(Call, MIRBuilder, GR, Value, IsDefault ? 1 : 0);
}

static bool generateImageSizeQueryInst(const SPIRV::IncomingCall *Call,
                                       MachineIRBuilder &MIRBuilder,
                                       SPIRVGlobalRegistry *GR) {
  // Lookup the image size query component number in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  uint32_t Component =
      SPIRV::lookupImageQueryBuiltin(Builtin->Name, Builtin->Set)->Component;
  // Query result may either be a vector or a scalar. If return type is not a
  // vector, expect only a single size component. Otherwise get the number of
  // expected components.
  SPIRVType *RetTy = Call->ReturnType;
  unsigned NumExpectedRetComponents = RetTy->getOpcode() == SPIRV::OpTypeVector
                                          ? RetTy->getOperand(2).getImm()
                                          : 1;
  // Get the actual number of query result/size components.
  SPIRVType *ImgType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
  unsigned NumActualRetComponents = getNumSizeComponents(ImgType);
  Register QueryResult = Call->ReturnRegister;
  SPIRVType *QueryResultType = Call->ReturnType;
  if (NumExpectedRetComponents != NumActualRetComponents) {
    QueryResult = MIRBuilder.getMRI()->createGenericVirtualRegister(
        LLT::fixed_vector(NumActualRetComponents, 32));
    MIRBuilder.getMRI()->setRegClass(QueryResult, &SPIRV::IDRegClass);
    SPIRVType *IntTy = GR->getOrCreateSPIRVIntegerType(32, MIRBuilder);
    QueryResultType = GR->getOrCreateSPIRVVectorType(
        IntTy, NumActualRetComponents, MIRBuilder);
    GR->assignSPIRVTypeToVReg(QueryResultType, QueryResult, MIRBuilder.getMF());
  }
  bool IsDimBuf = ImgType->getOperand(2).getImm() == SPIRV::Dim::DIM_Buffer;
  unsigned Opcode =
      IsDimBuf ? SPIRV::OpImageQuerySize : SPIRV::OpImageQuerySizeLod;
  MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
  auto MIB = MIRBuilder.buildInstr(Opcode)
                 .addDef(QueryResult)
                 .addUse(GR->getSPIRVTypeID(QueryResultType))
                 .addUse(Call->Arguments[0]);
  if (!IsDimBuf)
    MIB.addUse(buildConstantIntReg(0, MIRBuilder, GR)); // Lod id.
  if (NumExpectedRetComponents == NumActualRetComponents)
    return true;
  if (NumExpectedRetComponents == 1) {
    // Only 1 component is expected, build OpCompositeExtract instruction.
    unsigned ExtractedComposite =
        Component == 3 ? NumActualRetComponents - 1 : Component;
    assert(ExtractedComposite < NumActualRetComponents &&
           "Invalid composite index!");
    MIRBuilder.buildInstr(SPIRV::OpCompositeExtract)
        .addDef(Call->ReturnRegister)
        .addUse(GR->getSPIRVTypeID(Call->ReturnType))
        .addUse(QueryResult)
        .addImm(ExtractedComposite);
  } else {
    // More than 1 component is expected, fill a new vector.
    auto MIB = MIRBuilder.buildInstr(SPIRV::OpVectorShuffle)
                   .addDef(Call->ReturnRegister)
                   .addUse(GR->getSPIRVTypeID(Call->ReturnType))
                   .addUse(QueryResult)
                   .addUse(QueryResult);
    for (unsigned i = 0; i < NumExpectedRetComponents; ++i)
      MIB.addImm(i < NumActualRetComponents ? i : 0xffffffff);
  }
  return true;
}

static bool generateImageMiscQueryInst(const SPIRV::IncomingCall *Call,
                                       MachineIRBuilder &MIRBuilder,
                                       SPIRVGlobalRegistry *GR) {
  assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt &&
         "Image samples query result must be of int type!");

  // Lookup the instruction opcode in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;

  Register Image = Call->Arguments[0];
  MIRBuilder.getMRI()->setRegClass(Image, &SPIRV::IDRegClass);
  SPIRV::Dim::Dim ImageDimensionality = static_cast<SPIRV::Dim::Dim>(
      GR->getSPIRVTypeForVReg(Image)->getOperand(2).getImm());
  (void)ImageDimensionality;

  switch (Opcode) {
  case SPIRV::OpImageQuerySamples:
    assert(ImageDimensionality == SPIRV::Dim::DIM_2D &&
           "Image must be of 2D dimensionality");
    break;
  case SPIRV::OpImageQueryLevels:
    assert((ImageDimensionality == SPIRV::Dim::DIM_1D ||
            ImageDimensionality == SPIRV::Dim::DIM_2D ||
            ImageDimensionality == SPIRV::Dim::DIM_3D ||
            ImageDimensionality == SPIRV::Dim::DIM_Cube) &&
           "Image must be of 1D/2D/3D/Cube dimensionality");
    break;
  }

  MIRBuilder.buildInstr(Opcode)
      .addDef(Call->ReturnRegister)
      .addUse(GR->getSPIRVTypeID(Call->ReturnType))
      .addUse(Image);
  return true;
}

// TODO: Move to TableGen.
static SPIRV::SamplerAddressingMode::SamplerAddressingMode
getSamplerAddressingModeFromBitmask(unsigned Bitmask) {
  switch (Bitmask & SPIRV::CLK_ADDRESS_MODE_MASK) {
  case SPIRV::CLK_ADDRESS_CLAMP:
    return SPIRV::SamplerAddressingMode::Clamp;
  case SPIRV::CLK_ADDRESS_CLAMP_TO_EDGE:
    return SPIRV::SamplerAddressingMode::ClampToEdge;
  case SPIRV::CLK_ADDRESS_REPEAT:
    return SPIRV::SamplerAddressingMode::Repeat;
  case SPIRV::CLK_ADDRESS_MIRRORED_REPEAT:
    return SPIRV::SamplerAddressingMode::RepeatMirrored;
  case SPIRV::CLK_ADDRESS_NONE:
    return SPIRV::SamplerAddressingMode::None;
  default:
    report_fatal_error("Unknown CL address mode");
  }
}

static unsigned getSamplerParamFromBitmask(unsigned Bitmask) {
  return (Bitmask & SPIRV::CLK_NORMALIZED_COORDS_TRUE) ? 1 : 0;
}

static SPIRV::SamplerFilterMode::SamplerFilterMode
getSamplerFilterModeFromBitmask(unsigned Bitmask) {
  if (Bitmask & SPIRV::CLK_FILTER_LINEAR)
    return SPIRV::SamplerFilterMode::Linear;
  if (Bitmask & SPIRV::CLK_FILTER_NEAREST)
    return SPIRV::SamplerFilterMode::Nearest;
  return SPIRV::SamplerFilterMode::Nearest;
}

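// For illustration: an OpenCL literal sampler such as
//   CLK_NORMALIZED_COORDS_TRUE | CLK_ADDRESS_CLAMP | CLK_FILTER_LINEAR
// decodes through the three helpers above into addressing mode Clamp,
// normalized-coordinates parameter 1, and filter mode Linear, which
// GR->buildConstantSampler then materializes as a constant sampler value.
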
static bool generateReadImageInst(const StringRef DemangledCall,
                                  const SPIRV::IncomingCall *Call,
                                  MachineIRBuilder &MIRBuilder,
                                  SPIRVGlobalRegistry *GR) {
  Register Image = Call->Arguments[0];
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  MRI->setRegClass(Image, &SPIRV::IDRegClass);
  MRI->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
  bool HasOclSampler = DemangledCall.contains_insensitive("ocl_sampler");
  bool HasMsaa = DemangledCall.contains_insensitive("msaa");
  if (HasOclSampler || HasMsaa)
    MRI->setRegClass(Call->Arguments[2], &SPIRV::IDRegClass);
  if (HasOclSampler) {
    Register Sampler = Call->Arguments[1];

    if (!GR->isScalarOfType(Sampler, SPIRV::OpTypeSampler) &&
        getDefInstrMaybeConstant(Sampler, MRI)->getOperand(1).isCImm()) {
      uint64_t SamplerMask = getIConstVal(Sampler, MRI);
      Sampler = GR->buildConstantSampler(
          Register(), getSamplerAddressingModeFromBitmask(SamplerMask),
          getSamplerParamFromBitmask(SamplerMask),
          getSamplerFilterModeFromBitmask(SamplerMask), MIRBuilder,
          GR->getSPIRVTypeForVReg(Sampler));
    }
    SPIRVType *ImageType = GR->getSPIRVTypeForVReg(Image);
    SPIRVType *SampledImageType =
        GR->getOrCreateOpTypeSampledImage(ImageType, MIRBuilder);
    Register SampledImage = MRI->createVirtualRegister(&SPIRV::IDRegClass);

    MIRBuilder.buildInstr(SPIRV::OpSampledImage)
        .addDef(SampledImage)
        .addUse(GR->getSPIRVTypeID(SampledImageType))
        .addUse(Image)
        .addUse(Sampler);

    Register Lod = GR->buildConstantFP(APFloat::getZero(APFloat::IEEEsingle()),
                                       MIRBuilder);
    SPIRVType *TempType = Call->ReturnType;
    bool NeedsExtraction = false;
    if (TempType->getOpcode() != SPIRV::OpTypeVector) {
      TempType =
          GR->getOrCreateSPIRVVectorType(Call->ReturnType, 4, MIRBuilder);
      NeedsExtraction = true;
    }
    LLT LLType = LLT::scalar(GR->getScalarOrVectorBitWidth(TempType));
    Register TempRegister = MRI->createGenericVirtualRegister(LLType);
    MRI->setRegClass(TempRegister, &SPIRV::IDRegClass);
    GR->assignSPIRVTypeToVReg(TempType, TempRegister, MIRBuilder.getMF());

    MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
        .addDef(NeedsExtraction ? TempRegister : Call->ReturnRegister)
        .addUse(GR->getSPIRVTypeID(TempType))
        .addUse(SampledImage)
        .addUse(Call->Arguments[2]) // Coordinate.
        .addImm(SPIRV::ImageOperand::Lod)
        .addUse(Lod);

    if (NeedsExtraction)
      MIRBuilder.buildInstr(SPIRV::OpCompositeExtract)
          .addDef(Call->ReturnRegister)
          .addUse(GR->getSPIRVTypeID(Call->ReturnType))
          .addUse(TempRegister)
          .addImm(0);
  } else if (HasMsaa) {
    MIRBuilder.buildInstr(SPIRV::OpImageRead)
        .addDef(Call->ReturnRegister)
        .addUse(GR->getSPIRVTypeID(Call->ReturnType))
        .addUse(Image)
        .addUse(Call->Arguments[1]) // Coordinate.
        .addImm(SPIRV::ImageOperand::Sample)
        .addUse(Call->Arguments[2]);
  } else {
    MIRBuilder.buildInstr(SPIRV::OpImageRead)
        .addDef(Call->ReturnRegister)
        .addUse(GR->getSPIRVTypeID(Call->ReturnType))
        .addUse(Image)
        .addUse(Call->Arguments[1]); // Coordinate.
  }
  return true;
}

static bool generateWriteImageInst(const SPIRV::IncomingCall *Call,
                                   MachineIRBuilder &MIRBuilder,
                                   SPIRVGlobalRegistry *GR) {
  MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
  MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
  MIRBuilder.getMRI()->setRegClass(Call->Arguments[2], &SPIRV::IDRegClass);
  MIRBuilder.buildInstr(SPIRV::OpImageWrite)
      .addUse(Call->Arguments[0])  // Image.
      .addUse(Call->Arguments[1])  // Coordinate.
      .addUse(Call->Arguments[2]); // Texel.
  return true;
}

1542static bool generateSampleImageInst(const StringRef DemangledCall,
1543 const SPIRV::IncomingCall *Call,
1544 MachineIRBuilder &MIRBuilder,
1545 SPIRVGlobalRegistry *GR) {
1546 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1547 if (Call->Builtin->Name.contains_insensitive(
1548 "__translate_sampler_initializer")) {
1549 // Build sampler literal.
1550 uint64_t Bitmask = getIConstVal(Call->Arguments[0], MRI);
1551 Register Sampler = GR->buildConstantSampler(
1552 Call->ReturnRegister, getSamplerAddressingModeFromBitmask(Bitmask),
1554 getSamplerFilterModeFromBitmask(Bitmask), MIRBuilder, Call->ReturnType);
1555 return Sampler.isValid();
1556 } else if (Call->Builtin->Name.contains_insensitive("__spirv_SampledImage")) {
1557 // Create OpSampledImage.
1558 Register Image = Call->Arguments[0];
1559 SPIRVType *ImageType = GR->getSPIRVTypeForVReg(Image);
1560 SPIRVType *SampledImageType =
1561 GR->getOrCreateOpTypeSampledImage(ImageType, MIRBuilder);
1562 Register SampledImage =
1563 Call->ReturnRegister.isValid()
1564 ? Call->ReturnRegister
1565 : MRI->createVirtualRegister(&SPIRV::IDRegClass);
1566 MIRBuilder.buildInstr(SPIRV::OpSampledImage)
1567 .addDef(SampledImage)
1568 .addUse(GR->getSPIRVTypeID(SampledImageType))
1569 .addUse(Image)
1570 .addUse(Call->Arguments[1]); // Sampler.
1571 return true;
1572 } else if (Call->Builtin->Name.contains_insensitive(
1573 "__spirv_ImageSampleExplicitLod")) {
1574 // Sample an image using an explicit level of detail.
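// When present, the "_R<type>" infix in the demangled name carries the
// explicit result type; e.g. a call demangled as
// "__spirv_ImageSampleExplicitLod_Rfloat4(...)" yields "float4" below
// (name shown for illustration only).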
1575 std::string ReturnType = DemangledCall.str();
1576 if (DemangledCall.contains("_R")) {
1577 ReturnType = ReturnType.substr(ReturnType.find("_R") + 2);
1578 ReturnType = ReturnType.substr(0, ReturnType.find('('));
1579 }
1580 SPIRVType *Type = GR->getOrCreateSPIRVTypeByName(ReturnType, MIRBuilder);
1581 if (!Type) {
1582 std::string DiagMsg =
1583 "Unable to recognize SPIRV type name: " + ReturnType;
1584 report_fatal_error(DiagMsg.c_str());
1585 }
1586 MRI->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
1587 MRI->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
1588 MRI->setRegClass(Call->Arguments[3], &SPIRV::IDRegClass);
1589
1590 MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
1591 .addDef(Call->ReturnRegister)
1592 .addUse(GR->getSPIRVTypeID(Type))
1593 .addUse(Call->Arguments[0]) // Image.
1594 .addUse(Call->Arguments[1]) // Coordinate.
1595 .addImm(SPIRV::ImageOperand::Lod)
1596 .addUse(Call->Arguments[3]);
1597 return true;
1598 }
1599 return false;
1600}
1601
1602 static bool generateSelectInst(const SPIRV::IncomingCall *Call,
1603 MachineIRBuilder &MIRBuilder) {
1604 MIRBuilder.buildSelect(Call->ReturnRegister, Call->Arguments[0],
1605 Call->Arguments[1], Call->Arguments[2]);
1606 return true;
1607}
1608
1609 static bool generateSpecConstantInst(const SPIRV::IncomingCall *Call,
1610 MachineIRBuilder &MIRBuilder,
1611 SPIRVGlobalRegistry *GR) {
1612 // Lookup the instruction opcode in the TableGen records.
1613 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1614 unsigned Opcode =
1615 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1616 const MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1617
1618 switch (Opcode) {
1619 case SPIRV::OpSpecConstant: {
1620 // Build the SpecID decoration.
1621 unsigned SpecId =
1622 static_cast<unsigned>(getIConstVal(Call->Arguments[0], MRI));
1623 buildOpDecorate(Call->ReturnRegister, MIRBuilder, SPIRV::Decoration::SpecId,
1624 {SpecId});
1625 // Determine the constant MI.
1626 Register ConstRegister = Call->Arguments[1];
1627 const MachineInstr *Const = getDefInstrMaybeConstant(ConstRegister, MRI);
1628 assert(Const &&
1629 (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
1630 Const->getOpcode() == TargetOpcode::G_FCONSTANT) &&
1631 "Argument should be either an int or floating-point constant");
1632 // Determine the opcode and build the OpSpecConstant MI.
1633 const MachineOperand &ConstOperand = Const->getOperand(1);
1634 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeBool) {
1635 assert(ConstOperand.isCImm() && "Int constant operand is expected");
1636 Opcode = ConstOperand.getCImm()->getValue().getZExtValue()
1637 ? SPIRV::OpSpecConstantTrue
1638 : SPIRV::OpSpecConstantFalse;
1639 }
1640 auto MIB = MIRBuilder.buildInstr(Opcode)
1641 .addDef(Call->ReturnRegister)
1642 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
1643
1644 if (Call->ReturnType->getOpcode() != SPIRV::OpTypeBool) {
1645 if (Const->getOpcode() == TargetOpcode::G_CONSTANT)
1646 addNumImm(ConstOperand.getCImm()->getValue(), MIB);
1647 else
1648 addNumImm(ConstOperand.getFPImm()->getValueAPF().bitcastToAPInt(), MIB);
1649 }
1650 return true;
1651 }
1652 case SPIRV::OpSpecConstantComposite: {
1653 auto MIB = MIRBuilder.buildInstr(Opcode)
1654 .addDef(Call->ReturnRegister)
1655 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
1656 for (unsigned i = 0; i < Call->Arguments.size(); i++)
1657 MIB.addUse(Call->Arguments[i]);
1658 return true;
1659 }
1660 default:
1661 return false;
1662 }
1663}
1664
1665static bool buildNDRange(const SPIRV::IncomingCall *Call,
1666 MachineIRBuilder &MIRBuilder,
1667 SPIRVGlobalRegistry *GR) {
1668 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1669 MRI->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
1670 SPIRVType *PtrType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
1671 assert(PtrType->getOpcode() == SPIRV::OpTypePointer &&
1672 PtrType->getOperand(2).isReg());
1673 Register TypeReg = PtrType->getOperand(2).getReg();
1674 SPIRVType *StructType = GR->getSPIRVTypeForVReg(TypeReg);
1675 MachineFunction &MF = MIRBuilder.getMF();
1676 Register TmpReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1677 GR->assignSPIRVTypeToVReg(StructType, TmpReg, MF);
1678 // Skip the first arg, it's the destination pointer. OpBuildNDRange takes
1679 // three other arguments, so pass a zero constant for any that are absent.
1680 unsigned NumArgs = Call->Arguments.size();
1681 assert(NumArgs >= 2);
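// The three ndrange_t constructor shapes map onto the argument count: two
// args carry only the global size, three add a local size, and four carry
// offset, global and local sizes; the index shuffling below normalizes them.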
1682 Register GlobalWorkSize = Call->Arguments[NumArgs < 4 ? 1 : 2];
1683 MRI->setRegClass(GlobalWorkSize, &SPIRV::IDRegClass);
1684 Register LocalWorkSize =
1685 NumArgs == 2 ? Register(0) : Call->Arguments[NumArgs < 4 ? 2 : 3];
1686 if (LocalWorkSize.isValid())
1687 MRI->setRegClass(LocalWorkSize, &SPIRV::IDRegClass);
1688 Register GlobalWorkOffset = NumArgs <= 3 ? Register(0) : Call->Arguments[1];
1689 if (GlobalWorkOffset.isValid())
1690 MRI->setRegClass(GlobalWorkOffset, &SPIRV::IDRegClass);
1691 if (NumArgs < 4) {
1692 Register Const;
1693 SPIRVType *SpvTy = GR->getSPIRVTypeForVReg(GlobalWorkSize);
1694 if (SpvTy->getOpcode() == SPIRV::OpTypePointer) {
1695 MachineInstr *DefInstr = MRI->getUniqueVRegDef(GlobalWorkSize);
1696 assert(DefInstr && isSpvIntrinsic(*DefInstr, Intrinsic::spv_gep) &&
1697 DefInstr->getOperand(3).isReg());
1698 Register GWSPtr = DefInstr->getOperand(3).getReg();
1699 if (!MRI->getRegClassOrNull(GWSPtr))
1700 MRI->setRegClass(GWSPtr, &SPIRV::IDRegClass);
1701 // TODO: Maybe simplify generation of the type of the fields.
1702 unsigned Size = Call->Builtin->Name.equals("ndrange_3D") ? 3 : 2;
1703 unsigned BitWidth = GR->getPointerSize() == 64 ? 64 : 32;
1704 Type *BaseTy = IntegerType::get(MF.getFunction().getContext(), BitWidth);
1705 Type *FieldTy = ArrayType::get(BaseTy, Size);
1706 SPIRVType *SpvFieldTy = GR->getOrCreateSPIRVType(FieldTy, MIRBuilder);
1707 GlobalWorkSize = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1708 GR->assignSPIRVTypeToVReg(SpvFieldTy, GlobalWorkSize, MF);
1709 MIRBuilder.buildInstr(SPIRV::OpLoad)
1710 .addDef(GlobalWorkSize)
1711 .addUse(GR->getSPIRVTypeID(SpvFieldTy))
1712 .addUse(GWSPtr);
1713 Const = GR->getOrCreateConsIntArray(0, MIRBuilder, SpvFieldTy);
1714 } else {
1715 Const = GR->buildConstantInt(0, MIRBuilder, SpvTy);
1716 }
1717 if (!LocalWorkSize.isValid())
1718 LocalWorkSize = Const;
1719 if (!GlobalWorkOffset.isValid())
1720 GlobalWorkOffset = Const;
1721 }
1722 assert(LocalWorkSize.isValid() && GlobalWorkOffset.isValid());
1723 MIRBuilder.buildInstr(SPIRV::OpBuildNDRange)
1724 .addDef(TmpReg)
1725 .addUse(TypeReg)
1726 .addUse(GlobalWorkSize)
1727 .addUse(LocalWorkSize)
1728 .addUse(GlobalWorkOffset);
1729 return MIRBuilder.buildInstr(SPIRV::OpStore)
1730 .addUse(Call->Arguments[0])
1731 .addUse(TmpReg);
1732}
1733
1734 static MachineInstr *getBlockStructInstr(Register ParamReg,
1735 MachineRegisterInfo *MRI) {
1736 // We expect the following sequence of instructions:
1737 // %0:_(pN) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.spv.alloca)
1738 // or = G_GLOBAL_VALUE @block_literal_global
1739 // %1:_(pN) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.spv.bitcast), %0
1740 // %2:_(p4) = G_ADDRSPACE_CAST %1:_(pN)
1741 MachineInstr *MI = MRI->getUniqueVRegDef(ParamReg);
1742 assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST &&
1743 MI->getOperand(1).isReg());
1744 Register BitcastReg = MI->getOperand(1).getReg();
1745 MachineInstr *BitcastMI = MRI->getUniqueVRegDef(BitcastReg);
1746 assert(isSpvIntrinsic(*BitcastMI, Intrinsic::spv_bitcast) &&
1747 BitcastMI->getOperand(2).isReg());
1748 Register ValueReg = BitcastMI->getOperand(2).getReg();
1749 MachineInstr *ValueMI = MRI->getUniqueVRegDef(ValueReg);
1750 return ValueMI;
1751}
1752
1753 // Return an integer constant corresponding to the given register, as
1754 // defined via spv_track_constant.
1755// TODO: maybe unify with prelegalizer pass.
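// Expected MIR shape (a sketch matching the asserts below):
//   %c:_(s32) = G_CONSTANT i32 <value>
//   %r:_(s32) = G_INTRINSIC intrinsic(@llvm.spv.track.constant), %c, ...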
1756 static unsigned getConstFromIntrinsic(Register Reg, MachineRegisterInfo *MRI) {
1757 MachineInstr *DefMI = MRI->getUniqueVRegDef(Reg);
1758 assert(isSpvIntrinsic(*DefMI, Intrinsic::spv_track_constant) &&
1759 DefMI->getOperand(2).isReg());
1760 MachineInstr *DefMI2 = MRI->getUniqueVRegDef(DefMI->getOperand(2).getReg());
1761 assert(DefMI2->getOpcode() == TargetOpcode::G_CONSTANT &&
1762 DefMI2->getOperand(1).isCImm());
1763 return DefMI2->getOperand(1).getCImm()->getValue().getZExtValue();
1764}
1765
1766 // Return the type of the instruction result, as recorded by the
1766 // spv_assign_type intrinsic.
1767// TODO: maybe unify with prelegalizer pass.
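// Walks forward from MI to the spv_assign_type intrinsic attached to its
// result register (skipping an optional spv_assign_name) and reads the LLVM
// type out of the intrinsic's metadata operand.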
1768 static const Type *getMachineInstrType(MachineInstr *MI) {
1769 MachineInstr *NextMI = MI->getNextNode();
1770 if (isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_name))
1771 NextMI = NextMI->getNextNode();
1772 Register ValueReg = MI->getOperand(0).getReg();
1773 if (!isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_type) ||
1774 NextMI->getOperand(1).getReg() != ValueReg)
1775 return nullptr;
1776 Type *Ty = getMDOperandAsType(NextMI->getOperand(2).getMetadata(), 0);
1777 assert(Ty && "Type is expected");
1778 return getTypedPtrEltType(Ty);
1779}
1780
1781static const Type *getBlockStructType(Register ParamReg,
1782 MachineRegisterInfo *MRI) {
1783 // In principle, this information should be passed to us from Clang via
1784 // an elementtype attribute. However, said attribute requires that
1785 // the function call be an intrinsic, which it is not. Instead, we rely on being
1786 // able to trace this to the declaration of a variable: OpenCL C specification
1787 // section 6.12.5 should guarantee that we can do this.
1788 MachineInstr *MI = getBlockStructInstr(ParamReg, MRI);
1789 if (MI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE)
1790 return getTypedPtrEltType(MI->getOperand(1).getGlobal()->getType());
1791 assert(isSpvIntrinsic(*MI, Intrinsic::spv_alloca) &&
1792 "Blocks in OpenCL C must be traceable to allocation site");
1793 return getMachineInstrType(MI);
1794}
1795
1796 // TODO: maybe move to the global registry.
1797static SPIRVType *
1798 getOrCreateSPIRVDeviceEventPointer(MachineIRBuilder &MIRBuilder,
1799 SPIRVGlobalRegistry *GR) {
1800 LLVMContext &Context = MIRBuilder.getMF().getFunction().getContext();
1801 Type *OpaqueType = StructType::getTypeByName(Context, "spirv.DeviceEvent");
1802 if (!OpaqueType)
1803 OpaqueType = StructType::getTypeByName(Context, "opencl.clk_event_t");
1804 if (!OpaqueType)
1805 OpaqueType = StructType::create(Context, "spirv.DeviceEvent");
1806 unsigned SC0 = storageClassToAddressSpace(SPIRV::StorageClass::Function);
1807 unsigned SC1 = storageClassToAddressSpace(SPIRV::StorageClass::Generic);
1808 Type *PtrType = PointerType::get(PointerType::get(OpaqueType, SC0), SC1);
1809 return GR->getOrCreateSPIRVType(PtrType, MIRBuilder);
1810}
1811
1812 static bool buildEnqueueKernel(const SPIRV::IncomingCall *Call,
1813 MachineIRBuilder &MIRBuilder,
1814 SPIRVGlobalRegistry *GR) {
1815 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1816 const DataLayout &DL = MIRBuilder.getDataLayout();
1817 bool HasEvents = Call->Builtin->Name.contains("events");
1818 const SPIRVType *Int32Ty = GR->getOrCreateSPIRVIntegerType(32, MIRBuilder);
1819
1820 // Build the vararg operands before emitting OpEnqueueKernel.
1821 // Local size arguments: sizes of the block invoke arguments. Clang generates
1822 // the local size operands as an array, so we need to unpack them.
1823 SmallVector<Register, 16> LocalSizes;
1824 if (Call->Builtin->Name.find("_varargs") != StringRef::npos) {
1825 const unsigned LocalSizeArrayIdx = HasEvents ? 9 : 6;
1826 Register GepReg = Call->Arguments[LocalSizeArrayIdx];
1827 MachineInstr *GepMI = MRI->getUniqueVRegDef(GepReg);
1828 assert(isSpvIntrinsic(*GepMI, Intrinsic::spv_gep) &&
1829 GepMI->getOperand(3).isReg());
1830 Register ArrayReg = GepMI->getOperand(3).getReg();
1831 MachineInstr *ArrayMI = MRI->getUniqueVRegDef(ArrayReg);
1832 const Type *LocalSizeTy = getMachineInstrType(ArrayMI);
1833 assert(LocalSizeTy && "Local size type is expected");
1834 const uint64_t LocalSizeNum =
1835 cast<ArrayType>(LocalSizeTy)->getNumElements();
1836 unsigned SC = storageClassToAddressSpace(SPIRV::StorageClass::Generic);
1837 const LLT LLType = LLT::pointer(SC, GR->getPointerSize());
1838 const SPIRVType *PointerSizeTy = GR->getOrCreateSPIRVPointerType(
1839 Int32Ty, MIRBuilder, SPIRV::StorageClass::Function);
1840 for (unsigned I = 0; I < LocalSizeNum; ++I) {
1841 Register Reg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1842 MRI->setType(Reg, LLType);
1843 GR->assignSPIRVTypeToVReg(PointerSizeTy, Reg, MIRBuilder.getMF());
1844 auto GEPInst = MIRBuilder.buildIntrinsic(
1845 Intrinsic::spv_gep, ArrayRef<Register>{Reg}, true, false);
1846 GEPInst
1847 .addImm(GepMI->getOperand(2).getImm()) // In bounds flag.
1848 .addUse(ArrayMI->getOperand(0).getReg()) // Alloca.
1849 .addUse(buildConstantIntReg(0, MIRBuilder, GR)) // Indices.
1850 .addUse(buildConstantIntReg(I, MIRBuilder, GR));
1851 LocalSizes.push_back(Reg);
1852 }
1853 }
1854
1855 // SPIRV OpEnqueueKernel instruction has 10+ arguments.
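// After Result Type and Result, the operand order is: Queue, Flags,
// ND Range, Num Events, Wait Events, Ret Event, Invoke, Param, Param Size,
// Param Align, followed by any Local Size operands.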
1856 auto MIB = MIRBuilder.buildInstr(SPIRV::OpEnqueueKernel)
1857 .addDef(Call->ReturnRegister)
1858 .addUse(GR->getSPIRVTypeID(Int32Ty));
1859
1860 // Copy all arguments before block invoke function pointer.
1861 const unsigned BlockFIdx = HasEvents ? 6 : 3;
1862 for (unsigned i = 0; i < BlockFIdx; i++)
1863 MIB.addUse(Call->Arguments[i]);
1864
1865 // If there are no event arguments in the original call, add dummy ones.
1866 if (!HasEvents) {
1867 MIB.addUse(buildConstantIntReg(0, MIRBuilder, GR)); // Dummy num events.
1868 Register NullPtr = GR->getOrCreateConstNullPtr(
1869 MIRBuilder, getOrCreateSPIRVDeviceEventPointer(MIRBuilder, GR));
1870 MIB.addUse(NullPtr); // Dummy wait events.
1871 MIB.addUse(NullPtr); // Dummy ret event.
1872 }
1873
1874 MachineInstr *BlockMI = getBlockStructInstr(Call->Arguments[BlockFIdx], MRI);
1875 assert(BlockMI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
1876 // Invoke: Pointer to invoke function.
1877 MIB.addGlobalAddress(BlockMI->getOperand(1).getGlobal());
1878
1879 Register BlockLiteralReg = Call->Arguments[BlockFIdx + 1];
1880 // Param: Pointer to block literal.
1881 MIB.addUse(BlockLiteralReg);
1882
1883 Type *PType = const_cast<Type *>(getBlockStructType(BlockLiteralReg, MRI));
1884 // TODO: these numbers should be obtained from block literal structure.
1885 // Param Size: Size of block literal structure.
1886 MIB.addUse(buildConstantIntReg(DL.getTypeStoreSize(PType), MIRBuilder, GR));
1887 // Param Alignment: Alignment of block literal structure.
1888 MIB.addUse(
1889 buildConstantIntReg(DL.getPrefTypeAlign(PType).value(), MIRBuilder, GR));
1890
1891 for (unsigned i = 0; i < LocalSizes.size(); i++)
1892 MIB.addUse(LocalSizes[i]);
1893 return true;
1894}
1895
1896 static bool generateEnqueueInst(const SPIRV::IncomingCall *Call,
1897 MachineIRBuilder &MIRBuilder,
1898 SPIRVGlobalRegistry *GR) {
1899 // Lookup the instruction opcode in the TableGen records.
1900 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1901 unsigned Opcode =
1902 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1903
1904 switch (Opcode) {
1905 case SPIRV::OpRetainEvent:
1906 case SPIRV::OpReleaseEvent:
1907 MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
1908 return MIRBuilder.buildInstr(Opcode).addUse(Call->Arguments[0]);
1909 case SPIRV::OpCreateUserEvent:
1910 case SPIRV::OpGetDefaultQueue:
1911 return MIRBuilder.buildInstr(Opcode)
1912 .addDef(Call->ReturnRegister)
1913 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
1914 case SPIRV::OpIsValidEvent:
1915 MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
1916 return MIRBuilder.buildInstr(Opcode)
1917 .addDef(Call->ReturnRegister)
1918 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1919 .addUse(Call->Arguments[0]);
1920 case SPIRV::OpSetUserEventStatus:
1921 MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
1922 MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
1923 return MIRBuilder.buildInstr(Opcode)
1924 .addUse(Call->Arguments[0])
1925 .addUse(Call->Arguments[1]);
1926 case SPIRV::OpCaptureEventProfilingInfo:
1927 MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
1928 MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
1929 MIRBuilder.getMRI()->setRegClass(Call->Arguments[2], &SPIRV::IDRegClass);
1930 return MIRBuilder.buildInstr(Opcode)
1931 .addUse(Call->Arguments[0])
1932 .addUse(Call->Arguments[1])
1933 .addUse(Call->Arguments[2]);
1934 case SPIRV::OpBuildNDRange:
1935 return buildNDRange(Call, MIRBuilder, GR);
1936 case SPIRV::OpEnqueueKernel:
1937 return buildEnqueueKernel(Call, MIRBuilder, GR);
1938 default:
1939 return false;
1940 }
1941}
1942
1943 static bool generateAsyncCopy(const SPIRV::IncomingCall *Call,
1944 MachineIRBuilder &MIRBuilder,
1945 SPIRVGlobalRegistry *GR) {
1946 // Lookup the instruction opcode in the TableGen records.
1947 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1948 unsigned Opcode =
1949 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1950 auto Scope = buildConstantIntReg(SPIRV::Scope::Workgroup, MIRBuilder, GR);
1951
1952 switch (Opcode) {
1953 case SPIRV::OpGroupAsyncCopy:
1954 return MIRBuilder.buildInstr(Opcode)
1955 .addDef(Call->ReturnRegister)
1956 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1957 .addUse(Scope)
1958 .addUse(Call->Arguments[0])
1959 .addUse(Call->Arguments[1])
1960 .addUse(Call->Arguments[2])
1961 .addUse(buildConstantIntReg(1, MIRBuilder, GR))
1962 .addUse(Call->Arguments[3]);
1963 case SPIRV::OpGroupWaitEvents:
1964 return MIRBuilder.buildInstr(Opcode)
1965 .addUse(Scope)
1966 .addUse(Call->Arguments[0])
1967 .addUse(Call->Arguments[1]);
1968 default:
1969 return false;
1970 }
1971}
1972
1973static bool generateConvertInst(const StringRef DemangledCall,
1974 const SPIRV::IncomingCall *Call,
1975 MachineIRBuilder &MIRBuilder,
1976 SPIRVGlobalRegistry *GR) {
1977 // Lookup the conversion builtin in the TableGen records.
1978 const SPIRV::ConvertBuiltin *Builtin =
1979 SPIRV::lookupConvertBuiltin(Call->Builtin->Name, Call->Builtin->Set);
1980
1981 if (Builtin->IsSaturated)
1982 buildOpDecorate(Call->ReturnRegister, MIRBuilder,
1983 SPIRV::Decoration::SaturatedConversion, {});
1984 if (Builtin->IsRounded)
1985 buildOpDecorate(Call->ReturnRegister, MIRBuilder,
1986 SPIRV::Decoration::FPRoundingMode,
1987 {(unsigned)Builtin->RoundingMode});
1988
1989 unsigned Opcode = SPIRV::OpNop;
1990 if (GR->isScalarOrVectorOfType(Call->Arguments[0], SPIRV::OpTypeInt)) {
1991 // Int -> ...
1992 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) {
1993 // Int -> Int
1994 if (Builtin->IsSaturated)
1995 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpSatConvertUToS
1996 : SPIRV::OpSatConvertSToU;
1997 else
1998 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpUConvert
1999 : SPIRV::OpSConvert;
2000 } else if (GR->isScalarOrVectorOfType(Call->ReturnRegister,
2001 SPIRV::OpTypeFloat)) {
2002 // Int -> Float
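// The source signedness is recovered from the demangled name rather than
// the operand type: the character right after '(' is 'u' for unsigned
// sources, e.g. "convert_float(uint4)" (illustrative).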
2003 bool IsSourceSigned =
2004 DemangledCall[DemangledCall.find_first_of('(') + 1] != 'u';
2005 Opcode = IsSourceSigned ? SPIRV::OpConvertSToF : SPIRV::OpConvertUToF;
2006 }
2007 } else if (GR->isScalarOrVectorOfType(Call->Arguments[0],
2008 SPIRV::OpTypeFloat)) {
2009 // Float -> ...
2010 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt))
2011 // Float -> Int
2012 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpConvertFToS
2013 : SPIRV::OpConvertFToU;
2014 else if (GR->isScalarOrVectorOfType(Call->ReturnRegister,
2015 SPIRV::OpTypeFloat))
2016 // Float -> Float
2017 Opcode = SPIRV::OpFConvert;
2018 }
2019
2020 assert(Opcode != SPIRV::OpNop &&
2021 "Conversion between the types not implemented!");
2022
2023 MIRBuilder.buildInstr(Opcode)
2024 .addDef(Call->ReturnRegister)
2025 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2026 .addUse(Call->Arguments[0]);
2027 return true;
2028}
2029
2030 static bool generateVectorLoadStoreInst(const SPIRV::IncomingCall *Call,
2031 MachineIRBuilder &MIRBuilder,
2032 SPIRVGlobalRegistry *GR) {
2033 // Lookup the vector load/store builtin in the TableGen records.
2034 const SPIRV::VectorLoadStoreBuiltin *Builtin =
2035 SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name,
2036 Call->Builtin->Set);
2037 // Build extended instruction.
2038 auto MIB =
2039 MIRBuilder.buildInstr(SPIRV::OpExtInst)
2040 .addDef(Call->ReturnRegister)
2041 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2042 .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
2043 .addImm(Builtin->Number);
2044 for (auto Argument : Call->Arguments)
2045 MIB.addUse(Argument);
2046 MIB.addImm(Builtin->ElementCount);
2047
2048 // Rounding mode should be passed as the last argument in the MI for builtins
2049 // like "vstorea_halfn_r".
2050 if (Builtin->IsRounded)
2051 MIB.addImm(static_cast<uint32_t>(Builtin->RoundingMode));
2052 return true;
2053}
2054
2055 static bool generateLoadStoreInst(const SPIRV::IncomingCall *Call,
2056 MachineIRBuilder &MIRBuilder,
2057 SPIRVGlobalRegistry *GR) {
2058 // Lookup the instruction opcode in the TableGen records.
2059 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2060 unsigned Opcode =
2061 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2062 bool IsLoad = Opcode == SPIRV::OpLoad;
2063 // Build the instruction.
2064 auto MIB = MIRBuilder.buildInstr(Opcode);
2065 if (IsLoad) {
2066 MIB.addDef(Call->ReturnRegister);
2067 MIB.addUse(GR->getSPIRVTypeID(Call->ReturnType));
2068 }
2069 // Add a pointer to the value to load/store.
2070 MIB.addUse(Call->Arguments[0]);
2071 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2072 MRI->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
2073 // Add a value to store.
2074 if (!IsLoad) {
2075 MIB.addUse(Call->Arguments[1]);
2076 MRI->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
2077 }
2078 // Add optional memory attributes and an alignment.
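// For loads the optional literals start at argument 1; for stores they
// start at argument 2, since slot 1 holds the value being stored. Each is
// a compile-time constant recovered via getConstFromIntrinsic.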
2079 unsigned NumArgs = Call->Arguments.size();
2080 if ((IsLoad && NumArgs >= 2) || NumArgs >= 3) {
2081 MIB.addImm(getConstFromIntrinsic(Call->Arguments[IsLoad ? 1 : 2], MRI));
2082 MRI->setRegClass(Call->Arguments[IsLoad ? 1 : 2], &SPIRV::IDRegClass);
2083 }
2084 if ((IsLoad && NumArgs >= 3) || NumArgs >= 4) {
2085 MIB.addImm(getConstFromIntrinsic(Call->Arguments[IsLoad ? 2 : 3], MRI));
2086 MRI->setRegClass(Call->Arguments[IsLoad ? 2 : 3], &SPIRV::IDRegClass);
2087 }
2088 return true;
2089}
2090
2091 /// Lowers a builtin function call using the provided \p DemangledCall skeleton
2092/// and external instruction \p Set.
2093namespace SPIRV {
2094std::optional<bool> lowerBuiltin(const StringRef DemangledCall,
2095 SPIRV::InstructionSet::InstructionSet Set,
2096 MachineIRBuilder &MIRBuilder,
2097 const Register OrigRet, const Type *OrigRetTy,
2098 const SmallVectorImpl<Register> &Args,
2099 SPIRVGlobalRegistry *GR) {
2100 LLVM_DEBUG(dbgs() << "Lowering builtin call: " << DemangledCall << "\n");
2101
2102 // SPIR-V type and return register.
2103 Register ReturnRegister = OrigRet;
2104 SPIRVType *ReturnType = nullptr;
2105 if (OrigRetTy && !OrigRetTy->isVoidTy()) {
2106 ReturnType = GR->assignTypeToVReg(OrigRetTy, OrigRet, MIRBuilder);
2107 if (!MIRBuilder.getMRI()->getRegClassOrNull(ReturnRegister))
2108 MIRBuilder.getMRI()->setRegClass(ReturnRegister, &SPIRV::IDRegClass);
2109 } else if (OrigRetTy && OrigRetTy->isVoidTy()) {
2110 ReturnRegister = MIRBuilder.getMRI()->createVirtualRegister(&IDRegClass);
2111 MIRBuilder.getMRI()->setType(ReturnRegister, LLT::scalar(32));
2112 ReturnType = GR->assignTypeToVReg(OrigRetTy, ReturnRegister, MIRBuilder);
2113 }
2114
2115 // Lookup the builtin in the TableGen records.
2116 std::unique_ptr<const IncomingCall> Call =
2117 lookupBuiltin(DemangledCall, Set, ReturnRegister, ReturnType, Args);
2118
2119 if (!Call) {
2120 LLVM_DEBUG(dbgs() << "Builtin record was not found!\n");
2121 return std::nullopt;
2122 }
2123
2124 // TODO: check if the provided args meet the builtin requirements.
2125 assert(Args.size() >= Call->Builtin->MinNumArgs &&
2126 "Too few arguments to generate the builtin");
2127 if (Call->Builtin->MaxNumArgs && Args.size() > Call->Builtin->MaxNumArgs)
2128 LLVM_DEBUG(dbgs() << "More arguments provided than required!\n");
2129
2130 // Match the builtin with implementation based on the grouping.
2131 switch (Call->Builtin->Group) {
2132 case SPIRV::Extended:
2133 return generateExtInst(Call.get(), MIRBuilder, GR);
2134 case SPIRV::Relational:
2135 return generateRelationalInst(Call.get(), MIRBuilder, GR);
2136 case SPIRV::Group:
2137 return generateGroupInst(Call.get(), MIRBuilder, GR);
2138 case SPIRV::Variable:
2139 return generateBuiltinVar(Call.get(), MIRBuilder, GR);
2140 case SPIRV::Atomic:
2141 return generateAtomicInst(Call.get(), MIRBuilder, GR);
2142 case SPIRV::AtomicFloating:
2143 return generateAtomicFloatingInst(Call.get(), MIRBuilder, GR);
2144 case SPIRV::Barrier:
2145 return generateBarrierInst(Call.get(), MIRBuilder, GR);
2146 case SPIRV::Dot:
2147 return generateDotOrFMulInst(Call.get(), MIRBuilder, GR);
2148 case SPIRV::GetQuery:
2149 return generateGetQueryInst(Call.get(), MIRBuilder, GR);
2150 case SPIRV::ImageSizeQuery:
2151 return generateImageSizeQueryInst(Call.get(), MIRBuilder, GR);
2152 case SPIRV::ImageMiscQuery:
2153 return generateImageMiscQueryInst(Call.get(), MIRBuilder, GR);
2154 case SPIRV::ReadImage:
2155 return generateReadImageInst(DemangledCall, Call.get(), MIRBuilder, GR);
2156 case SPIRV::WriteImage:
2157 return generateWriteImageInst(Call.get(), MIRBuilder, GR);
2158 case SPIRV::SampleImage:
2159 return generateSampleImageInst(DemangledCall, Call.get(), MIRBuilder, GR);
2160 case SPIRV::Select:
2161 return generateSelectInst(Call.get(), MIRBuilder);
2162 case SPIRV::SpecConstant:
2163 return generateSpecConstantInst(Call.get(), MIRBuilder, GR);
2164 case SPIRV::Enqueue:
2165 return generateEnqueueInst(Call.get(), MIRBuilder, GR);
2166 case SPIRV::AsyncCopy:
2167 return generateAsyncCopy(Call.get(), MIRBuilder, GR);
2168 case SPIRV::Convert:
2169 return generateConvertInst(DemangledCall, Call.get(), MIRBuilder, GR);
2170 case SPIRV::VectorLoadStore:
2171 return generateVectorLoadStoreInst(Call.get(), MIRBuilder, GR);
2172 case SPIRV::LoadStore:
2173 return generateLoadStoreInst(Call.get(), MIRBuilder, GR);
2174 case SPIRV::IntelSubgroups:
2175 return generateIntelSubgroupsInst(Call.get(), MIRBuilder, GR);
2176 case SPIRV::GroupUniform:
2177 return generateGroupUniformInst(Call.get(), MIRBuilder, GR);
2178 }
2179 return false;
2180}
2181
2185};
2186
2187#define GET_BuiltinTypes_DECL
2188#define GET_BuiltinTypes_IMPL
2189
2193};
2194
2195#define GET_OpenCLTypes_DECL
2196#define GET_OpenCLTypes_IMPL
2197
2198#include "SPIRVGenTables.inc"
2199} // namespace SPIRV
2200
2201//===----------------------------------------------------------------------===//
2202// Misc functions for parsing builtin types.
2203//===----------------------------------------------------------------------===//
2204
2205 static Type *parseTypeString(const StringRef Name, LLVMContext &Context) {
2206 if (Name.starts_with("void"))
2207 return Type::getVoidTy(Context);
2208 else if (Name.starts_with("int") || Name.starts_with("uint"))
2209 return Type::getInt32Ty(Context);
2210 else if (Name.starts_with("float"))
2211 return Type::getFloatTy(Context);
2212 else if (Name.starts_with("half"))
2213 return Type::getHalfTy(Context);
2214 report_fatal_error("Unable to recognize type!");
2215}
2216
2217//===----------------------------------------------------------------------===//
2218// Implementation functions for builtin types.
2219//===----------------------------------------------------------------------===//
2220
2221 static SPIRVType *getNonParameterizedType(const TargetExtType *ExtensionType,
2222 const SPIRV::BuiltinType *TypeRecord,
2223 MachineIRBuilder &MIRBuilder,
2224 SPIRVGlobalRegistry *GR) {
2225 unsigned Opcode = TypeRecord->Opcode;
2226 // Create or get an existing type from GlobalRegistry.
2227 return GR->getOrCreateOpTypeByOpcode(ExtensionType, MIRBuilder, Opcode);
2228}
2229
2230 static SPIRVType *getSamplerType(MachineIRBuilder &MIRBuilder,
2231 SPIRVGlobalRegistry *GR) {
2232 // Create or get an existing type from GlobalRegistry.
2233 return GR->getOrCreateOpTypeSampler(MIRBuilder);
2234}
2235
2236static SPIRVType *getPipeType(const TargetExtType *ExtensionType,
2237 MachineIRBuilder &MIRBuilder,
2238 SPIRVGlobalRegistry *GR) {
2239 assert(ExtensionType->getNumIntParameters() == 1 &&
2240 "Invalid number of parameters for SPIR-V pipe builtin!");
2241 // Create or get an existing type from GlobalRegistry.
2242 return GR->getOrCreateOpTypePipe(MIRBuilder,
2243 SPIRV::AccessQualifier::AccessQualifier(
2244 ExtensionType->getIntParameter(0)));
2245}
2246
2247static SPIRVType *
2248getImageType(const TargetExtType *ExtensionType,
2249 const SPIRV::AccessQualifier::AccessQualifier Qualifier,
2250 MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR) {
2251 assert(ExtensionType->getNumTypeParameters() == 1 &&
2252 "SPIR-V image builtin type must have sampled type parameter!");
2253 const SPIRVType *SampledType =
2254 GR->getOrCreateSPIRVType(ExtensionType->getTypeParameter(0), MIRBuilder);
2255 assert(ExtensionType->getNumIntParameters() == 7 &&
2256 "Invalid number of parameters for SPIR-V image builtin!");
2257 // Create or get an existing type from GlobalRegistry.
2258 return GR->getOrCreateOpTypeImage(
2259 MIRBuilder, SampledType,
2260 SPIRV::Dim::Dim(ExtensionType->getIntParameter(0)),
2261 ExtensionType->getIntParameter(1), ExtensionType->getIntParameter(2),
2262 ExtensionType->getIntParameter(3), ExtensionType->getIntParameter(4),
2263 SPIRV::ImageFormat::ImageFormat(ExtensionType->getIntParameter(5)),
2264 Qualifier == SPIRV::AccessQualifier::WriteOnly
2265 ? SPIRV::AccessQualifier::WriteOnly
2266 : SPIRV::AccessQualifier::AccessQualifier(
2267 ExtensionType->getIntParameter(6)));
2268}
2269
2270 static SPIRVType *getSampledImageType(const TargetExtType *OpaqueType,
2271 MachineIRBuilder &MIRBuilder,
2272 SPIRVGlobalRegistry *GR) {
2273 SPIRVType *OpaqueImageType = getImageType(
2274 OpaqueType, SPIRV::AccessQualifier::ReadOnly, MIRBuilder, GR);
2275 // Create or get an existing type from GlobalRegistry.
2276 return GR->getOrCreateOpTypeSampledImage(OpaqueImageType, MIRBuilder);
2277}
2278
2279namespace SPIRV {
2280const TargetExtType *
2281 parseBuiltinTypeNameToTargetExtType(std::string TypeName,
2282 MachineIRBuilder &MIRBuilder) {
2283 StringRef NameWithParameters = TypeName;
2284
2285 // Pointers-to-opaque-structs representing OpenCL types are first translated
2286 // to equivalent SPIR-V types. OpenCL builtin type names should have the
2287 // following format: e.g. %opencl.event_t
2288 if (NameWithParameters.starts_with("opencl.")) {
2289 const SPIRV::OpenCLType *OCLTypeRecord =
2290 SPIRV::lookupOpenCLType(NameWithParameters);
2291 if (!OCLTypeRecord)
2292 report_fatal_error("Missing TableGen record for OpenCL type: " +
2293 NameWithParameters);
2294 NameWithParameters = OCLTypeRecord->SpirvTypeLiteral;
2295 // Continue with the SPIR-V builtin type...
2296 }
2297
2298 // Names of the opaque structs representing SPIR-V builtins without
2299 // parameters should have the following format: e.g. %spirv.Event
2300 assert(NameWithParameters.starts_with("spirv.") &&
2301 "Unknown builtin opaque type!");
2302
2303 // Parameterized SPIR-V builtin names follow this format:
2304 // e.g. %spirv.Image._void_1_0_0_0_0_0_0, %spirv.Pipe._0
2305 if (!NameWithParameters.contains('_'))
2306 return TargetExtType::get(MIRBuilder.getContext(), NameWithParameters);
2307
2308 SmallVector<StringRef> Parameters;
2309 unsigned BaseNameLength = NameWithParameters.find('_') - 1;
2310 SplitString(NameWithParameters.substr(BaseNameLength + 1), Parameters, "_");
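// E.g. "spirv.Image._void_1_0_0_0_0_0_0" splits into the base name
// "spirv.Image" and parameters {"void", "1", "0", ...}; a leading
// non-digit parameter is treated as a type parameter below.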
2311
2312 SmallVector<Type *, 1> TypeParameters;
2313 bool HasTypeParameter = !isDigit(Parameters[0][0]);
2314 if (HasTypeParameter)
2315 TypeParameters.push_back(parseTypeString(
2316 Parameters[0], MIRBuilder.getMF().getFunction().getContext()));
2317 SmallVector<unsigned> IntParameters;
2318 for (unsigned i = HasTypeParameter ? 1 : 0; i < Parameters.size(); i++) {
2319 unsigned IntParameter = 0;
2320 bool ValidLiteral = !Parameters[i].getAsInteger(10, IntParameter);
2321 (void)ValidLiteral;
2322 assert(ValidLiteral &&
2323 "Invalid format of SPIR-V builtin parameter literal!");
2324 IntParameters.push_back(IntParameter);
2325 }
2326 return TargetExtType::get(MIRBuilder.getContext(),
2327 NameWithParameters.substr(0, BaseNameLength),
2328 TypeParameters, IntParameters);
2329}
2330
2331 SPIRVType *lowerBuiltinType(const Type *OpaqueType,
2332 SPIRV::AccessQualifier::AccessQualifier AccessQual,
2333 MachineIRBuilder &MIRBuilder,
2334 SPIRVGlobalRegistry *GR) {
2335 // In LLVM IR, SPIR-V and OpenCL builtin types are represented as either
2336 // target(...) target extension types or pointers-to-opaque-structs. The
2337 // approach relying on structs is deprecated and works only in the non-opaque
2338 // pointer mode (-opaque-pointers=0).
2339 // In order to maintain compatibility with LLVM IR generated by older versions
2340 // of Clang and LLVM/SPIR-V Translator, the pointers-to-opaque-structs are
2341 // "translated" to target extension types. This translation is temporary and
2342 // will be removed in a future release of LLVM.
2343 const TargetExtType *BuiltinType = dyn_cast<TargetExtType>(OpaqueType);
2344 if (!BuiltinType)
2345 BuiltinType = parseBuiltinTypeNameToTargetExtType(
2346 OpaqueType->getStructName().str(), MIRBuilder);
2347
2348 unsigned NumStartingVRegs = MIRBuilder.getMRI()->getNumVirtRegs();
2349
2350 const StringRef Name = BuiltinType->getName();
2351 LLVM_DEBUG(dbgs() << "Lowering builtin type: " << Name << "\n");
2352
2353 // Lookup the demangled builtin type in the TableGen records.
2354 const SPIRV::BuiltinType *TypeRecord = SPIRV::lookupBuiltinType(Name);
2355 if (!TypeRecord)
2356 report_fatal_error("Missing TableGen record for builtin type: " + Name);
2357
2358 // "Lower" the BuiltinType into TargetType. The following get<...>Type methods
2359 // use the implementation details from TableGen records or TargetExtType
2360 // parameters to either create a new OpType<...> machine instruction or get an
2361 // existing equivalent SPIRVType from GlobalRegistry.
2362 SPIRVType *TargetType;
2363 switch (TypeRecord->Opcode) {
2364 case SPIRV::OpTypeImage:
2365 TargetType = getImageType(BuiltinType, AccessQual, MIRBuilder, GR);
2366 break;
2367 case SPIRV::OpTypePipe:
2368 TargetType = getPipeType(BuiltinType, MIRBuilder, GR);
2369 break;
2370 case SPIRV::OpTypeDeviceEvent:
2371 TargetType = GR->getOrCreateOpTypeDeviceEvent(MIRBuilder);
2372 break;
2373 case SPIRV::OpTypeSampler:
2374 TargetType = getSamplerType(MIRBuilder, GR);
2375 break;
2376 case SPIRV::OpTypeSampledImage:
2377 TargetType = getSampledImageType(BuiltinType, MIRBuilder, GR);
2378 break;
2379 default:
2380 TargetType =
2381 getNonParameterizedType(BuiltinType, TypeRecord, MIRBuilder, GR);
2382 break;
2383 }
2384
2385 // Emit OpName instruction if a new OpType<...> instruction was added
2386 // (equivalent type was not found in GlobalRegistry).
2387 if (NumStartingVRegs < MIRBuilder.getMRI()->getNumVirtRegs())
2388 buildOpName(GR->getSPIRVTypeID(TargetType), Name, MIRBuilder);
2389
2390 return TargetType;
2391}
2392} // namespace SPIRV
2393} // namespace llvm