SPIRVUtils.cpp
1//===--- SPIRVUtils.cpp ---- SPIR-V Utility Functions -----------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains miscellaneous utility functions.
10//
11//===----------------------------------------------------------------------===//
12
13#include "SPIRVUtils.h"
14#include "MCTargetDesc/SPIRVBaseInfo.h"
15#include "SPIRV.h"
16#include "SPIRVGlobalRegistry.h"
17#include "SPIRVInstrInfo.h"
18#include "SPIRVSubtarget.h"
19#include "llvm/ADT/StringRef.h"
20#include "llvm/Analysis/LoopInfo.h"
21#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
22#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
23#include "llvm/CodeGen/MachineInstr.h"
24#include "llvm/Demangle/Demangle.h"
25#include "llvm/IR/IntrinsicInst.h"
26#include "llvm/IR/IntrinsicsSPIRV.h"
27#include <queue>
28#include <vector>
29
30namespace llvm {
31namespace SPIRV {
32// This code restores function argument/return value types for composite
33// cases, because the final types should still be aggregates, whereas they
34// are lowered to i32 during translation to cope with aggregate flattening.
35// TODO: should these just return nullptr when there's no metadata?
36static FunctionType *extractFunctionTypeFromMetadata(NamedMDNode *NMD,
37 FunctionType *FTy,
38 StringRef Name) {
39 if (!NMD)
40 return FTy;
41
42 constexpr auto getConstInt = [](MDNode *MD, unsigned OpId) -> ConstantInt * {
43 if (MD->getNumOperands() <= OpId)
44 return nullptr;
45 if (auto *CMeta = dyn_cast<ConstantAsMetadata>(MD->getOperand(OpId)))
46 return dyn_cast<ConstantInt>(CMeta->getValue());
47 return nullptr;
48 };
49
50 auto It = find_if(NMD->operands(), [Name](MDNode *N) {
51 if (auto *MDS = dyn_cast_or_null<MDString>(N->getOperand(0)))
52 return MDS->getString() == Name;
53 return false;
54 });
55
56 if (It == NMD->op_end())
57 return FTy;
58
59 Type *RetTy = FTy->getReturnType();
60 SmallVector<Type *, 4> PTys(FTy->params());
61
62 for (unsigned I = 1; I != (*It)->getNumOperands(); ++I) {
63 MDNode *MD = dyn_cast<MDNode>((*It)->getOperand(I));
64 assert(MD && "MDNode operand is expected");
65
66 if (auto *Const = getConstInt(MD, 0)) {
67 auto *CMeta = dyn_cast<ConstantAsMetadata>(MD->getOperand(1));
68 assert(CMeta && "ConstantAsMetadata operand is expected");
69 assert(Const->getSExtValue() >= -1);
70 // Currently -1 indicates return value, greater values mean
71 // argument numbers.
72 if (Const->getSExtValue() == -1)
73 RetTy = CMeta->getType();
74 else
75 PTys[Const->getSExtValue()] = CMeta->getType();
76 }
77 }
78
79 return FunctionType::get(RetTy, PTys, FTy->isVarArg());
80}
81
82FunctionType *getOriginalFunctionType(const Function &F) {
83 return extractFunctionTypeFromMetadata(
84 F.getParent()->getNamedMetadata("spv.cloned_funcs"), F.getFunctionType(),
85 F.getName());
86}
87
88FunctionType *getOriginalFunctionType(const CallBase &CB) {
89 return extractFunctionTypeFromMetadata(
90 CB.getModule()->getNamedMetadata("spv.mutated_callsites"),
91 CB.getFunctionType(), CB.getName());
92}
93} // namespace SPIRV
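// Editorial sketch, not part of the original file: the metadata shape that
// extractFunctionTypeFromMetadata above consumes. Names and types here are
// illustrative assumptions. A cloned function "foo" whose return value
// (index -1) was originally a struct and whose argument 0 was originally an
// array could be described as:
//
//   !spv.cloned_funcs = !{!0}
//   !0 = !{!"foo", !1, !2}
//   !1 = !{i32 -1, %struct.S poison} ; operand 1 carries the real return type
//   !2 = !{i32 0, [2 x i32] poison}  ; argument 0's original type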
94
95// The following functions are used to add these string literals as a series of
96// 32-bit integer operands with the correct format, and unpack them if necessary
97// when making string comparisons in compiler passes.
98// SPIR-V requires null-terminated UTF-8 strings padded to 32-bit alignment.
99static uint32_t convertCharsToWord(const StringRef &Str, unsigned i) {
100 uint32_t Word = 0u; // Build up this 32-bit word from 4 8-bit chars.
101 for (unsigned WordIndex = 0; WordIndex < 4; ++WordIndex) {
102 unsigned StrIndex = i + WordIndex;
103 uint8_t CharToAdd = 0; // Initialize char as padding/null.
104 if (StrIndex < Str.size()) { // If it's within the string, get a real char.
105 CharToAdd = Str[StrIndex];
106 }
107 Word |= (CharToAdd << (WordIndex * 8));
108 }
109 return Word;
110}
111
112// Get length including padding and null terminator.
113static size_t getPaddedLen(const StringRef &Str) {
114 return (Str.size() + 4) & ~3;
115}
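// Editorial example, not part of the original file: for Str = "abc",
// getPaddedLen returns (3 + 4) & ~3 == 4, so a single word is emitted, and
// convertCharsToWord("abc", 0) packs the bytes little-endian with a NUL pad:
//   0x00636261 == 'a' | ('b' << 8) | ('c' << 16) | (0 << 24)
// A four-character string such as "abcd" pads to length 8, so a second,
// all-zero word supplies the required null terminator.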
116
117void addStringImm(const StringRef &Str, MCInst &Inst) {
118 const size_t PaddedLen = getPaddedLen(Str);
119 for (unsigned i = 0; i < PaddedLen; i += 4) {
120 // Add an operand for the 32-bits of chars or padding.
121 Inst.addOperand(MCOperand::createImm(convertCharsToWord(Str, i)));
122 }
123}
124
125void addStringImm(const StringRef &Str, MachineInstrBuilder &MIB) {
126 const size_t PaddedLen = getPaddedLen(Str);
127 for (unsigned i = 0; i < PaddedLen; i += 4) {
128 // Add an operand for the 32-bits of chars or padding.
129 MIB.addImm(convertCharsToWord(Str, i));
130 }
131}
132
133void addStringImm(const StringRef &Str, IRBuilder<> &B,
134 std::vector<Value *> &Args) {
135 const size_t PaddedLen = getPaddedLen(Str);
136 for (unsigned i = 0; i < PaddedLen; i += 4) {
137 // Add a vector element for the 32-bits of chars or padding.
138 Args.push_back(B.getInt32(convertCharsToWord(Str, i)));
139 }
140}
141
142std::string getStringImm(const MachineInstr &MI, unsigned StartIndex) {
143 return getSPIRVStringOperand(MI, StartIndex);
144}
145
146std::string getStringValueFromReg(Register Reg, MachineRegisterInfo &MRI) {
147 MachineInstr *Def = getVRegDef(MRI, Reg);
148 assert(Def && Def->getOpcode() == TargetOpcode::G_GLOBAL_VALUE &&
149 "Expected G_GLOBAL_VALUE");
150 const GlobalValue *GV = Def->getOperand(1).getGlobal();
151 Value *V = GV->getOperand(0);
152 auto *CDA = cast<ConstantDataArray>(V);
153 return CDA->getAsCString().str();
154}
155
156void addNumImm(const APInt &Imm, MachineInstrBuilder &MIB) {
157 const auto Bitwidth = Imm.getBitWidth();
158 if (Bitwidth == 1)
159 return; // Already handled
160 else if (Bitwidth <= 32) {
161 MIB.addImm(Imm.getZExtValue());
162 // Asm Printer needs this info to print 16-bit floating-point values correctly
163 if (Bitwidth == 16)
164 MIB.getInstr()->setAsmPrinterFlag(SPIRV::ASM_PRINTER_WIDTH16);
165 return;
166 } else if (Bitwidth <= 64) {
167 uint64_t FullImm = Imm.getZExtValue();
168 uint32_t LowBits = FullImm & 0xffffffff;
169 uint32_t HighBits = (FullImm >> 32) & 0xffffffff;
170 MIB.addImm(LowBits).addImm(HighBits);
171 // Asm Printer needs this info to print 64-bit operands correctly
172 MIB.getInstr()->setAsmPrinterFlag(SPIRV::ASM_PRINTER_WIDTH64);
173 return;
174 } else if (Bitwidth <= 128) {
175 uint32_t LowBits = Imm.getRawData()[0] & 0xffffffff;
176 uint32_t MidBits0 = (Imm.getRawData()[0] >> 32) & 0xffffffff;
177 uint32_t MidBits1 = Imm.getRawData()[1] & 0xffffffff;
178 uint32_t HighBits = (Imm.getRawData()[1] >> 32) & 0xffffffff;
179 MIB.addImm(LowBits).addImm(MidBits0).addImm(MidBits1).addImm(HighBits);
180 return;
181 }
182 report_fatal_error("Unsupported constant bitwidth");
183}
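// Editorial example, not part of the original file: a 64-bit immediate is
// split into two words, low word first. For Imm = 0x0123456789abcdef the
// emitted operands are addImm(0x89abcdef) followed by addImm(0x01234567).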
184
185void buildOpName(Register Target, const StringRef &Name,
186 MachineIRBuilder &MIRBuilder) {
187 if (!Name.empty()) {
188 auto MIB = MIRBuilder.buildInstr(SPIRV::OpName).addUse(Target);
189 addStringImm(Name, MIB);
190 }
191}
192
193void buildOpName(Register Target, const StringRef &Name, MachineInstr &I,
194 const SPIRVInstrInfo &TII) {
195 if (!Name.empty()) {
196 auto MIB =
197 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpName))
198 .addUse(Target);
199 addStringImm(Name, MIB);
200 }
201}
202
203static void finishBuildOpDecorate(MachineInstrBuilder &MIB,
204 const std::vector<uint32_t> &DecArgs,
205 StringRef StrImm) {
206 if (!StrImm.empty())
207 addStringImm(StrImm, MIB);
208 for (const auto &DecArg : DecArgs)
209 MIB.addImm(DecArg);
210}
211
212void buildOpDecorate(Register Reg, MachineIRBuilder &MIRBuilder,
213 SPIRV::Decoration::Decoration Dec,
214 const std::vector<uint32_t> &DecArgs, StringRef StrImm) {
215 auto MIB = MIRBuilder.buildInstr(SPIRV::OpDecorate)
216 .addUse(Reg)
217 .addImm(static_cast<uint32_t>(Dec));
218 finishBuildOpDecorate(MIB, DecArgs, StrImm);
219}
220
221void buildOpDecorate(Register Reg, MachineInstr &I, const SPIRVInstrInfo &TII,
222 SPIRV::Decoration::Decoration Dec,
223 const std::vector<uint32_t> &DecArgs, StringRef StrImm) {
224 MachineBasicBlock &MBB = *I.getParent();
225 auto MIB = BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpDecorate))
226 .addUse(Reg)
227 .addImm(static_cast<uint32_t>(Dec));
228 finishBuildOpDecorate(MIB, DecArgs, StrImm);
229}
230
231void buildOpMemberDecorate(Register Reg, MachineIRBuilder &MIRBuilder,
232 SPIRV::Decoration::Decoration Dec, uint32_t Member,
233 const std::vector<uint32_t> &DecArgs,
234 StringRef StrImm) {
235 auto MIB = MIRBuilder.buildInstr(SPIRV::OpMemberDecorate)
236 .addUse(Reg)
237 .addImm(Member)
238 .addImm(static_cast<uint32_t>(Dec));
239 finishBuildOpDecorate(MIB, DecArgs, StrImm);
240}
241
242void buildOpMemberDecorate(Register Reg, MachineInstr &I,
243 const SPIRVInstrInfo &TII,
244 SPIRV::Decoration::Decoration Dec, uint32_t Member,
245 const std::vector<uint32_t> &DecArgs,
246 StringRef StrImm) {
247 MachineBasicBlock &MBB = *I.getParent();
248 auto MIB = BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemberDecorate))
249 .addUse(Reg)
250 .addImm(Member)
251 .addImm(static_cast<uint32_t>(Dec));
252 finishBuildOpDecorate(MIB, DecArgs, StrImm);
253}
254
255void buildOpSpirvDecorations(Register Reg, MachineIRBuilder &MIRBuilder,
256 const MDNode *GVarMD, const SPIRVSubtarget &ST) {
257 for (unsigned I = 0, E = GVarMD->getNumOperands(); I != E; ++I) {
258 auto *OpMD = dyn_cast<MDNode>(GVarMD->getOperand(I));
259 if (!OpMD)
260 report_fatal_error("Invalid decoration");
261 if (OpMD->getNumOperands() == 0)
262 report_fatal_error("Expect operand(s) of the decoration");
263 ConstantInt *DecorationId =
264 mdconst::dyn_extract<ConstantInt>(OpMD->getOperand(0));
265 if (!DecorationId)
266 report_fatal_error("Expect SPIR-V <Decoration> operand to be the first "
267 "element of the decoration");
268
269 // The goal of `spirv.Decorations` metadata is to provide a way to
270 // represent SPIR-V entities that do not map to LLVM in an obvious way.
271 // FP flags do have obvious matches between LLVM IR and SPIR-V.
272 // Additionally, we have no guarantee at this point that the flags passed
273 // through the decoration are not violated already in the optimizer passes.
274 // Therefore, we simply ignore FP flags, including NoContraction, and
275 // FPFastMathMode.
276 if (DecorationId->getZExtValue() ==
277 static_cast<uint32_t>(SPIRV::Decoration::NoContraction) ||
278 DecorationId->getZExtValue() ==
279 static_cast<uint32_t>(SPIRV::Decoration::FPFastMathMode)) {
280 continue; // Ignored.
281 }
282 auto MIB = MIRBuilder.buildInstr(SPIRV::OpDecorate)
283 .addUse(Reg)
284 .addImm(static_cast<uint32_t>(DecorationId->getZExtValue()));
285 for (unsigned OpI = 1, OpE = OpMD->getNumOperands(); OpI != OpE; ++OpI) {
286 if (ConstantInt *OpV =
287 mdconst::dyn_extract<ConstantInt>(OpMD->getOperand(OpI)))
288 MIB.addImm(static_cast<uint32_t>(OpV->getZExtValue()));
289 else if (MDString *OpV = dyn_cast<MDString>(OpMD->getOperand(OpI)))
290 addStringImm(OpV->getString(), MIB);
291 else
292 report_fatal_error("Unexpected operand of the decoration");
293 }
294 }
295}
296
297MachineBasicBlock::iterator getOpVariableMBBIt(MachineInstr &I) {
298 MachineFunction *MF = I.getParent()->getParent();
299 MachineBasicBlock *MBB = &MF->front();
300 MachineBasicBlock::iterator It = MBB->SkipPHIsAndLabels(MBB->begin()),
301 E = MBB->end();
302 bool IsHeader = false;
303 unsigned Opcode;
304 for (; It != E && It != I; ++It) {
305 Opcode = It->getOpcode();
306 if (Opcode == SPIRV::OpFunction || Opcode == SPIRV::OpFunctionParameter) {
307 IsHeader = true;
308 } else if (IsHeader &&
309 !(Opcode == SPIRV::ASSIGN_TYPE || Opcode == SPIRV::OpLabel)) {
310 ++It;
311 break;
312 }
313 }
314 return It;
315}
316
317MachineBasicBlock::iterator getInsertPtValidEnd(MachineBasicBlock *MBB) {
318 MachineBasicBlock::iterator I = MBB->end();
319 if (I == MBB->begin())
320 return I;
321 --I;
322 while (I->isTerminator() || I->isDebugValue()) {
323 if (I == MBB->begin())
324 break;
325 --I;
326 }
327 return I;
328}
329
330SPIRV::StorageClass::StorageClass
331addressSpaceToStorageClass(unsigned AddrSpace, const SPIRVSubtarget &STI) {
332 switch (AddrSpace) {
333 case 0:
334 return SPIRV::StorageClass::Function;
335 case 1:
336 return SPIRV::StorageClass::CrossWorkgroup;
337 case 2:
338 return SPIRV::StorageClass::UniformConstant;
339 case 3:
340 return SPIRV::StorageClass::Workgroup;
341 case 4:
342 return SPIRV::StorageClass::Generic;
343 case 5:
344 return STI.canUseExtension(SPIRV::Extension::SPV_INTEL_usm_storage_classes)
345 ? SPIRV::StorageClass::DeviceOnlyINTEL
346 : SPIRV::StorageClass::CrossWorkgroup;
347 case 6:
348 return STI.canUseExtension(SPIRV::Extension::SPV_INTEL_usm_storage_classes)
349 ? SPIRV::StorageClass::HostOnlyINTEL
350 : SPIRV::StorageClass::CrossWorkgroup;
351 case 7:
352 return SPIRV::StorageClass::Input;
353 case 8:
354 return SPIRV::StorageClass::Output;
355 case 9:
356 return SPIRV::StorageClass::CodeSectionINTEL;
357 case 10:
358 return SPIRV::StorageClass::Private;
359 case 11:
360 return SPIRV::StorageClass::StorageBuffer;
361 case 12:
362 return SPIRV::StorageClass::Uniform;
363 default:
364 report_fatal_error("Unknown address space");
365 }
366}
367
368SPIRV::MemorySemantics::MemorySemantics
369getMemSemanticsForStorageClass(SPIRV::StorageClass::StorageClass SC) {
370 switch (SC) {
371 case SPIRV::StorageClass::StorageBuffer:
372 case SPIRV::StorageClass::Uniform:
373 return SPIRV::MemorySemantics::UniformMemory;
374 case SPIRV::StorageClass::Workgroup:
375 return SPIRV::MemorySemantics::WorkgroupMemory;
376 case SPIRV::StorageClass::CrossWorkgroup:
377 return SPIRV::MemorySemantics::CrossWorkgroupMemory;
378 case SPIRV::StorageClass::AtomicCounter:
379 return SPIRV::MemorySemantics::AtomicCounterMemory;
380 case SPIRV::StorageClass::Image:
381 return SPIRV::MemorySemantics::ImageMemory;
382 default:
383 return SPIRV::MemorySemantics::None;
384 }
385}
386
387SPIRV::MemorySemantics::MemorySemantics getMemSemantics(AtomicOrdering Ord) {
388 switch (Ord) {
389 case AtomicOrdering::Acquire:
390 return SPIRV::MemorySemantics::Acquire;
391 case AtomicOrdering::Release:
392 return SPIRV::MemorySemantics::Release;
393 case AtomicOrdering::AcquireRelease:
394 return SPIRV::MemorySemantics::AcquireRelease;
395 case AtomicOrdering::SequentiallyConsistent:
396 return SPIRV::MemorySemantics::SequentiallyConsistent;
397 case AtomicOrdering::Unordered:
398 case AtomicOrdering::Monotonic:
399 case AtomicOrdering::NotAtomic:
400 return SPIRV::MemorySemantics::None;
401 }
402 llvm_unreachable(nullptr);
403}
404
405SPIRV::Scope::Scope getMemScope(LLVMContext &Ctx, SyncScope::ID Id) {
406 // Named by
407 // https://registry.khronos.org/SPIR-V/specs/unified1/SPIRV.html#_scope_id.
408 // We don't need aliases for Invocation and CrossDevice, as we already have
409 // them covered by "singlethread" and "" strings respectively (see
410 // implementation of LLVMContext::LLVMContext()).
411 static const llvm::SyncScope::ID SubGroup =
412 Ctx.getOrInsertSyncScopeID("subgroup");
413 static const llvm::SyncScope::ID WorkGroup =
414 Ctx.getOrInsertSyncScopeID("workgroup");
415 static const llvm::SyncScope::ID Device =
416 Ctx.getOrInsertSyncScopeID("device");
417
418 if (Id == llvm::SyncScope::SingleThread)
419 return SPIRV::Scope::Invocation;
420 else if (Id == llvm::SyncScope::System)
421 return SPIRV::Scope::CrossDevice;
422 else if (Id == SubGroup)
423 return SPIRV::Scope::Subgroup;
424 else if (Id == WorkGroup)
425 return SPIRV::Scope::Workgroup;
426 else if (Id == Device)
427 return SPIRV::Scope::Device;
428 return SPIRV::Scope::CrossDevice;
429}
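// Editorial example, not part of the original file: an IR atomic such as
//   atomicrmw add ptr %p, i32 1 syncscope("workgroup") seq_cst
// maps to SPIRV::Scope::Workgroup, while the default (system) scope and any
// unrecognized scope name map to SPIRV::Scope::CrossDevice.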
430
431MachineInstr *getDefInstrMaybeConstant(Register &ConstReg,
432 const MachineRegisterInfo *MRI) {
433 MachineInstr *MI = MRI->getVRegDef(ConstReg);
434 MachineInstr *ConstInstr =
435 MI->getOpcode() == SPIRV::G_TRUNC || MI->getOpcode() == SPIRV::G_ZEXT
436 ? MRI->getVRegDef(MI->getOperand(1).getReg())
437 : MI;
438 if (auto *GI = dyn_cast<GIntrinsic>(ConstInstr)) {
439 if (GI->is(Intrinsic::spv_track_constant)) {
440 ConstReg = ConstInstr->getOperand(2).getReg();
441 return MRI->getVRegDef(ConstReg);
442 }
443 } else if (ConstInstr->getOpcode() == SPIRV::ASSIGN_TYPE) {
444 ConstReg = ConstInstr->getOperand(1).getReg();
445 return MRI->getVRegDef(ConstReg);
446 } else if (ConstInstr->getOpcode() == TargetOpcode::G_CONSTANT ||
447 ConstInstr->getOpcode() == TargetOpcode::G_FCONSTANT) {
448 ConstReg = ConstInstr->getOperand(0).getReg();
449 return ConstInstr;
450 }
451 return MRI->getVRegDef(ConstReg);
452}
453
454uint64_t getIConstVal(Register ConstReg, const MachineRegisterInfo *MRI) {
455 const MachineInstr *MI = getDefInstrMaybeConstant(ConstReg, MRI);
456 assert(MI && MI->getOpcode() == TargetOpcode::G_CONSTANT);
457 return MI->getOperand(1).getCImm()->getValue().getZExtValue();
458}
459
460int64_t getIConstValSext(Register ConstReg, const MachineRegisterInfo *MRI) {
461 const MachineInstr *MI = getDefInstrMaybeConstant(ConstReg, MRI);
462 assert(MI && MI->getOpcode() == TargetOpcode::G_CONSTANT);
463 return MI->getOperand(1).getCImm()->getSExtValue();
464}
465
466bool isSpvIntrinsic(const MachineInstr &MI, Intrinsic::ID IntrinsicID) {
467 if (const auto *GI = dyn_cast<GIntrinsic>(&MI))
468 return GI->is(IntrinsicID);
469 return false;
470}
471
472Type *getMDOperandAsType(const MDNode *N, unsigned I) {
473 Type *ElementTy = cast<ValueAsMetadata>(N->getOperand(I))->getType();
474 return toTypedPointer(ElementTy);
475}
476
477// The set of names is borrowed from the SPIR-V translator.
478// TODO: may be implemented in SPIRVBuiltins.td.
479static bool isPipeOrAddressSpaceCastBI(const StringRef MangledName) {
480 return MangledName == "write_pipe_2" || MangledName == "read_pipe_2" ||
481 MangledName == "write_pipe_2_bl" || MangledName == "read_pipe_2_bl" ||
482 MangledName == "write_pipe_4" || MangledName == "read_pipe_4" ||
483 MangledName == "reserve_write_pipe" ||
484 MangledName == "reserve_read_pipe" ||
485 MangledName == "commit_write_pipe" ||
486 MangledName == "commit_read_pipe" ||
487 MangledName == "work_group_reserve_write_pipe" ||
488 MangledName == "work_group_reserve_read_pipe" ||
489 MangledName == "work_group_commit_write_pipe" ||
490 MangledName == "work_group_commit_read_pipe" ||
491 MangledName == "get_pipe_num_packets_ro" ||
492 MangledName == "get_pipe_max_packets_ro" ||
493 MangledName == "get_pipe_num_packets_wo" ||
494 MangledName == "get_pipe_max_packets_wo" ||
495 MangledName == "sub_group_reserve_write_pipe" ||
496 MangledName == "sub_group_reserve_read_pipe" ||
497 MangledName == "sub_group_commit_write_pipe" ||
498 MangledName == "sub_group_commit_read_pipe" ||
499 MangledName == "to_global" || MangledName == "to_local" ||
500 MangledName == "to_private";
501}
502
503static bool isEnqueueKernelBI(const StringRef MangledName) {
504 return MangledName == "__enqueue_kernel_basic" ||
505 MangledName == "__enqueue_kernel_basic_events" ||
506 MangledName == "__enqueue_kernel_varargs" ||
507 MangledName == "__enqueue_kernel_events_varargs";
508}
509
510static bool isKernelQueryBI(const StringRef MangledName) {
511 return MangledName == "__get_kernel_work_group_size_impl" ||
512 MangledName == "__get_kernel_sub_group_count_for_ndrange_impl" ||
513 MangledName == "__get_kernel_max_sub_group_size_for_ndrange_impl" ||
514 MangledName == "__get_kernel_preferred_work_group_size_multiple_impl";
515}
516
517static bool isNonMangledOCLBuiltin(StringRef Name) {
518 if (!Name.starts_with("__"))
519 return false;
520
521 return isEnqueueKernelBI(Name) || isKernelQueryBI(Name) ||
522 isPipeOrAddressSpaceCastBI(Name.drop_front(2)) ||
523 Name == "__translate_sampler_initializer";
524}
525
526std::string getOclOrSpirvBuiltinDemangledName(StringRef Name) {
527 bool IsNonMangledOCL = isNonMangledOCLBuiltin(Name);
528 bool IsNonMangledSPIRV = Name.starts_with("__spirv_");
529 bool IsNonMangledHLSL = Name.starts_with("__hlsl_");
530 bool IsMangled = Name.starts_with("_Z");
531
532 // Otherwise use simple demangling to return the function name.
533 if (IsNonMangledOCL || IsNonMangledSPIRV || IsNonMangledHLSL || !IsMangled)
534 return Name.str();
535
536 // Try to use the itanium demangler.
537 if (char *DemangledName = itaniumDemangle(Name.data())) {
538 std::string Result = DemangledName;
539 free(DemangledName);
540 return Result;
541 }
542
543 // Assume C++ here; an explicit check of the source language may be needed.
544 // OpenCL C++ built-ins are declared in the cl namespace.
545 // TODO: consider using the 'St' abbreviation for cl namespace mangling,
546 // similar to ::std:: in C++.
547 size_t Start, Len = 0;
548 size_t DemangledNameLenStart = 2;
549 if (Name.starts_with("_ZN")) {
550 // Skip CV and ref qualifiers.
551 size_t NameSpaceStart = Name.find_first_not_of("rVKRO", 3);
552 // All built-ins are in the ::cl:: namespace.
553 if (Name.substr(NameSpaceStart, 11) != "2cl7__spirv")
554 return std::string();
555 DemangledNameLenStart = NameSpaceStart + 11;
556 }
557 Start = Name.find_first_not_of("0123456789", DemangledNameLenStart);
558 [[maybe_unused]] bool Error =
559 Name.substr(DemangledNameLenStart, Start - DemangledNameLenStart)
560 .getAsInteger(10, Len);
561 assert(!Error && "Failed to parse demangled name length");
562 return Name.substr(Start, Len).str();
563}
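// Editorial example, not part of the original file, illustrating the manual
// fallback used when the itanium demangler fails: for a name of the form
// "_Z12write_pipe_2...", DemangledNameLenStart is 2, Start lands just past
// the digits "12", Len parses as 12, and "write_pipe_2" is returned.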
564
565bool hasBuiltinTypePrefix(StringRef Name) {
566 if (Name.starts_with("opencl.") || Name.starts_with("ocl_") ||
567 Name.starts_with("spirv."))
568 return true;
569 return false;
570}
571
572bool isSpecialOpaqueType(const Type *Ty) {
573 if (const TargetExtType *ExtTy = dyn_cast<TargetExtType>(Ty))
574 return isTypedPointerWrapper(ExtTy)
575 ? false
576 : hasBuiltinTypePrefix(ExtTy->getName());
577
578 return false;
579}
580
581bool isEntryPoint(const Function &F) {
582 // OpenCL handling: any function with the SPIR_KERNEL
583 // calling convention will be a potential entry point.
584 if (F.getCallingConv() == CallingConv::SPIR_KERNEL)
585 return true;
586
587 // HLSL handling: special attribute are emitted from the
588 // front-end.
589 if (F.getFnAttribute("hlsl.shader").isValid())
590 return true;
591
592 return false;
593}
594
595Type *parseBasicTypeName(StringRef &TypeName, LLVMContext &Ctx) {
596 TypeName.consume_front("atomic_");
597 if (TypeName.consume_front("void"))
598 return Type::getVoidTy(Ctx);
599 else if (TypeName.consume_front("bool") || TypeName.consume_front("_Bool"))
600 return Type::getIntNTy(Ctx, 1);
601 else if (TypeName.consume_front("char") ||
602 TypeName.consume_front("signed char") ||
603 TypeName.consume_front("unsigned char") ||
604 TypeName.consume_front("uchar"))
605 return Type::getInt8Ty(Ctx);
606 else if (TypeName.consume_front("short") ||
607 TypeName.consume_front("signed short") ||
608 TypeName.consume_front("unsigned short") ||
609 TypeName.consume_front("ushort"))
610 return Type::getInt16Ty(Ctx);
611 else if (TypeName.consume_front("int") ||
612 TypeName.consume_front("signed int") ||
613 TypeName.consume_front("unsigned int") ||
614 TypeName.consume_front("uint"))
615 return Type::getInt32Ty(Ctx);
616 else if (TypeName.consume_front("long") ||
617 TypeName.consume_front("signed long") ||
618 TypeName.consume_front("unsigned long") ||
619 TypeName.consume_front("ulong"))
620 return Type::getInt64Ty(Ctx);
621 else if (TypeName.consume_front("half") ||
622 TypeName.consume_front("_Float16") ||
623 TypeName.consume_front("__fp16"))
624 return Type::getHalfTy(Ctx);
625 else if (TypeName.consume_front("float"))
626 return Type::getFloatTy(Ctx);
627 else if (TypeName.consume_front("double"))
628 return Type::getDoubleTy(Ctx);
629
630 // Unable to recognize SPIRV type name
631 return nullptr;
632}
633
634std::unordered_set<BasicBlock *>
635PartialOrderingVisitor::getReachableFrom(BasicBlock *Start) {
636 std::queue<BasicBlock *> ToVisit;
637 ToVisit.push(Start);
638
639 std::unordered_set<BasicBlock *> Output;
640 while (ToVisit.size() != 0) {
641 BasicBlock *BB = ToVisit.front();
642 ToVisit.pop();
643
644 if (Output.count(BB) != 0)
645 continue;
646 Output.insert(BB);
647
648 for (BasicBlock *Successor : successors(BB)) {
649 if (DT.dominates(Successor, BB))
650 continue;
651 ToVisit.push(Successor);
652 }
653 }
654
655 return Output;
656}
657
658bool PartialOrderingVisitor::CanBeVisited(BasicBlock *BB) const {
659 for (BasicBlock *P : predecessors(BB)) {
660 // Ignore back-edges.
661 if (DT.dominates(BB, P))
662 continue;
663
664 // One of the predecessors hasn't been visited yet. Not ready.
665 if (BlockToOrder.count(P) == 0)
666 return false;
667
668 // If the block is a loop exit, the loop must be finished before
669 // we can continue.
670 Loop *L = LI.getLoopFor(P);
671 if (L == nullptr || L->contains(BB))
672 continue;
673
674 // SPIR-V requires a single back-edge, and the backend's first step
675 // transforms loops into the simplified form. If we have more than one
676 // back-edge, something is wrong.
677 assert(L->getNumBackEdges() <= 1);
678
679 // If the loop has no latch, the loop's rank won't matter, so we can
680 // proceed.
681 BasicBlock *Latch = L->getLoopLatch();
682 assert(Latch);
683 if (Latch == nullptr)
684 continue;
685
686 // The latch is not ready yet, let's wait.
687 if (BlockToOrder.count(Latch) == 0)
688 return false;
689 }
690
691 return true;
692}
693
694size_t PartialOrderingVisitor::GetNodeRank(BasicBlock *BB) const {
695 auto It = BlockToOrder.find(BB);
696 if (It != BlockToOrder.end())
697 return It->second.Rank;
698
699 size_t result = 0;
700 for (BasicBlock *P : predecessors(BB)) {
701 // Ignore back-edges.
702 if (DT.dominates(BB, P))
703 continue;
704
705 auto Iterator = BlockToOrder.end();
706 Loop *L = LI.getLoopFor(P);
707 BasicBlock *Latch = L ? L->getLoopLatch() : nullptr;
708
709 // If the predecessor is either outside a loop, or part of
710 // the same loop, simply take its rank + 1.
711 if (L == nullptr || L->contains(BB) || Latch == nullptr) {
712 Iterator = BlockToOrder.find(P);
713 } else {
714 // Otherwise, take the loop's rank (highest rank in the loop) as base.
715 // Since loops have a single latch, highest rank is easy to find.
716 // If the loop has no latch, then it doesn't matter.
717 Iterator = BlockToOrder.find(Latch);
718 }
719
720 assert(Iterator != BlockToOrder.end());
721 result = std::max(result, Iterator->second.Rank + 1);
722 }
723
724 return result;
725}
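// Editorial example, not part of the original file: in a loop-free diamond
// CFG A -> {B, C}, B -> D, C -> D, the entry A has rank 0 (no predecessors),
// B and C get rank max(0 + 1) == 1, and the join point D gets rank
// max(1 + 1, 1 + 1) == 2, so both branches sort before the join.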
726
727size_t PartialOrderingVisitor::visit(BasicBlock *BB, size_t Unused) {
728 ToVisit.push(BB);
729 Queued.insert(BB);
730
731 size_t QueueIndex = 0;
732 while (ToVisit.size() != 0) {
733 BasicBlock *BB = ToVisit.front();
734 ToVisit.pop();
735
736 if (!CanBeVisited(BB)) {
737 ToVisit.push(BB);
738 if (QueueIndex >= ToVisit.size())
739 report_fatal_error(
740 "No valid candidate in the queue. Is the graph reducible?");
741 QueueIndex++;
742 continue;
743 }
744
745 QueueIndex = 0;
746 size_t Rank = GetNodeRank(BB);
747 OrderInfo Info = {Rank, BlockToOrder.size()};
748 BlockToOrder.emplace(BB, Info);
749
750 for (BasicBlock *S : successors(BB)) {
751 if (Queued.count(S) != 0)
752 continue;
753 ToVisit.push(S);
754 Queued.insert(S);
755 }
756 }
757
758 return 0;
759}
760
761PartialOrderingVisitor::PartialOrderingVisitor(Function &F) {
762 DT.recalculate(F);
763 LI = LoopInfo(DT);
764
765 visit(&*F.begin(), 0);
766
767 Order.reserve(F.size());
768 for (auto &[BB, Info] : BlockToOrder)
769 Order.emplace_back(BB);
770
771 std::sort(Order.begin(), Order.end(), [&](const auto &LHS, const auto &RHS) {
772 return compare(LHS, RHS);
773 });
774}
775
776bool PartialOrderingVisitor::compare(const BasicBlock *LHS,
777 const BasicBlock *RHS) const {
778 const OrderInfo &InfoLHS = BlockToOrder.at(const_cast<BasicBlock *>(LHS));
779 const OrderInfo &InfoRHS = BlockToOrder.at(const_cast<BasicBlock *>(RHS));
780 if (InfoLHS.Rank != InfoRHS.Rank)
781 return InfoLHS.Rank < InfoRHS.Rank;
782 return InfoLHS.TraversalIndex < InfoRHS.TraversalIndex;
783}
784
785void PartialOrderingVisitor::partialOrderVisit(
786 BasicBlock &Start, std::function<bool(BasicBlock *)> Op) {
787 std::unordered_set<BasicBlock *> Reachable = getReachableFrom(&Start);
788 assert(BlockToOrder.count(&Start) != 0);
789
790 // Skip blocks that come before |Start| in the order, i.e. with a lower rank.
791 auto It = Order.begin();
792 while (It != Order.end() && *It != &Start)
793 ++It;
794
795 // Reaching the end here would be unexpected: in the worst case |Start| is
796 // the last block, so It should point to the last block, never past the end.
797 assert(It != Order.end());
798
799 // By default there is no rank limit, so EndRank starts unset.
800 std::optional<size_t> EndRank = std::nullopt;
801 for (; It != Order.end(); ++It) {
802 if (EndRank.has_value() && BlockToOrder[*It].Rank > *EndRank)
803 break;
804
805 if (Reachable.count(*It) == 0) {
806 continue;
807 }
808
809 if (!Op(*It)) {
810 EndRank = BlockToOrder[*It].Rank;
811 }
812 }
813}
814
815bool sortBlocks(Function &F) {
816 if (F.size() == 0)
817 return false;
818
819 bool Modified = false;
820 std::vector<BasicBlock *> Order;
821 Order.reserve(F.size());
822
823 ReversePostOrderTraversal<Function *> RPOT(&F);
824 llvm::append_range(Order, RPOT);
825
826 assert(&*F.begin() == Order[0]);
827 BasicBlock *LastBlock = &*F.begin();
828 for (BasicBlock *BB : Order) {
829 if (BB != LastBlock && &*LastBlock->getNextNode() != BB) {
830 Modified = true;
831 BB->moveAfter(LastBlock);
832 }
833 LastBlock = BB;
834 }
835
836 return Modified;
837}
838
839MachineInstr *getVRegDef(MachineRegisterInfo &MRI, Register Reg) {
840 MachineInstr *MaybeDef = MRI.getVRegDef(Reg);
841 if (MaybeDef && MaybeDef->getOpcode() == SPIRV::ASSIGN_TYPE)
842 MaybeDef = MRI.getVRegDef(MaybeDef->getOperand(1).getReg());
843 return MaybeDef;
844}
845
846bool getVacantFunctionName(Module &M, std::string &Name) {
847 // This is a bit of paranoia, but we still don't want any chance that the
848 // loop runs for too long.
849 constexpr unsigned MaxIters = 1024;
850 for (unsigned I = 0; I < MaxIters; ++I) {
851 std::string OrdName = Name + Twine(I).str();
852 if (!M.getFunction(OrdName)) {
853 Name = std::move(OrdName);
854 return true;
855 }
856 }
857 return false;
858}
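// Editorial example, not part of the original file: if the module already
// defines "foo0" and "foo1", calling getVacantFunctionName(M, Name) with
// Name == "foo" sets Name to "foo2" and returns true; false is returned
// only if all 1024 candidates are taken.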
859
860// Assign SPIR-V type to the register. If the register has no valid assigned
861// class, set register LLT type and class according to the SPIR-V type.
862void setRegClassType(Register Reg, SPIRVType *SpvType, SPIRVGlobalRegistry *GR,
863 MachineRegisterInfo *MRI, const MachineFunction &MF,
864 bool Force) {
865 GR->assignSPIRVTypeToVReg(SpvType, Reg, MF);
866 if (!MRI->getRegClassOrNull(Reg) || Force) {
867 MRI->setRegClass(Reg, GR->getRegClass(SpvType));
868 MRI->setType(Reg, GR->getRegType(SpvType));
869 }
870}
871
872// Create a SPIR-V type, assign SPIR-V type to the register. If the register has
873// no valid assigned class, set register LLT type and class according to the
874// SPIR-V type.
875void setRegClassType(Register Reg, const Type *Ty, SPIRVGlobalRegistry *GR,
876 MachineIRBuilder &MIRBuilder,
877 SPIRV::AccessQualifier::AccessQualifier AccessQual,
878 bool EmitIR, bool Force) {
879 setRegClassType(Reg,
880 GR->getOrCreateSPIRVType(Ty, MIRBuilder, AccessQual, EmitIR),
881 GR, MIRBuilder.getMRI(), MIRBuilder.getMF(), Force);
882}
883
884// Create a virtual register and assign SPIR-V type to the register. Set
885// register LLT type and class according to the SPIR-V type.
886Register createVirtualRegister(SPIRVType *SpvType, SPIRVGlobalRegistry *GR,
887 MachineRegisterInfo *MRI,
888 const MachineFunction &MF) {
889 Register Reg = MRI->createVirtualRegister(GR->getRegClass(SpvType));
890 MRI->setType(Reg, GR->getRegType(SpvType));
891 GR->assignSPIRVTypeToVReg(SpvType, Reg, MF);
892 return Reg;
893}
894
895// Create a virtual register and assign SPIR-V type to the register. Set
896// register LLT type and class according to the SPIR-V type.
897Register createVirtualRegister(SPIRVType *SpvType, SPIRVGlobalRegistry *GR,
898 MachineIRBuilder &MIRBuilder) {
899 return createVirtualRegister(SpvType, GR, MIRBuilder.getMRI(),
900 MIRBuilder.getMF());
901}
902
903// Create a SPIR-V type, virtual register and assign SPIR-V type to the
904// register. Set register LLT type and class according to the SPIR-V type.
905Register createVirtualRegister(
906 const Type *Ty, SPIRVGlobalRegistry *GR, MachineIRBuilder &MIRBuilder,
907 SPIRV::AccessQualifier::AccessQualifier AccessQual, bool EmitIR) {
908 return createVirtualRegister(
909 GR->getOrCreateSPIRVType(Ty, MIRBuilder, AccessQual, EmitIR), GR,
910 MIRBuilder);
911}
912
913CallInst *buildIntrWithMD(Intrinsic::ID IntrID, ArrayRef<Type *> Types,
914 Value *Arg, Value *Arg2, ArrayRef<Constant *> Imms,
915 IRBuilder<> &B) {
916 SmallVector<Value *, 4> Args;
917 Args.push_back(Arg2);
918 Args.push_back(buildMD(Arg));
919 llvm::append_range(Args, Imms);
920 return B.CreateIntrinsic(IntrID, {Types}, Args);
921}
922
923// Return true if there is an opaque pointer type nested in the argument.
924bool isNestedPointer(const Type *Ty) {
925 if (Ty->isPtrOrPtrVectorTy())
926 return true;
927 if (const FunctionType *RefTy = dyn_cast<FunctionType>(Ty)) {
928 if (isNestedPointer(RefTy->getReturnType()))
929 return true;
930 for (const Type *ArgTy : RefTy->params())
931 if (isNestedPointer(ArgTy))
932 return true;
933 return false;
934 }
935 if (const ArrayType *RefTy = dyn_cast<ArrayType>(Ty))
936 return isNestedPointer(RefTy->getElementType());
937 return false;
938}
939
940bool isSpvIntrinsic(const Value *Arg) {
941 if (const auto *II = dyn_cast<IntrinsicInst>(Arg))
942 if (Function *F = II->getCalledFunction())
943 if (F->getName().starts_with("llvm.spv."))
944 return true;
945 return false;
946}
947
948// Function to create continued instructions for SPV_INTEL_long_composites
949// extension
950SmallVector<MachineInstr *, 4>
951createContinuedInstructions(MachineIRBuilder &MIRBuilder, unsigned Opcode,
952 unsigned MinWC, unsigned ContinuedOpcode,
953 ArrayRef<Register> Args, Register ReturnRegister,
954 Register TypeID) {
955
956 SmallVector<MachineInstr *, 4> Instructions;
957 constexpr unsigned MaxWordCount = UINT16_MAX;
958 const size_t NumElements = Args.size();
959 size_t MaxNumElements = MaxWordCount - MinWC;
960 size_t SPIRVStructNumElements = NumElements;
961
962 if (NumElements > MaxNumElements) {
963 // Adjust for continued instructions, which always have a minimum word
964 // count of one.
965 SPIRVStructNumElements = MaxNumElements;
966 MaxNumElements = MaxWordCount - 1;
967 }
968
969 auto MIB =
970 MIRBuilder.buildInstr(Opcode).addDef(ReturnRegister).addUse(TypeID);
971
972 for (size_t I = 0; I < SPIRVStructNumElements; ++I)
973 MIB.addUse(Args[I]);
974
975 Instructions.push_back(MIB.getInstr());
976
977 for (size_t I = SPIRVStructNumElements; I < NumElements;
978 I += MaxNumElements) {
979 auto MIB = MIRBuilder.buildInstr(ContinuedOpcode);
980 for (size_t J = I; J < std::min(I + MaxNumElements, NumElements); ++J)
981 MIB.addUse(Args[J]);
982 Instructions.push_back(MIB.getInstr());
983 }
984 return Instructions;
985}
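// Editorial example, not part of the original file: with MinWC == 3 a base
// instruction can carry 65535 - 3 == 65532 elements. For 70000 elements the
// base instruction takes the first 65532, and one continued instruction
// (minimum word count 1, so up to 65534 operands) carries the remaining 4468.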
986
987SmallVector<unsigned, 1> getSpirvLoopControlOperandsFromLoopMetadata(Loop *L) {
988 unsigned LC = SPIRV::LoopControl::None;
989 // Currently used only to store the PartialCount value. Later, when other
990 // LoopControls are added, this map should be sorted before its entries
991 // become OpLoopMerge operands, to satisfy the section 3.23 "Loop Control"
992 // requirements.
992 std::vector<std::pair<unsigned, unsigned>> MaskToValueMap;
993 if (getBooleanLoopAttribute(L, "llvm.loop.unroll.disable")) {
994 LC |= SPIRV::LoopControl::DontUnroll;
995 } else {
996 if (getBooleanLoopAttribute(L, "llvm.loop.unroll.enable") ||
997 getBooleanLoopAttribute(L, "llvm.loop.unroll.full")) {
998 LC |= SPIRV::LoopControl::Unroll;
999 }
1000 std::optional<int> Count =
1001 getOptionalIntLoopAttribute(L, "llvm.loop.unroll.count");
1002 if (Count && Count != 1) {
1003 LC |= SPIRV::LoopControl::PartialCount;
1004 MaskToValueMap.emplace_back(
1005 std::make_pair(SPIRV::LoopControl::PartialCount, *Count));
1006 }
1007 }
1008 SmallVector<unsigned, 1> Result = {LC};
1009 for (auto &[Mask, Val] : MaskToValueMap)
1010 Result.push_back(Val);
1011 return Result;
1012}
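// Editorial example, not part of the original file: loop metadata such as
//   !0 = distinct !{!0, !1}
//   !1 = !{!"llvm.loop.unroll.count", i32 4}
// yields {LoopControl::PartialCount, 4}: the mask word followed by its
// literal operand, in the form OpLoopMerge expects.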
1013
1014const std::set<unsigned> &getTypeFoldingSupportedOpcodes() {
1015 // clang-format off
1016 static const std::set<unsigned> TypeFoldingSupportingOpcs = {
1017 TargetOpcode::G_ADD,
1018 TargetOpcode::G_FADD,
1019 TargetOpcode::G_STRICT_FADD,
1020 TargetOpcode::G_SUB,
1021 TargetOpcode::G_FSUB,
1022 TargetOpcode::G_STRICT_FSUB,
1023 TargetOpcode::G_MUL,
1024 TargetOpcode::G_FMUL,
1025 TargetOpcode::G_STRICT_FMUL,
1026 TargetOpcode::G_SDIV,
1027 TargetOpcode::G_UDIV,
1028 TargetOpcode::G_FDIV,
1029 TargetOpcode::G_STRICT_FDIV,
1030 TargetOpcode::G_SREM,
1031 TargetOpcode::G_UREM,
1032 TargetOpcode::G_FREM,
1033 TargetOpcode::G_STRICT_FREM,
1034 TargetOpcode::G_FNEG,
1035 TargetOpcode::G_CONSTANT,
1036 TargetOpcode::G_FCONSTANT,
1037 TargetOpcode::G_AND,
1038 TargetOpcode::G_OR,
1039 TargetOpcode::G_XOR,
1040 TargetOpcode::G_SHL,
1041 TargetOpcode::G_ASHR,
1042 TargetOpcode::G_LSHR,
1043 TargetOpcode::G_SELECT,
1044 TargetOpcode::G_EXTRACT_VECTOR_ELT,
1045 };
1046 // clang-format on
1047 return TypeFoldingSupportingOpcs;
1048}
1049
1050bool isTypeFoldingSupported(unsigned Opcode) {
1051 return getTypeFoldingSupportedOpcodes().count(Opcode) > 0;
1052}
1053
1054// Traversing [g]MIR accounting for pseudo-instructions.
1055MachineInstr *passCopy(MachineInstr *Def, const MachineRegisterInfo *MRI) {
1056 return (Def->getOpcode() == SPIRV::ASSIGN_TYPE ||
1057 Def->getOpcode() == TargetOpcode::COPY)
1058 ? MRI->getVRegDef(Def->getOperand(1).getReg())
1059 : Def;
1060}
1061
1062MachineInstr *getDef(const MachineOperand &MO, const MachineRegisterInfo *MRI) {
1063 if (MachineInstr *Def = MRI->getVRegDef(MO.getReg()))
1064 return passCopy(Def, MRI);
1065 return nullptr;
1066}
1067
1068MachineInstr *getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI) {
1069 if (MachineInstr *Def = getDef(MO, MRI)) {
1070 if (Def->getOpcode() == TargetOpcode::G_CONSTANT ||
1071 Def->getOpcode() == SPIRV::OpConstantI)
1072 return Def;
1073 }
1074 return nullptr;
1075}
1076
1077int64_t foldImm(const MachineOperand &MO, const MachineRegisterInfo *MRI) {
1078 if (MachineInstr *Def = getImm(MO, MRI)) {
1079 if (Def->getOpcode() == SPIRV::OpConstantI)
1080 return Def->getOperand(2).getImm();
1081 if (Def->getOpcode() == TargetOpcode::G_CONSTANT)
1082 return Def->getOperand(1).getCImm()->getZExtValue();
1083 }
1084 llvm_unreachable("Unexpected integer constant pattern");
1085}
1086
1087unsigned getArrayComponentCount(const MachineRegisterInfo *MRI,
1088 const MachineInstr *ResType) {
1089 return foldImm(ResType->getOperand(2), MRI);
1090}
1091
1092MachineBasicBlock::iterator
1093getFirstValidInstructionInsertPoint(MachineBasicBlock &BB) {
1094 // Find the position to insert the OpVariable instruction.
1095 // We will insert it after the last OpFunctionParameter, if any, or
1096 // after OpFunction otherwise.
1097 MachineBasicBlock::iterator VarPos = BB.begin();
1098 while (VarPos != BB.end() && VarPos->getOpcode() != SPIRV::OpFunction) {
1099 ++VarPos;
1100 }
1101 // Advance VarPos to the instruction after OpFunction; it will either be
1102 // an OpFunctionParameter, so that the next loop can start, or the
1103 // position at which to insert the OpVariable instruction.
1104 ++VarPos;
1105 while (VarPos != BB.end() &&
1106 VarPos->getOpcode() == SPIRV::OpFunctionParameter) {
1107 ++VarPos;
1108 }
1109 // VarPos is now pointing at after the last OpFunctionParameter, if any,
1110 // or after OpFunction, if no parameters.
1111 return VarPos != BB.end() && VarPos->getOpcode() == SPIRV::OpLabel ? ++VarPos
1112 : VarPos;
1113}
1114
1115bool matchPeeledArrayPattern(const StructType *Ty, Type *&OriginalElementType,
1116 uint64_t &TotalSize) {
1117 // An array of N padded structs is represented as {[N-1 x <{T, pad}>], T}.
1118 if (Ty->getStructNumElements() != 2)
1119 return false;
1120
1121 Type *FirstElement = Ty->getStructElementType(0);
1122 Type *SecondElement = Ty->getStructElementType(1);
1123
1124 if (!FirstElement->isArrayTy())
1125 return false;
1126
1127 Type *ArrayElementType = FirstElement->getArrayElementType();
1128 if (!ArrayElementType->isStructTy() ||
1129 ArrayElementType->getStructNumElements() != 2)
1130 return false;
1131
1132 Type *T_in_struct = ArrayElementType->getStructElementType(0);
1133 if (T_in_struct != SecondElement)
1134 return false;
1135
1136 auto *Padding_in_struct =
1137 dyn_cast<TargetExtType>(ArrayElementType->getStructElementType(1));
1138 if (!Padding_in_struct || Padding_in_struct->getName() != "spirv.Padding")
1139 return false;
1140
1141 const uint64_t ArraySize = FirstElement->getArrayNumElements();
1142 TotalSize = ArraySize + 1;
1143 OriginalElementType = ArrayElementType;
1144 return true;
1145}
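// Editorial example, not part of the original file: with an illustrative
// padded struct %S = type <{ float, target("spirv.Padding") }>, the peeled
// form { [3 x %S], float } matches: OriginalElementType is set to %S and
// TotalSize to 4, and reconstitutePeeledArrayType below rebuilds the type
// as [4 x %S].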
1146
1147Type *reconstitutePeeledArrayType(Type *Ty) {
1148 if (!Ty->isStructTy())
1149 return Ty;
1150
1151 auto *STy = cast<StructType>(Ty);
1152 Type *OriginalElementType = nullptr;
1153 uint64_t TotalSize = 0;
1154 if (matchPeeledArrayPattern(STy, OriginalElementType, TotalSize)) {
1155 Type *ResultTy = ArrayType::get(
1156 reconstitutePeeledArrayType(OriginalElementType), TotalSize);
1157 return ResultTy;
1158 }
1159
1160 SmallVector<Type *, 4> NewElementTypes;
1161 bool Changed = false;
1162 for (Type *ElementTy : STy->elements()) {
1163 Type *NewElementTy = reconstitutePeeledArrayType(ElementTy);
1164 if (NewElementTy != ElementTy)
1165 Changed = true;
1166 NewElementTypes.push_back(NewElementTy);
1167 }
1168
1169 if (!Changed)
1170 return Ty;
1171
1172 Type *ResultTy;
1173 if (STy->isLiteral())
1174 ResultTy =
1175 StructType::get(STy->getContext(), NewElementTypes, STy->isPacked());
1176 else {
1177 auto *NewTy = StructType::create(STy->getContext(), STy->getName());
1178 NewTy->setBody(NewElementTypes, STy->isPacked());
1179 ResultTy = NewTy;
1180 }
1181 return ResultTy;
1182}
1183
1184std::optional<SPIRV::LinkageType::LinkageType>
1185getSpirvLinkageTypeFor(const SPIRVSubtarget &ST, const GlobalValue &GV) {
1186 if (GV.hasLocalLinkage() || GV.hasHiddenVisibility())
1187 return std::nullopt;
1188
1189 if (GV.isDeclarationForLinker())
1190 return SPIRV::LinkageType::Import;
1191
1192 if (GV.hasLinkOnceODRLinkage() &&
1193 ST.canUseExtension(SPIRV::Extension::SPV_KHR_linkonce_odr))
1194 return SPIRV::LinkageType::LinkOnceODR;
1195
1196 return SPIRV::LinkageType::Export;
1197}
1198
1199} // namespace llvm