#include "llvm/IR/IntrinsicsSPIRV.h"
  if (auto *MDS = dyn_cast_or_null<MDString>(N->getOperand(0)))
    return MDS->getString() == Name;

  for (unsigned I = 1; I != (*It)->getNumOperands(); ++I) {
    assert(MD && "MDNode operand is expected");
    assert(CMeta && "ConstantAsMetadata operand is expected");
    assert(Const->getSExtValue() >= -1);
    // An index of -1 denotes the return type; non-negative indices denote
    // parameter positions.
    if (Const->getSExtValue() == -1)
      RetTy = CMeta->getType();
    else
      PTys[Const->getSExtValue()] = CMeta->getType();
  }

  return extractFunctionTypeFromMetadata(
      F.getParent()->getNamedMetadata("spv.cloned_funcs"), F.getFunctionType(),
      F.getName());
  for (unsigned WordIndex = 0; WordIndex < 4; ++WordIndex) {
    unsigned StrIndex = i + WordIndex;
    char CharToAdd = 0; // Pad with zero bytes past the end of the string.
    if (StrIndex < Str.size())
      CharToAdd = Str[StrIndex];
    Word |= (CharToAdd << (WordIndex * 8));
  }

  return (Str.size() + 4) & ~3;

  for (unsigned i = 0; i < PaddedLen; i += 4) {

  for (unsigned i = 0; i < PaddedLen; i += 4) {

                  std::vector<Value *> &Args) {
  for (unsigned i = 0; i < PaddedLen; i += 4) {
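// Illustrative sketch, not part of the original file: a worked example of how
// the string helpers above pack a literal for the SPIR-V binary form, assuming
// the little-endian byte packing done by convertCharsToWord.
//   getPaddedLen("main") == 8                   // 4 chars + NUL, rounded up to 8
//   convertCharsToWord("main", 0) == 0x6E69616D // 'm','a','i','n', low byte first
//   convertCharsToWord("main", 4) == 0x00000000 // trailing padding word is zero
// addStringImm then appends one immediate operand per 32-bit word.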
  assert(Def && Def->getOpcode() == TargetOpcode::G_GLOBAL_VALUE &&
         "Expected G_GLOBAL_VALUE");
  const GlobalValue *GV = Def->getOperand(1).getGlobal();
  const auto Bitwidth = Imm.getBitWidth();
  else if (Bitwidth <= 32) {
    MIB.addImm(Imm.getZExtValue());
  } else if (Bitwidth <= 64) {
    uint64_t FullImm = Imm.getZExtValue();
    uint32_t LowBits = FullImm & 0xffffffff;
    uint32_t HighBits = (FullImm >> 32) & 0xffffffff;
  } else if (Bitwidth <= 128) {
    uint32_t LowBits = Imm.getRawData()[0] & 0xffffffff;
    uint32_t MidBits0 = (Imm.getRawData()[0] >> 32) & 0xffffffff;
    uint32_t MidBits1 = Imm.getRawData()[1] & 0xffffffff;
    uint32_t HighBits = (Imm.getRawData()[1] >> 32) & 0xffffffff;
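// Illustrative note, not from the original file: immediates wider than 32 bits
// are emitted as multiple 32-bit literal words, least-significant word first,
// matching the splitting above. For example, a 64-bit APInt holding
// 0x0000000100000002 is added as LowBits == 0x00000002 followed by
// HighBits == 0x00000001.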
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpName))

                                   const std::vector<uint32_t> &DecArgs,
  for (const auto &DecArg : DecArgs)
                     SPIRV::Decoration::Decoration Dec,
                     const std::vector<uint32_t> &DecArgs, StringRef StrImm) {
  auto MIB = MIRBuilder.buildInstr(SPIRV::OpDecorate)

                     SPIRV::Decoration::Decoration Dec,
                     const std::vector<uint32_t> &DecArgs, StringRef StrImm) {
  auto MIB = BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpDecorate))

                           SPIRV::Decoration::Decoration Dec, uint32_t Member,
                           const std::vector<uint32_t> &DecArgs,
  auto MIB = MIRBuilder.buildInstr(SPIRV::OpMemberDecorate)

                           SPIRV::Decoration::Decoration Dec, uint32_t Member,
                           const std::vector<uint32_t> &DecArgs,
  auto MIB = BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemberDecorate))
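// Illustrative sketch, not part of the original file: a typical call into the
// MachineIRBuilder overload of buildOpDecorate. The register Reg and the live
// MIRBuilder are assumed to come from the surrounding lowering code, and the
// decoration and argument values are only an example.
//   buildOpDecorate(Reg, MIRBuilder, SPIRV::Decoration::Aligned, {16}, "");
// This emits "OpDecorate %Reg Aligned 16"; the trailing StringRef is only
// needed by decorations that take a string literal (e.g. UserSemantic).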
    if (OpMD->getNumOperands() == 0)
                         "element of the decoration");
        static_cast<uint32_t>(SPIRV::Decoration::NoContraction) ||
        static_cast<uint32_t>(SPIRV::Decoration::FPFastMathMode)) {
    auto MIB = MIRBuilder.buildInstr(SPIRV::OpDecorate)
    for (unsigned OpI = 1, OpE = OpMD->getNumOperands(); OpI != OpE; ++OpI) {
      MIB.addImm(static_cast<uint32_t>(OpV->getZExtValue()));
  bool IsHeader = false;
  for (; It != E && It != I; ++It) {
    Opcode = It->getOpcode();
    if (Opcode == SPIRV::OpFunction || Opcode == SPIRV::OpFunctionParameter) {
      IsHeader = true;
    } else if (IsHeader &&
               !(Opcode == SPIRV::ASSIGN_TYPE || Opcode == SPIRV::OpLabel)) {
  if (I == MBB->begin())
  while (I->isTerminator() || I->isDebugValue()) {
    if (I == MBB->begin())
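// Descriptive note, not part of the original file: the function below maps
// LLVM address spaces onto SPIR-V storage classes. The Intel USM storage
// classes (DeviceOnlyINTEL / HostOnlyINTEL) are used only when the
// SPV_INTEL_usm_storage_classes extension is available; otherwise both device
// and host USM pointers fall back to CrossWorkgroup.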
SPIRV::StorageClass::StorageClass
addressSpaceToStorageClass(unsigned AddrSpace, const SPIRVSubtarget &STI) {
    return SPIRV::StorageClass::Function;
    return SPIRV::StorageClass::CrossWorkgroup;
    return SPIRV::StorageClass::UniformConstant;
    return SPIRV::StorageClass::Workgroup;
    return SPIRV::StorageClass::Generic;
    return STI.canUseExtension(SPIRV::Extension::SPV_INTEL_usm_storage_classes)
               ? SPIRV::StorageClass::DeviceOnlyINTEL
               : SPIRV::StorageClass::CrossWorkgroup;
    return STI.canUseExtension(SPIRV::Extension::SPV_INTEL_usm_storage_classes)
               ? SPIRV::StorageClass::HostOnlyINTEL
               : SPIRV::StorageClass::CrossWorkgroup;
    return SPIRV::StorageClass::Input;
    return SPIRV::StorageClass::Output;
    return SPIRV::StorageClass::CodeSectionINTEL;
    return SPIRV::StorageClass::Private;
    return SPIRV::StorageClass::StorageBuffer;
    return SPIRV::StorageClass::Uniform;
    return SPIRV::StorageClass::PushConstant;
SPIRV::MemorySemantics::MemorySemantics
getMemSemanticsForStorageClass(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::StorageBuffer:
  case SPIRV::StorageClass::Uniform:
    return SPIRV::MemorySemantics::UniformMemory;
  case SPIRV::StorageClass::Workgroup:
    return SPIRV::MemorySemantics::WorkgroupMemory;
  case SPIRV::StorageClass::CrossWorkgroup:
    return SPIRV::MemorySemantics::CrossWorkgroupMemory;
  case SPIRV::StorageClass::AtomicCounter:
    return SPIRV::MemorySemantics::AtomicCounterMemory;
  case SPIRV::StorageClass::Image:
    return SPIRV::MemorySemantics::ImageMemory;
  default:
    return SPIRV::MemorySemantics::None;
  }
}

    return SPIRV::MemorySemantics::Acquire;
    return SPIRV::MemorySemantics::Release;
    return SPIRV::MemorySemantics::AcquireRelease;
    return SPIRV::MemorySemantics::SequentiallyConsistent;
    return SPIRV::MemorySemantics::None;
  const SyncScope::ID SubGroup = Ctx.getOrInsertSyncScopeID("subgroup");
  const SyncScope::ID WorkGroup = Ctx.getOrInsertSyncScopeID("workgroup");
  const SyncScope::ID Device = Ctx.getOrInsertSyncScopeID("device");

  if (Id == SyncScope::SingleThread)
    return SPIRV::Scope::Invocation;
  else if (Id == SyncScope::System)
    return SPIRV::Scope::CrossDevice;
  else if (Id == SubGroup)
    return SPIRV::Scope::Subgroup;
  else if (Id == WorkGroup)
    return SPIRV::Scope::Workgroup;
  else if (Id == Device)
    return SPIRV::Scope::Device;
  return SPIRV::Scope::CrossDevice;
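// Illustrative note, not from the original file: given an atomic instruction
// AI with syncscope("workgroup"), getMemScope(Ctx, AI->getSyncScopeID()) maps
// the ID registered for "workgroup" above to SPIRV::Scope::Workgroup, while an
// unrecognised scope falls back to CrossDevice.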
      MI->getOpcode() == SPIRV::G_TRUNC || MI->getOpcode() == SPIRV::G_ZEXT
          ? MRI->getVRegDef(MI->getOperand(1).getReg())
  if (GI->is(Intrinsic::spv_track_constant)) {
    return MRI->getVRegDef(ConstReg);
  } else if (ConstInstr->getOpcode() == SPIRV::ASSIGN_TYPE) {
    return MRI->getVRegDef(ConstReg);
  } else if (ConstInstr->getOpcode() == TargetOpcode::G_CONSTANT ||
             ConstInstr->getOpcode() == TargetOpcode::G_FCONSTANT) {
    return MRI->getVRegDef(ConstReg);

  assert(MI && MI->getOpcode() == TargetOpcode::G_CONSTANT);
  return MI->getOperand(1).getCImm()->getValue().getZExtValue();

  assert(MI && MI->getOpcode() == TargetOpcode::G_CONSTANT);
  return MI->getOperand(1).getCImm()->getSExtValue();

  return GI->is(IntrinsicID);
  return MangledName == "write_pipe_2" || MangledName == "read_pipe_2" ||
         MangledName == "write_pipe_2_bl" || MangledName == "read_pipe_2_bl" ||
         MangledName == "write_pipe_4" || MangledName == "read_pipe_4" ||
         MangledName == "reserve_write_pipe" ||
         MangledName == "reserve_read_pipe" ||
         MangledName == "commit_write_pipe" ||
         MangledName == "commit_read_pipe" ||
         MangledName == "work_group_reserve_write_pipe" ||
         MangledName == "work_group_reserve_read_pipe" ||
         MangledName == "work_group_commit_write_pipe" ||
         MangledName == "work_group_commit_read_pipe" ||
         MangledName == "get_pipe_num_packets_ro" ||
         MangledName == "get_pipe_max_packets_ro" ||
         MangledName == "get_pipe_num_packets_wo" ||
         MangledName == "get_pipe_max_packets_wo" ||
         MangledName == "sub_group_reserve_write_pipe" ||
         MangledName == "sub_group_reserve_read_pipe" ||
         MangledName == "sub_group_commit_write_pipe" ||
         MangledName == "sub_group_commit_read_pipe" ||
         MangledName == "to_global" || MangledName == "to_local" ||
         MangledName == "to_private";

  return MangledName == "__enqueue_kernel_basic" ||
         MangledName == "__enqueue_kernel_basic_events" ||
         MangledName == "__enqueue_kernel_varargs" ||
         MangledName == "__enqueue_kernel_events_varargs";

  return MangledName == "__get_kernel_work_group_size_impl" ||
         MangledName == "__get_kernel_sub_group_count_for_ndrange_impl" ||
         MangledName == "__get_kernel_max_sub_group_size_for_ndrange_impl" ||
         MangledName == "__get_kernel_preferred_work_group_size_multiple_impl";
  if (!Name.starts_with("__"))
    return false;
         Name == "__translate_sampler_initializer";

  bool IsNonMangledSPIRV = Name.starts_with("__spirv_");
  bool IsNonMangledHLSL = Name.starts_with("__hlsl_");
  bool IsMangled = Name.starts_with("_Z");
  if (IsNonMangledOCL || IsNonMangledSPIRV || IsNonMangledHLSL || !IsMangled)

  std::string Result = DemangledName;
  size_t Start, Len = 0;
  size_t DemangledNameLenStart = 2;
  if (Name.starts_with("_ZN")) {
    // Skip CV and reference qualifiers and check for the cl::__spirv namespace.
    size_t NameSpaceStart = Name.find_first_not_of("rVKRO", 3);
    if (Name.substr(NameSpaceStart, 11) != "2cl7__spirv")
      return std::string();
    DemangledNameLenStart = NameSpaceStart + 11;
  }
  Start = Name.find_first_not_of("0123456789", DemangledNameLenStart);
  [[maybe_unused]] bool Error =
      Name.substr(DemangledNameLenStart, Start - DemangledNameLenStart)
          .getAsInteger(10, Len);
  assert(!Error && "Failed to parse demangled name length");
  return Name.substr(Start, Len).str();
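// Illustrative note, not from the original file: for a plainly mangled name
// such as "_Z16__spirv_ocl_sqrtd" (i.e. __spirv_ocl_sqrt(double)), the digits
// after "_Z" parse as Len == 16 with Start == 4, so the function returns
// "__spirv_ocl_sqrt". Names in the cl::__spirv namespace
// ("_ZN...2cl7__spirv...") take the branch above before the length is parsed.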
  if (Name.starts_with("opencl.") || Name.starts_with("ocl_") ||
      Name.starts_with("spirv."))

  if (F.getFnAttribute("hlsl.shader").isValid())
  TypeName.consume_front("atomic_");
  if (TypeName.consume_front("void"))
  else if (TypeName.consume_front("bool") || TypeName.consume_front("_Bool"))
  else if (TypeName.consume_front("char") ||
           TypeName.consume_front("signed char") ||
           TypeName.consume_front("unsigned char") ||
           TypeName.consume_front("uchar"))
  else if (TypeName.consume_front("short") ||
           TypeName.consume_front("signed short") ||
           TypeName.consume_front("unsigned short") ||
           TypeName.consume_front("ushort"))
  else if (TypeName.consume_front("int") ||
           TypeName.consume_front("signed int") ||
           TypeName.consume_front("unsigned int") ||
           TypeName.consume_front("uint"))
  else if (TypeName.consume_front("long") ||
           TypeName.consume_front("signed long") ||
           TypeName.consume_front("unsigned long") ||
           TypeName.consume_front("ulong"))
  else if (TypeName.consume_front("half") ||
           TypeName.consume_front("_Float16") ||
           TypeName.consume_front("__fp16"))
  else if (TypeName.consume_front("float"))
  else if (TypeName.consume_front("double"))
std::unordered_set<BasicBlock *>
PartialOrderingVisitor::getReachableFrom(BasicBlock *Start) {
  std::queue<BasicBlock *> ToVisit;
  std::unordered_set<BasicBlock *> Output;
  while (ToVisit.size() != 0) {
    BasicBlock *BB = ToVisit.front();
    if (Output.count(BB) != 0)
bool PartialOrderingVisitor::CanBeVisited(BasicBlock *BB) const {
  if (DT.dominates(BB, P))

  if (BlockToOrder.count(P) == 0)

  Loop *L = LI.getLoopFor(P);
  if (L == nullptr || L->contains(BB))

  assert(L->getNumBackEdges() <= 1);

  if (Latch == nullptr)

  if (BlockToOrder.count(Latch) == 0)
  auto It = BlockToOrder.find(BB);
  if (It != BlockToOrder.end())
    return It->second.Rank;

  if (DT.dominates(BB, P))

  auto Iterator = BlockToOrder.end();
  Loop *L = LI.getLoopFor(P);
  BasicBlock *Latch = L ? L->getLoopLatch() : nullptr;
  if (L == nullptr || L->contains(BB) || Latch == nullptr) {
    Iterator = BlockToOrder.find(P);
  } else {
    Iterator = BlockToOrder.find(Latch);
  }
  assert(Iterator != BlockToOrder.end());
  result = std::max(result, Iterator->second.Rank + 1);
size_t PartialOrderingVisitor::visit(BasicBlock *BB, size_t Unused) {
  size_t QueueIndex = 0;
  while (ToVisit.size() != 0) {
    if (!CanBeVisited(BB)) {
      if (QueueIndex >= ToVisit.size())
        report_fatal_error(
            "No valid candidate in the queue. Is the graph reducible?");

    OrderInfo Info = {Rank, BlockToOrder.size()};
    BlockToOrder.emplace(BB, Info);
    if (Queued.count(S) != 0)
767 visit(&*
F.begin(), 0);
769 Order.reserve(
F.size());
770 for (
auto &[BB,
Info] : BlockToOrder)
771 Order.emplace_back(BB);
773 std::sort(Order.begin(), Order.end(), [&](
const auto &
LHS,
const auto &
RHS) {
774 return compare(LHS, RHS);
780 const OrderInfo &InfoLHS = BlockToOrder.at(
const_cast<BasicBlock *
>(
LHS));
781 const OrderInfo &InfoRHS = BlockToOrder.at(
const_cast<BasicBlock *
>(
RHS));
782 if (InfoLHS.Rank != InfoRHS.Rank)
783 return InfoLHS.Rank < InfoRHS.Rank;
784 return InfoLHS.TraversalIndex < InfoRHS.TraversalIndex;
  std::unordered_set<BasicBlock *> Reachable = getReachableFrom(&Start);
  assert(BlockToOrder.count(&Start) != 0);

  auto It = Order.begin();
  while (It != Order.end() && *It != &Start)
    ++It;
  assert(It != Order.end());

  std::optional<size_t> EndRank = std::nullopt;
  for (; It != Order.end(); ++It) {
    if (EndRank.has_value() && BlockToOrder[*It].Rank > *EndRank)
    if (Reachable.count(*It) == 0) {
      EndRank = BlockToOrder[*It].Rank;
  std::vector<BasicBlock *> Order;
  Order.reserve(F.size());
  assert(&*F.begin() == Order[0]);
  if (BB != LastBlock && &*LastBlock->getNextNode() != BB) {

  if (MaybeDef && MaybeDef->getOpcode() == SPIRV::ASSIGN_TYPE)
  constexpr unsigned MaxIters = 1024;
  for (unsigned I = 0; I < MaxIters; ++I) {
    std::string OrdName = Name + Twine(I).str();
    if (!M.getFunction(OrdName)) {
      Name = std::move(OrdName);
  if (!MRI->getRegClassOrNull(Reg) || Force) {

                     SPIRV::AccessQualifier::AccessQualifier AccessQual,
                     bool EmitIR, bool Force) {
                  GR, MIRBuilder.getMRI(), MIRBuilder.getMF(), Force);

                     SPIRV::AccessQualifier::AccessQualifier AccessQual,
                     bool EmitIR) {

  Args.push_back(Arg2);
  return B.CreateIntrinsic(IntrID, {Types}, Args);
  if (Ty->isPtrOrPtrVectorTy())
  for (const Type *ArgTy : RefTy->params())

  if (F->getName().starts_with("llvm.spv."))
SmallVector<MachineInstr *, 4>
                            unsigned MinWC, unsigned ContinuedOpcode,
  constexpr unsigned MaxWordCount = UINT16_MAX;
  const size_t NumElements = Args.size();
  size_t MaxNumElements = MaxWordCount - MinWC;
  size_t SPIRVStructNumElements = NumElements;

  if (NumElements > MaxNumElements) {
    SPIRVStructNumElements = MaxNumElements;
    MaxNumElements = MaxWordCount - 1;

  for (size_t I = 0; I < SPIRVStructNumElements; ++I)
  Instructions.push_back(MIB.getInstr());

  for (size_t I = SPIRVStructNumElements; I < NumElements;
       I += MaxNumElements) {
    auto MIB = MIRBuilder.buildInstr(ContinuedOpcode);
    for (size_t J = I; J < std::min(I + MaxNumElements, NumElements); ++J)
    Instructions.push_back(MIB.getInstr());
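// Descriptive note, not part of the original file: every SPIR-V instruction
// stores its word count in a 16-bit field, so an instruction whose operand
// list would exceed UINT16_MAX words cannot be encoded directly. The helper
// above therefore emits the first instruction with at most
// MaxWordCount - MinWC operands and chains the remaining arguments onto
// follow-up instructions built with ContinuedOpcode (for example the
// *ContinuedINTEL opcodes from SPV_INTEL_long_composites).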
  unsigned LC = SPIRV::LoopControl::None;
  std::vector<std::pair<unsigned, unsigned>> MaskToValueMap;

    LC |= SPIRV::LoopControl::DontUnroll;

    LC |= SPIRV::LoopControl::Unroll;

  std::optional<int> Count =
    LC |= SPIRV::LoopControl::PartialCount;
    MaskToValueMap.emplace_back(
        std::make_pair(SPIRV::LoopControl::PartialCount, *Count));

  for (auto &[Mask, Val] : MaskToValueMap)
    Result.push_back(Val);
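// Illustrative note, not from the original file: a loop carrying
// "llvm.loop.unroll.disable" metadata maps to the DontUnroll mask, an explicit
// unroll request maps to Unroll, and an unroll count (e.g. from
// "#pragma unroll 4", presumably read via
// getOptionalIntLoopAttribute(L, "llvm.loop.unroll.count")) additionally sets
// PartialCount and appends the literal 4 as its operand.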
  static const std::set<unsigned> TypeFoldingSupportingOpcs = {
      TargetOpcode::G_ADD,
      TargetOpcode::G_FADD,
      TargetOpcode::G_STRICT_FADD,
      TargetOpcode::G_SUB,
      TargetOpcode::G_FSUB,
      TargetOpcode::G_STRICT_FSUB,
      TargetOpcode::G_MUL,
      TargetOpcode::G_FMUL,
      TargetOpcode::G_STRICT_FMUL,
      TargetOpcode::G_SDIV,
      TargetOpcode::G_UDIV,
      TargetOpcode::G_FDIV,
      TargetOpcode::G_STRICT_FDIV,
      TargetOpcode::G_SREM,
      TargetOpcode::G_UREM,
      TargetOpcode::G_FREM,
      TargetOpcode::G_STRICT_FREM,
      TargetOpcode::G_FNEG,
      TargetOpcode::G_CONSTANT,
      TargetOpcode::G_FCONSTANT,
      TargetOpcode::G_AND,
      TargetOpcode::G_OR,
      TargetOpcode::G_XOR,
      TargetOpcode::G_SHL,
      TargetOpcode::G_ASHR,
      TargetOpcode::G_LSHR,
      TargetOpcode::G_SELECT,
      TargetOpcode::G_EXTRACT_VECTOR_ELT,
  };

  return TypeFoldingSupportingOpcs;
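// Illustrative note, not from the original file: isTypeFoldingSupported is
// presumably a membership test over this set, so a query such as
// isTypeFoldingSupported(TargetOpcode::G_FADD) would return true while an
// unrelated opcode such as TargetOpcode::G_PHI would return false.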
  return (Def->getOpcode() == SPIRV::ASSIGN_TYPE ||
          Def->getOpcode() == TargetOpcode::COPY)
             ? MRI->getVRegDef(Def->getOperand(1).getReg())
             : Def;

  if (Def->getOpcode() == TargetOpcode::G_CONSTANT ||
      Def->getOpcode() == SPIRV::OpConstantI)

  if (Def->getOpcode() == SPIRV::OpConstantI)
    return Def->getOperand(2).getImm();
  if (Def->getOpcode() == TargetOpcode::G_CONSTANT)
    return Def->getOperand(1).getCImm()->getZExtValue();
  while (VarPos != BB.end() && VarPos->getOpcode() != SPIRV::OpFunction) {

  while (VarPos != BB.end() &&
         VarPos->getOpcode() == SPIRV::OpFunctionParameter) {

  return VarPos != BB.end() && VarPos->getOpcode() == SPIRV::OpLabel ? ++VarPos
  if (Ty->getStructNumElements() != 2)

  if (T_in_struct != SecondElement)

  auto *Padding_in_struct =
  if (!Padding_in_struct || Padding_in_struct->getName() != "spirv.Padding")

  TotalSize = ArraySize + 1;
  OriginalElementType = ArrayElementType;

  if (!Ty->isStructTy())

  Type *OriginalElementType = nullptr;
  for (Type *ElementTy : STy->elements()) {
    if (NewElementTy != ElementTy)
    NewElementTypes.push_back(NewElementTy);

  if (STy->isLiteral())

  NewTy->setBody(NewElementTypes, STy->isPacked());
std::optional<SPIRV::LinkageType::LinkageType>
getSpirvLinkageTypeFor(const SPIRVSubtarget &ST, const GlobalValue &GV) {
    return std::nullopt;

    return SPIRV::LinkageType::Import;

      ST.canUseExtension(SPIRV::Extension::SPV_KHR_linkonce_odr))
    return SPIRV::LinkageType::LinkOnceODR;

  return SPIRV::LinkageType::Export;