24#include "llvm/IR/IntrinsicsSPIRV.h"
31#include <unordered_set>
54#define GET_BuiltinGroup_DECL
55#include "SPIRVGenTables.inc"
60class SPIRVEmitIntrinsics
62 public InstVisitor<SPIRVEmitIntrinsics, Instruction *> {
63 SPIRVTargetMachine *TM =
nullptr;
64 SPIRVGlobalRegistry *GR =
nullptr;
66 bool TrackConstants =
true;
67 bool HaveFunPtrs =
false;
68 DenseMap<Instruction *, Constant *> AggrConsts;
69 DenseMap<Instruction *, Type *> AggrConstTypes;
70 DenseSet<Instruction *> AggrStores;
71 std::unordered_set<Value *> Named;
74 DenseMap<Function *, SmallVector<std::pair<unsigned, Type *>>> FDeclPtrTys;
77 bool CanTodoType =
true;
78 unsigned TodoTypeSz = 0;
79 DenseMap<Value *, bool> TodoType;
80 void insertTodoType(
Value *
Op) {
83 auto It = TodoType.try_emplace(
Op,
true);
89 auto It = TodoType.find(
Op);
90 if (It != TodoType.end() && It->second) {
98 auto It = TodoType.find(
Op);
99 return It != TodoType.end() && It->second;
103 std::unordered_set<Instruction *> TypeValidated;
// Tags for target types this pass treats specially; only Event so far.
// NOTE(review): stray listing line number ("106 ") and the lost trailing
// semicolon removed — they were extraction artifacts, not source tokens.
enum WellKnownTypes { Event };
109 Type *deduceElementType(
Value *
I,
bool UnknownElemTypeI8);
110 Type *deduceElementTypeHelper(
Value *
I,
bool UnknownElemTypeI8);
111 Type *deduceElementTypeHelper(
Value *
I, std::unordered_set<Value *> &Visited,
112 bool UnknownElemTypeI8,
113 bool IgnoreKnownType =
false);
114 Type *deduceElementTypeByValueDeep(
Type *ValueTy,
Value *Operand,
115 bool UnknownElemTypeI8);
116 Type *deduceElementTypeByValueDeep(
Type *ValueTy,
Value *Operand,
117 std::unordered_set<Value *> &Visited,
118 bool UnknownElemTypeI8);
120 std::unordered_set<Value *> &Visited,
121 bool UnknownElemTypeI8);
123 bool UnknownElemTypeI8);
126 Type *deduceNestedTypeHelper(
User *
U,
bool UnknownElemTypeI8);
128 std::unordered_set<Value *> &Visited,
129 bool UnknownElemTypeI8);
133 SmallPtrSet<Instruction *, 4> *IncompleteRets,
134 const SmallPtrSet<Value *, 4> *AskOps =
nullptr,
135 bool IsPostprocessing =
false);
140 Type *reconstructType(
Value *
Op,
bool UnknownElemTypeI8,
141 bool IsPostprocessing);
146 bool UnknownElemTypeI8);
148 void insertAssignPtrTypeTargetExt(TargetExtType *AssignedType,
Value *
V,
151 Type *ExpectedElementType,
152 unsigned OperandToReplace,
155 bool shouldTryToAddMemAliasingDecoration(
Instruction *Inst);
157 void insertConstantsForFPFastMathDefault(
Module &
M);
158 void processGlobalValue(GlobalVariable &GV,
IRBuilder<> &
B);
163 std::unordered_set<Function *> &FVisited);
165 bool deduceOperandElementTypeCalledFunction(
167 Type *&KnownElemTy,
bool &Incomplete);
168 void deduceOperandElementTypeFunctionPointer(
170 Type *&KnownElemTy,
bool IsPostprocessing);
171 bool deduceOperandElementTypeFunctionRet(
172 Instruction *
I, SmallPtrSet<Instruction *, 4> *IncompleteRets,
173 const SmallPtrSet<Value *, 4> *AskOps,
bool IsPostprocessing,
178 DenseMap<Function *, CallInst *> Ptrcasts);
180 DenseSet<std::pair<Value *, Value *>> &VisitedSubst);
183 DenseSet<std::pair<Value *, Value *>> &VisitedSubst);
184 void propagateElemTypeRec(
Value *
Op,
Type *PtrElemTy,
Type *CastElemTy,
185 DenseSet<std::pair<Value *, Value *>> &VisitedSubst,
186 std::unordered_set<Value *> &Visited,
187 DenseMap<Function *, CallInst *> Ptrcasts);
195 GetElementPtrInst *simplifyZeroLengthArrayGepInst(GetElementPtrInst *
GEP);
198 bool postprocessTypes(
Module &
M);
199 bool processFunctionPointers(
Module &
M);
200 void parseFunDeclarations(
Module &
M);
202 void useRoundingMode(ConstrainedFPIntrinsic *FPI,
IRBuilder<> &
B);
218 bool walkLogicalAccessChain(
219 GetElementPtrInst &
GEP,
220 const std::function<
void(
Type *PointedType, uint64_t
Index)>
229 Type *getGEPType(GetElementPtrInst *
GEP);
236 Type *getGEPTypeLogical(GetElementPtrInst *
GEP);
238 Instruction *buildLogicalAccessChainFromGEP(GetElementPtrInst &
GEP);
242 SPIRVEmitIntrinsics(SPIRVTargetMachine *TM =
nullptr)
243 : ModulePass(ID), TM(TM) {}
246 Instruction *visitGetElementPtrInst(GetElementPtrInst &
I);
248 Instruction *visitInsertElementInst(InsertElementInst &
I);
249 Instruction *visitExtractElementInst(ExtractElementInst &
I);
251 Instruction *visitExtractValueInst(ExtractValueInst &
I);
255 Instruction *visitAtomicCmpXchgInst(AtomicCmpXchgInst &
I);
259 StringRef getPassName()
const override {
return "SPIRV emit intrinsics"; }
261 bool runOnModule(
Module &
M)
override;
263 void getAnalysisUsage(AnalysisUsage &AU)
const override {
264 ModulePass::getAnalysisUsage(AU);
273 return II->getIntrinsicID() == Intrinsic::experimental_convergence_entry ||
274 II->getIntrinsicID() == Intrinsic::experimental_convergence_loop ||
275 II->getIntrinsicID() == Intrinsic::experimental_convergence_anchor;
278bool expectIgnoredInIRTranslation(
const Instruction *
I) {
282 switch (
II->getIntrinsicID()) {
283 case Intrinsic::invariant_start:
284 case Intrinsic::spv_resource_handlefrombinding:
285 case Intrinsic::spv_resource_getpointer:
295 if (
II->getIntrinsicID() == Intrinsic::spv_ptrcast) {
296 Value *V =
II->getArgOperand(0);
297 return getPointerRoot(V);
305char SPIRVEmitIntrinsics::ID = 0;
328 B.SetInsertPoint(
I->getParent()->getFirstNonPHIOrDbgOrAlloca());
334 B.SetCurrentDebugLocation(
I->getDebugLoc());
335 if (
I->getType()->isVoidTy())
336 B.SetInsertPoint(
I->getNextNode());
338 B.SetInsertPoint(*
I->getInsertionPointAfterDef());
343 switch (Intr->getIntrinsicID()) {
344 case Intrinsic::invariant_start:
345 case Intrinsic::invariant_end:
353 if (
I->getType()->isTokenTy())
355 "does not support token type",
360 if (!
I->hasName() ||
I->getType()->isAggregateType() ||
361 expectIgnoredInIRTranslation(
I))
369 if (Name.starts_with(
"spv.mutated_callsite"))
371 if (Name.starts_with(
"spv.named_mutated_callsite"))
372 I->setName(Name.substr(Name.rfind(
'.') + 1));
377 std::vector<Value *> Args = {
380 B.CreateIntrinsic(Intrinsic::spv_assign_name, {
I->getType()}, Args);
383void SPIRVEmitIntrinsics::replaceAllUsesWith(
Value *Src,
Value *Dest,
387 if (isTodoType(Src)) {
390 insertTodoType(Dest);
394void SPIRVEmitIntrinsics::replaceAllUsesWithAndErase(
IRBuilder<> &
B,
399 std::string
Name = Src->hasName() ? Src->getName().str() :
"";
400 Src->eraseFromParent();
403 if (Named.insert(Dest).second)
428Type *SPIRVEmitIntrinsics::reconstructType(
Value *
Op,
bool UnknownElemTypeI8,
429 bool IsPostprocessing) {
444 if (UnknownElemTypeI8) {
445 if (!IsPostprocessing)
453CallInst *SPIRVEmitIntrinsics::buildSpvPtrcast(Function *
F,
Value *
Op,
461 B.SetInsertPointPastAllocas(OpA->getParent());
464 B.SetInsertPoint(
F->getEntryBlock().getFirstNonPHIOrDbgOrAlloca());
466 Type *OpTy =
Op->getType();
470 CallInst *PtrCasted =
471 B.CreateIntrinsic(Intrinsic::spv_ptrcast, {
Types},
Args);
476void SPIRVEmitIntrinsics::replaceUsesOfWithSpvPtrcast(
478 DenseMap<Function *, CallInst *> Ptrcasts) {
480 CallInst *PtrCastedI =
nullptr;
481 auto It = Ptrcasts.
find(
F);
482 if (It == Ptrcasts.
end()) {
483 PtrCastedI = buildSpvPtrcast(
F,
Op, ElemTy);
484 Ptrcasts[
F] = PtrCastedI;
486 PtrCastedI = It->second;
488 I->replaceUsesOfWith(
Op, PtrCastedI);
491void SPIRVEmitIntrinsics::propagateElemType(
493 DenseSet<std::pair<Value *, Value *>> &VisitedSubst) {
494 DenseMap<Function *, CallInst *> Ptrcasts;
496 for (
auto *U :
Users) {
499 if (!VisitedSubst.insert(std::make_pair(U,
Op)).second)
505 TypeValidated.find(UI) != TypeValidated.end())
506 replaceUsesOfWithSpvPtrcast(
Op, ElemTy, UI, Ptrcasts);
510void SPIRVEmitIntrinsics::propagateElemTypeRec(
512 DenseSet<std::pair<Value *, Value *>> &VisitedSubst) {
513 std::unordered_set<Value *> Visited;
514 DenseMap<Function *, CallInst *> Ptrcasts;
515 propagateElemTypeRec(
Op, PtrElemTy, CastElemTy, VisitedSubst, Visited,
516 std::move(Ptrcasts));
519void SPIRVEmitIntrinsics::propagateElemTypeRec(
521 DenseSet<std::pair<Value *, Value *>> &VisitedSubst,
522 std::unordered_set<Value *> &Visited,
523 DenseMap<Function *, CallInst *> Ptrcasts) {
524 if (!Visited.insert(
Op).second)
527 for (
auto *U :
Users) {
530 if (!VisitedSubst.insert(std::make_pair(U,
Op)).second)
536 TypeValidated.find(UI) != TypeValidated.end())
537 replaceUsesOfWithSpvPtrcast(
Op, CastElemTy, UI, Ptrcasts);
545SPIRVEmitIntrinsics::deduceElementTypeByValueDeep(
Type *ValueTy,
Value *Operand,
546 bool UnknownElemTypeI8) {
547 std::unordered_set<Value *> Visited;
548 return deduceElementTypeByValueDeep(ValueTy, Operand, Visited,
552Type *SPIRVEmitIntrinsics::deduceElementTypeByValueDeep(
553 Type *ValueTy,
Value *Operand, std::unordered_set<Value *> &Visited,
554 bool UnknownElemTypeI8) {
559 deduceElementTypeHelper(Operand, Visited, UnknownElemTypeI8))
570Type *SPIRVEmitIntrinsics::deduceElementTypeByUsersDeep(
571 Value *
Op, std::unordered_set<Value *> &Visited,
bool UnknownElemTypeI8) {
583 for (User *OpU :
Op->users()) {
585 if (
Type *Ty = deduceElementTypeHelper(Inst, Visited, UnknownElemTypeI8))
598 if ((DemangledName.
starts_with(
"__spirv_ocl_printf(") ||
607Type *SPIRVEmitIntrinsics::deduceElementTypeHelper(
Value *
I,
608 bool UnknownElemTypeI8) {
609 std::unordered_set<Value *> Visited;
610 return deduceElementTypeHelper(
I, Visited, UnknownElemTypeI8);
613void SPIRVEmitIntrinsics::maybeAssignPtrType(
Type *&Ty,
Value *
Op,
Type *RefTy,
614 bool UnknownElemTypeI8) {
616 if (!UnknownElemTypeI8)
623bool SPIRVEmitIntrinsics::walkLogicalAccessChain(
624 GetElementPtrInst &
GEP,
625 const std::function<
void(
Type *, uint64_t)> &OnLiteralIndexing,
626 const std::function<
void(
Type *,
Value *)> &OnDynamicIndexing) {
634 Value *Src = getPointerRoot(
GEP.getPointerOperand());
635 Type *CurType = deduceElementType(Src,
true);
644 OnDynamicIndexing(AT->getElementType(), Operand);
645 return AT ==
nullptr;
653 uint32_t EltTypeSize =
DL.getTypeSizeInBits(AT->getElementType()) / 8;
657 CurType = AT->getElementType();
658 OnLiteralIndexing(CurType, Index);
660 uint32_t StructSize =
DL.getTypeSizeInBits(ST) / 8;
663 const auto &STL =
DL.getStructLayout(ST);
664 unsigned Element = STL->getElementContainingOffset(
Offset);
665 Offset -= STL->getElementOffset(Element);
666 CurType =
ST->getElementType(Element);
667 OnLiteralIndexing(CurType, Element);
669 Type *EltTy = VT->getElementType();
670 TypeSize EltSizeBits =
DL.getTypeSizeInBits(EltTy);
671 assert(EltSizeBits % 8 == 0 &&
672 "Element type size in bits must be a multiple of 8.");
673 uint32_t EltTypeSize = EltSizeBits / 8;
678 OnLiteralIndexing(CurType, Index);
690SPIRVEmitIntrinsics::buildLogicalAccessChainFromGEP(GetElementPtrInst &
GEP) {
693 B.SetInsertPoint(&
GEP);
695 std::vector<Value *> Indices;
696 Indices.push_back(ConstantInt::get(
697 IntegerType::getInt32Ty(CurrF->
getContext()), 0,
false));
698 walkLogicalAccessChain(
700 [&Indices, &
B](
Type *EltType, uint64_t Index) {
702 ConstantInt::get(
B.getInt64Ty(), Index,
false));
705 uint32_t EltTypeSize =
DL.getTypeSizeInBits(EltType) / 8;
707 Offset, ConstantInt::get(
Offset->getType(), EltTypeSize,
709 Indices.push_back(Index);
714 Args.push_back(
B.getInt1(
GEP.isInBounds()));
715 Args.push_back(
GEP.getOperand(0));
717 auto *NewI =
B.CreateIntrinsic(Intrinsic::spv_gep, {
Types}, {
Args});
718 replaceAllUsesWithAndErase(
B, &
GEP, NewI);
722Type *SPIRVEmitIntrinsics::getGEPTypeLogical(GetElementPtrInst *
GEP) {
724 Type *CurType =
GEP->getResultElementType();
726 bool Interrupted = walkLogicalAccessChain(
727 *
GEP, [&CurType](
Type *EltType, uint64_t Index) { CurType = EltType; },
730 return Interrupted ?
GEP->getResultElementType() : CurType;
733Type *SPIRVEmitIntrinsics::getGEPType(GetElementPtrInst *
Ref) {
734 if (
Ref->getSourceElementType() ==
735 IntegerType::getInt8Ty(CurrF->
getContext()) &&
737 return getGEPTypeLogical(
Ref);
744 Ty =
Ref->getSourceElementType();
748 Ty =
Ref->getResultElementType();
753Type *SPIRVEmitIntrinsics::deduceElementTypeHelper(
754 Value *
I, std::unordered_set<Value *> &Visited,
bool UnknownElemTypeI8,
755 bool IgnoreKnownType) {
761 if (!IgnoreKnownType)
766 if (!Visited.insert(
I).second)
773 maybeAssignPtrType(Ty,
I,
Ref->getAllocatedType(), UnknownElemTypeI8);
775 Ty = getGEPType(
Ref);
780 KnownTy =
Op->getType();
782 maybeAssignPtrType(Ty,
I, ElemTy, UnknownElemTypeI8);
785 Ty = SPIRV::getOriginalFunctionType(*Fn);
788 Ty = deduceElementTypeByValueDeep(
790 Ref->getNumOperands() > 0 ?
Ref->getOperand(0) :
nullptr, Visited,
794 Type *RefTy = deduceElementTypeHelper(
Ref->getPointerOperand(), Visited,
796 maybeAssignPtrType(Ty,
I, RefTy, UnknownElemTypeI8);
798 maybeAssignPtrType(Ty,
I,
Ref->getDestTy(), UnknownElemTypeI8);
800 if (
Type *Src =
Ref->getSrcTy(), *Dest =
Ref->getDestTy();
802 Ty = deduceElementTypeHelper(
Ref->getOperand(0), Visited,
807 Ty = deduceElementTypeHelper(
Op, Visited, UnknownElemTypeI8);
811 Ty = deduceElementTypeHelper(
Op, Visited, UnknownElemTypeI8);
813 Type *BestTy =
nullptr;
815 DenseMap<Type *, unsigned> PhiTys;
816 for (
int i =
Ref->getNumIncomingValues() - 1; i >= 0; --i) {
817 Ty = deduceElementTypeByUsersDeep(
Ref->getIncomingValue(i), Visited,
824 if (It.first->second > MaxN) {
825 MaxN = It.first->second;
833 for (
Value *
Op : {
Ref->getTrueValue(),
Ref->getFalseValue()}) {
834 Ty = deduceElementTypeByUsersDeep(
Op, Visited, UnknownElemTypeI8);
839 static StringMap<unsigned> ResTypeByArg = {
843 {
"__spirv_GenericCastToPtr_ToGlobal", 0},
844 {
"__spirv_GenericCastToPtr_ToLocal", 0},
845 {
"__spirv_GenericCastToPtr_ToPrivate", 0},
846 {
"__spirv_GenericCastToPtrExplicit_ToGlobal", 0},
847 {
"__spirv_GenericCastToPtrExplicit_ToLocal", 0},
848 {
"__spirv_GenericCastToPtrExplicit_ToPrivate", 0}};
852 if (
II &&
II->getIntrinsicID() == Intrinsic::spv_resource_getpointer) {
854 if (HandleType->getTargetExtName() ==
"spirv.Image" ||
855 HandleType->getTargetExtName() ==
"spirv.SignedImage") {
856 for (User *U :
II->users()) {
861 }
else if (HandleType->getTargetExtName() ==
"spirv.VulkanBuffer") {
863 Ty = HandleType->getTypeParameter(0);
875 }
else if (
II &&
II->getIntrinsicID() ==
876 Intrinsic::spv_generic_cast_to_ptr_explicit) {
877 Ty = deduceElementTypeHelper(CI->getArgOperand(0), Visited,
879 }
else if (Function *CalledF = CI->getCalledFunction()) {
880 std::string DemangledName =
882 if (DemangledName.length() > 0)
883 DemangledName = SPIRV::lookupBuiltinNameHelper(DemangledName);
884 auto AsArgIt = ResTypeByArg.
find(DemangledName);
885 if (AsArgIt != ResTypeByArg.
end())
886 Ty = deduceElementTypeHelper(CI->getArgOperand(AsArgIt->second),
887 Visited, UnknownElemTypeI8);
894 if (Ty && !IgnoreKnownType) {
905Type *SPIRVEmitIntrinsics::deduceNestedTypeHelper(User *U,
906 bool UnknownElemTypeI8) {
907 std::unordered_set<Value *> Visited;
908 return deduceNestedTypeHelper(U,
U->getType(), Visited, UnknownElemTypeI8);
911Type *SPIRVEmitIntrinsics::deduceNestedTypeHelper(
912 User *U,
Type *OrigTy, std::unordered_set<Value *> &Visited,
913 bool UnknownElemTypeI8) {
922 if (!Visited.insert(U).second)
928 for (
unsigned i = 0; i <
U->getNumOperands(); ++i) {
930 assert(
Op &&
"Operands should not be null.");
931 Type *OpTy =
Op->getType();
935 deduceElementTypeHelper(
Op, Visited, UnknownElemTypeI8))
942 Change |= Ty != OpTy;
950 if (
Value *
Op =
U->getNumOperands() > 0 ?
U->getOperand(0) :
nullptr) {
951 Type *OpTy = ArrTy->getElementType();
955 deduceElementTypeHelper(
Op, Visited, UnknownElemTypeI8))
962 Type *NewTy = ArrayType::get(Ty, ArrTy->getNumElements());
968 if (
Value *
Op =
U->getNumOperands() > 0 ?
U->getOperand(0) :
nullptr) {
969 Type *OpTy = VecTy->getElementType();
973 deduceElementTypeHelper(
Op, Visited, UnknownElemTypeI8))
980 Type *NewTy = VectorType::get(Ty, VecTy->getElementCount());
990Type *SPIRVEmitIntrinsics::deduceElementType(
Value *
I,
bool UnknownElemTypeI8) {
991 if (
Type *Ty = deduceElementTypeHelper(
I, UnknownElemTypeI8))
993 if (!UnknownElemTypeI8)
996 return IntegerType::getInt8Ty(
I->getContext());
1000 Value *PointerOperand) {
1006 return I->getType();
1014bool SPIRVEmitIntrinsics::deduceOperandElementTypeCalledFunction(
1016 Type *&KnownElemTy,
bool &Incomplete) {
1020 std::string DemangledName =
1022 if (DemangledName.length() > 0 &&
1024 const SPIRVSubtarget &
ST = TM->
getSubtarget<SPIRVSubtarget>(*CalledF);
1025 auto [Grp, Opcode, ExtNo] = SPIRV::mapBuiltinToOpcode(
1026 DemangledName,
ST.getPreferredInstructionSet());
1027 if (Opcode == SPIRV::OpGroupAsyncCopy) {
1028 for (
unsigned i = 0, PtrCnt = 0; i < CI->
arg_size() && PtrCnt < 2; ++i) {
1034 KnownElemTy = ElemTy;
1035 Ops.push_back(std::make_pair(
Op, i));
1037 }
else if (Grp == SPIRV::Atomic || Grp == SPIRV::AtomicFloating) {
1044 case SPIRV::OpAtomicFAddEXT:
1045 case SPIRV::OpAtomicFMinEXT:
1046 case SPIRV::OpAtomicFMaxEXT:
1047 case SPIRV::OpAtomicLoad:
1048 case SPIRV::OpAtomicCompareExchangeWeak:
1049 case SPIRV::OpAtomicCompareExchange:
1050 case SPIRV::OpAtomicExchange:
1051 case SPIRV::OpAtomicIAdd:
1052 case SPIRV::OpAtomicISub:
1053 case SPIRV::OpAtomicOr:
1054 case SPIRV::OpAtomicXor:
1055 case SPIRV::OpAtomicAnd:
1056 case SPIRV::OpAtomicUMin:
1057 case SPIRV::OpAtomicUMax:
1058 case SPIRV::OpAtomicSMin:
1059 case SPIRV::OpAtomicSMax: {
1064 Incomplete = isTodoType(
Op);
1065 Ops.push_back(std::make_pair(
Op, 0));
1067 case SPIRV::OpAtomicStore: {
1076 Incomplete = isTodoType(
Op);
1077 Ops.push_back(std::make_pair(
Op, 0));
1086void SPIRVEmitIntrinsics::deduceOperandElementTypeFunctionPointer(
1088 Type *&KnownElemTy,
bool IsPostprocessing) {
1092 Ops.push_back(std::make_pair(
Op, std::numeric_limits<unsigned>::max()));
1093 FunctionType *FTy = SPIRV::getOriginalFunctionType(*CI);
1094 bool IsNewFTy =
false, IsIncomplete =
false;
1097 Type *ArgTy = Arg->getType();
1102 if (isTodoType(Arg))
1103 IsIncomplete =
true;
1105 IsIncomplete =
true;
1108 ArgTy = FTy->getFunctionParamType(ParmIdx);
1112 Type *RetTy = FTy->getReturnType();
1119 IsIncomplete =
true;
1121 IsIncomplete =
true;
1124 if (!IsPostprocessing && IsIncomplete)
1127 IsNewFTy ? FunctionType::get(RetTy, ArgTys, FTy->isVarArg()) : FTy;
1130bool SPIRVEmitIntrinsics::deduceOperandElementTypeFunctionRet(
1131 Instruction *
I, SmallPtrSet<Instruction *, 4> *IncompleteRets,
1132 const SmallPtrSet<Value *, 4> *AskOps,
bool IsPostprocessing,
1144 DenseSet<std::pair<Value *, Value *>> VisitedSubst{std::make_pair(
I,
Op)};
1145 for (User *U :
F->users()) {
1153 propagateElemType(CI, PrevElemTy, VisitedSubst);
1163 for (Instruction *IncompleteRetI : *IncompleteRets)
1164 deduceOperandElementType(IncompleteRetI,
nullptr, AskOps,
1166 }
else if (IncompleteRets) {
1169 TypeValidated.insert(
I);
1177void SPIRVEmitIntrinsics::deduceOperandElementType(
1178 Instruction *
I, SmallPtrSet<Instruction *, 4> *IncompleteRets,
1179 const SmallPtrSet<Value *, 4> *AskOps,
bool IsPostprocessing) {
1181 Type *KnownElemTy =
nullptr;
1182 bool Incomplete =
false;
1188 Incomplete = isTodoType(
I);
1189 for (
unsigned i = 0; i <
Ref->getNumIncomingValues(); i++) {
1192 Ops.push_back(std::make_pair(
Op, i));
1198 Incomplete = isTodoType(
I);
1199 Ops.push_back(std::make_pair(
Ref->getPointerOperand(), 0));
1206 Incomplete = isTodoType(
I);
1207 Ops.push_back(std::make_pair(
Ref->getOperand(0), 0));
1211 KnownElemTy =
Ref->getSourceElementType();
1212 Ops.push_back(std::make_pair(
Ref->getPointerOperand(),
1215 KnownElemTy =
I->getType();
1221 Ops.push_back(std::make_pair(
Ref->getPointerOperand(),
1225 reconstructType(
Ref->getValueOperand(),
false, IsPostprocessing)))
1230 Ops.push_back(std::make_pair(
Ref->getPointerOperand(),
1238 Incomplete = isTodoType(
Ref->getPointerOperand());
1239 Ops.push_back(std::make_pair(
Ref->getPointerOperand(),
1247 Incomplete = isTodoType(
Ref->getPointerOperand());
1248 Ops.push_back(std::make_pair(
Ref->getPointerOperand(),
1254 Incomplete = isTodoType(
I);
1255 for (
unsigned i = 0; i <
Ref->getNumOperands(); i++) {
1258 Ops.push_back(std::make_pair(
Op, i));
1266 if (deduceOperandElementTypeFunctionRet(
I, IncompleteRets, AskOps,
1267 IsPostprocessing, KnownElemTy,
Op,
1270 Incomplete = isTodoType(CurrF);
1271 Ops.push_back(std::make_pair(
Op, 0));
1277 bool Incomplete0 = isTodoType(Op0);
1278 bool Incomplete1 = isTodoType(Op1);
1280 Type *ElemTy0 = (Incomplete0 && !Incomplete1 && ElemTy1)
1282 : GR->findDeducedElementType(Op0);
1284 KnownElemTy = ElemTy0;
1285 Incomplete = Incomplete0;
1286 Ops.push_back(std::make_pair(Op1, 1));
1287 }
else if (ElemTy1) {
1288 KnownElemTy = ElemTy1;
1289 Incomplete = Incomplete1;
1290 Ops.push_back(std::make_pair(Op0, 0));
1294 deduceOperandElementTypeCalledFunction(CI,
Ops, KnownElemTy, Incomplete);
1295 else if (HaveFunPtrs)
1296 deduceOperandElementTypeFunctionPointer(CI,
Ops, KnownElemTy,
1301 if (!KnownElemTy ||
Ops.size() == 0)
1306 for (
auto &OpIt :
Ops) {
1310 Type *AskTy =
nullptr;
1311 CallInst *AskCI =
nullptr;
1312 if (IsPostprocessing && AskOps) {
1318 if (Ty == KnownElemTy)
1321 Type *OpTy =
Op->getType();
1322 if (
Op->hasUseList() &&
1329 else if (!IsPostprocessing)
1333 if (AssignCI ==
nullptr) {
1342 DenseSet<std::pair<Value *, Value *>> VisitedSubst{
1343 std::make_pair(
I,
Op)};
1344 propagateElemTypeRec(
Op, KnownElemTy, PrevElemTy, VisitedSubst);
1348 CallInst *PtrCastI =
1349 buildSpvPtrcast(
I->getParent()->getParent(),
Op, KnownElemTy);
1350 if (OpIt.second == std::numeric_limits<unsigned>::max())
1353 I->setOperand(OpIt.second, PtrCastI);
1356 TypeValidated.insert(
I);
1359void SPIRVEmitIntrinsics::replaceMemInstrUses(Instruction *Old,
1364 if (isAssignTypeInstr(U)) {
1365 B.SetInsertPoint(U);
1366 SmallVector<Value *, 2>
Args = {
New,
U->getOperand(1)};
1367 CallInst *AssignCI =
1368 B.CreateIntrinsic(Intrinsic::spv_assign_type, {
New->getType()},
Args);
1370 U->eraseFromParent();
1373 U->replaceUsesOfWith(Old, New);
1378 New->copyMetadata(*Old);
1382void SPIRVEmitIntrinsics::preprocessUndefs(
IRBuilder<> &
B) {
1383 std::queue<Instruction *> Worklist;
1387 while (!Worklist.empty()) {
1389 bool BPrepared =
false;
1392 for (
auto &
Op :
I->operands()) {
1394 if (!AggrUndef || !
Op->getType()->isAggregateType())
1401 auto *IntrUndef =
B.CreateIntrinsic(Intrinsic::spv_undef, {});
1402 Worklist.push(IntrUndef);
1403 I->replaceUsesOfWith(
Op, IntrUndef);
1404 AggrConsts[IntrUndef] = AggrUndef;
1405 AggrConstTypes[IntrUndef] = AggrUndef->getType();
1410void SPIRVEmitIntrinsics::preprocessCompositeConstants(
IRBuilder<> &
B) {
1411 std::queue<Instruction *> Worklist;
1415 while (!Worklist.empty()) {
1416 auto *
I = Worklist.front();
1419 bool KeepInst =
false;
1420 for (
const auto &
Op :
I->operands()) {
1422 Type *ResTy =
nullptr;
1425 ResTy = COp->getType();
1437 ResTy =
Op->getType()->isVectorTy() ? COp->getType() :
B.getInt32Ty();
1442 for (
unsigned i = 0; i < COp->getNumElements(); ++i)
1443 Args.push_back(COp->getElementAsConstant(i));
1447 IsPhi ?
B.SetInsertPointPastAllocas(
I->getParent()->getParent())
1448 :
B.SetInsertPoint(
I);
1452 B.CreateIntrinsic(Intrinsic::spv_const_composite, {ResTy}, {
Args});
1456 AggrConsts[CI] = AggrConst;
1457 AggrConstTypes[CI] = deduceNestedTypeHelper(AggrConst,
false);
1469 B.CreateIntrinsic(Intrinsic::spv_assign_decoration, {
I->getType()},
1474 unsigned RoundingModeDeco,
1481 ConstantInt::get(
Int32Ty, SPIRV::Decoration::FPRoundingMode)),
1490 MDNode *SaturatedConversionNode =
1492 Int32Ty, SPIRV::Decoration::SaturatedConversion))});
1499 if (Fu->isIntrinsic()) {
1500 unsigned const int IntrinsicId = Fu->getIntrinsicID();
1501 switch (IntrinsicId) {
1502 case Intrinsic::fptosi_sat:
1503 case Intrinsic::fptoui_sat:
1522 MDString *ConstraintString =
MDString::get(Ctx,
IA->getConstraintString());
1530 B.SetInsertPoint(&
Call);
1531 B.CreateIntrinsic(Intrinsic::spv_inline_asm, {
Args});
1536void SPIRVEmitIntrinsics::useRoundingMode(ConstrainedFPIntrinsic *FPI,
1539 if (!
RM.has_value())
1541 unsigned RoundingModeDeco = std::numeric_limits<unsigned>::max();
1542 switch (
RM.value()) {
1546 case RoundingMode::NearestTiesToEven:
1547 RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTE;
1549 case RoundingMode::TowardNegative:
1550 RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTN;
1552 case RoundingMode::TowardPositive:
1553 RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTP;
1555 case RoundingMode::TowardZero:
1556 RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTZ;
1558 case RoundingMode::Dynamic:
1559 case RoundingMode::NearestTiesToAway:
1563 if (RoundingModeDeco == std::numeric_limits<unsigned>::max())
1569Instruction *SPIRVEmitIntrinsics::visitSwitchInst(SwitchInst &
I) {
1573 B.SetInsertPoint(&
I);
1576 Args.push_back(
I.getCondition());
1579 for (
auto &Case :
I.cases()) {
1580 Args.push_back(Case.getCaseValue());
1581 BBCases.
push_back(Case.getCaseSuccessor());
1584 CallInst *NewI =
B.CreateIntrinsic(Intrinsic::spv_switch,
1585 {
I.getOperand(0)->getType()}, {
Args});
1589 I.eraseFromParent();
1592 B.SetInsertPoint(ParentBB);
1593 IndirectBrInst *BrI =
B.CreateIndirectBr(
1596 for (BasicBlock *BBCase : BBCases)
1602 if (
GEP->getNumIndices() == 0)
1605 return CI->getZExtValue() == 0;
1610Instruction *SPIRVEmitIntrinsics::visitGetElementPtrInst(GetElementPtrInst &
I) {
1612 B.SetInsertPoint(&
I);
1620 if (
I.getSourceElementType() ==
1621 IntegerType::getInt8Ty(CurrF->
getContext())) {
1622 return buildLogicalAccessChainFromGEP(
I);
1627 Value *PtrOp =
I.getPointerOperand();
1628 Type *SrcElemTy =
I.getSourceElementType();
1629 Type *DeducedPointeeTy = deduceElementType(PtrOp,
true);
1632 if (ArrTy->getElementType() == SrcElemTy) {
1634 Type *FirstIdxType =
I.getOperand(1)->getType();
1635 NewIndices.
push_back(ConstantInt::get(FirstIdxType, 0));
1636 for (
Value *Idx :
I.indices())
1641 Args.push_back(
B.getInt1(
I.isInBounds()));
1642 Args.push_back(
I.getPointerOperand());
1645 auto *NewI =
B.CreateIntrinsic(Intrinsic::spv_gep, {
Types}, {
Args});
1646 replaceAllUsesWithAndErase(
B, &
I, NewI);
1654 Args.push_back(
B.getInt1(
I.isInBounds()));
1656 auto *NewI =
B.CreateIntrinsic(Intrinsic::spv_gep, {
Types}, {
Args});
1657 replaceAllUsesWithAndErase(
B, &
I, NewI);
1661Instruction *SPIRVEmitIntrinsics::visitBitCastInst(BitCastInst &
I) {
1663 B.SetInsertPoint(&
I);
1672 I.eraseFromParent();
1678 auto *NewI =
B.CreateIntrinsic(Intrinsic::spv_bitcast, {
Types}, {
Args});
1679 replaceAllUsesWithAndErase(
B, &
I, NewI);
1683void SPIRVEmitIntrinsics::insertAssignPtrTypeTargetExt(
1685 Type *VTy =
V->getType();
1690 if (ElemTy != AssignedType)
1703 if (CurrentType == AssignedType)
1710 " for value " +
V->getName(),
1718void SPIRVEmitIntrinsics::replacePointerOperandWithPtrCast(
1719 Instruction *
I,
Value *Pointer,
Type *ExpectedElementType,
1721 TypeValidated.insert(
I);
1724 Type *PointerElemTy = deduceElementTypeHelper(Pointer,
false);
1725 if (PointerElemTy == ExpectedElementType ||
1731 MetadataAsValue *VMD =
buildMD(ExpectedElementVal);
1733 bool FirstPtrCastOrAssignPtrType =
true;
1739 for (
auto User :
Pointer->users()) {
1742 (
II->getIntrinsicID() != Intrinsic::spv_assign_ptr_type &&
1743 II->getIntrinsicID() != Intrinsic::spv_ptrcast) ||
1744 II->getOperand(0) != Pointer)
1749 FirstPtrCastOrAssignPtrType =
false;
1750 if (
II->getOperand(1) != VMD ||
1757 if (
II->getIntrinsicID() != Intrinsic::spv_ptrcast)
1762 if (
II->getParent() !=
I->getParent())
1765 I->setOperand(OperandToReplace,
II);
1771 if (FirstPtrCastOrAssignPtrType) {
1776 }
else if (isTodoType(Pointer)) {
1777 eraseTodoType(Pointer);
1784 DenseSet<std::pair<Value *, Value *>> VisitedSubst{
1785 std::make_pair(
I, Pointer)};
1787 propagateElemType(Pointer, PrevElemTy, VisitedSubst);
1799 auto *PtrCastI =
B.CreateIntrinsic(Intrinsic::spv_ptrcast, {
Types},
Args);
1805void SPIRVEmitIntrinsics::insertPtrCastOrAssignTypeInstr(Instruction *
I,
1810 replacePointerOperandWithPtrCast(
1811 I,
SI->getValueOperand(), IntegerType::getInt8Ty(CurrF->
getContext()),
1817 Type *OpTy =
Op->getType();
1820 if (OpTy ==
Op->getType())
1821 OpTy = deduceElementTypeByValueDeep(OpTy,
Op,
false);
1822 replacePointerOperandWithPtrCast(
I, Pointer, OpTy, 1,
B);
1827 Type *OpTy = LI->getType();
1832 Type *NewOpTy = OpTy;
1833 OpTy = deduceElementTypeByValueDeep(OpTy, LI,
false);
1834 if (OpTy == NewOpTy)
1835 insertTodoType(Pointer);
1838 replacePointerOperandWithPtrCast(
I, Pointer, OpTy, 0,
B);
1843 Type *OpTy =
nullptr;
1855 OpTy = GEPI->getSourceElementType();
1857 replacePointerOperandWithPtrCast(
I, Pointer, OpTy, 0,
B);
1859 insertTodoType(Pointer);
1871 std::string DemangledName =
1875 bool HaveTypes =
false;
1893 for (User *U : CalledArg->
users()) {
1895 if ((ElemTy = deduceElementTypeHelper(Inst,
false)) !=
nullptr)
1901 HaveTypes |= ElemTy !=
nullptr;
1906 if (DemangledName.empty() && !HaveTypes)
1924 Type *ExpectedType =
1926 if (!ExpectedType && !DemangledName.empty())
1927 ExpectedType = SPIRV::parseBuiltinCallArgumentBaseType(
1928 DemangledName,
OpIdx,
I->getContext());
1929 if (!ExpectedType || ExpectedType->
isVoidTy())
1937 replacePointerOperandWithPtrCast(CI, ArgOperand, ExpectedType,
OpIdx,
B);
1941Instruction *SPIRVEmitIntrinsics::visitInsertElementInst(InsertElementInst &
I) {
1948 I.getOperand(1)->getType(),
1949 I.getOperand(2)->getType()};
1951 B.SetInsertPoint(&
I);
1953 auto *NewI =
B.CreateIntrinsic(Intrinsic::spv_insertelt, {
Types}, {
Args});
1954 replaceAllUsesWithAndErase(
B, &
I, NewI);
1959SPIRVEmitIntrinsics::visitExtractElementInst(ExtractElementInst &
I) {
1966 B.SetInsertPoint(&
I);
1968 I.getIndexOperand()->getType()};
1969 SmallVector<Value *, 2>
Args = {
I.getVectorOperand(),
I.getIndexOperand()};
1970 auto *NewI =
B.CreateIntrinsic(Intrinsic::spv_extractelt, {
Types}, {
Args});
1971 replaceAllUsesWithAndErase(
B, &
I, NewI);
1975Instruction *SPIRVEmitIntrinsics::visitInsertValueInst(InsertValueInst &
I) {
1977 B.SetInsertPoint(&
I);
1980 Value *AggregateOp =
I.getAggregateOperand();
1984 Args.push_back(AggregateOp);
1985 Args.push_back(
I.getInsertedValueOperand());
1986 for (
auto &
Op :
I.indices())
1987 Args.push_back(
B.getInt32(
Op));
1989 B.CreateIntrinsic(Intrinsic::spv_insertv, {
Types}, {
Args});
1990 replaceMemInstrUses(&
I, NewI,
B);
1994Instruction *SPIRVEmitIntrinsics::visitExtractValueInst(ExtractValueInst &
I) {
1995 if (
I.getAggregateOperand()->getType()->isAggregateType())
1998 B.SetInsertPoint(&
I);
2000 for (
auto &
Op :
I.indices())
2001 Args.push_back(
B.getInt32(
Op));
2003 B.CreateIntrinsic(Intrinsic::spv_extractv, {
I.getType()}, {
Args});
2004 replaceAllUsesWithAndErase(
B, &
I, NewI);
2008Instruction *SPIRVEmitIntrinsics::visitLoadInst(LoadInst &
I) {
2009 if (!
I.getType()->isAggregateType())
2012 B.SetInsertPoint(&
I);
2013 TrackConstants =
false;
2018 B.CreateIntrinsic(Intrinsic::spv_load, {
I.getOperand(0)->getType()},
2019 {
I.getPointerOperand(),
B.getInt16(Flags),
2020 B.getInt8(
I.getAlign().value())});
2021 replaceMemInstrUses(&
I, NewI,
B);
2025Instruction *SPIRVEmitIntrinsics::visitStoreInst(StoreInst &
I) {
2029 B.SetInsertPoint(&
I);
2030 TrackConstants =
false;
2034 auto *PtrOp =
I.getPointerOperand();
2035 auto *NewI =
B.CreateIntrinsic(
2036 Intrinsic::spv_store, {
I.getValueOperand()->getType(), PtrOp->
getType()},
2037 {
I.getValueOperand(), PtrOp,
B.getInt16(Flags),
2038 B.getInt8(
I.getAlign().value())});
2040 I.eraseFromParent();
2044Instruction *SPIRVEmitIntrinsics::visitAllocaInst(AllocaInst &
I) {
2045 Value *ArraySize =
nullptr;
2046 if (
I.isArrayAllocation()) {
2049 SPIRV::Extension::SPV_INTEL_variable_length_array))
2051 "array allocation: this instruction requires the following "
2052 "SPIR-V extension: SPV_INTEL_variable_length_array",
2054 ArraySize =
I.getArraySize();
2057 B.SetInsertPoint(&
I);
2058 TrackConstants =
false;
2059 Type *PtrTy =
I.getType();
2062 ?
B.CreateIntrinsic(Intrinsic::spv_alloca_array,
2063 {PtrTy, ArraySize->
getType()},
2064 {ArraySize,
B.getInt8(
I.getAlign().value())})
2065 :
B.CreateIntrinsic(
Intrinsic::spv_alloca, {PtrTy},
2066 {
B.getInt8(
I.getAlign().value())});
2067 replaceAllUsesWithAndErase(
B, &
I, NewI);
2071Instruction *SPIRVEmitIntrinsics::visitAtomicCmpXchgInst(AtomicCmpXchgInst &
I) {
2072 assert(
I.getType()->isAggregateType() &&
"Aggregate result is expected");
2074 B.SetInsertPoint(&
I);
2076 Args.push_back(
B.getInt32(
2077 static_cast<uint32_t
>(
getMemScope(
I.getContext(),
I.getSyncScopeID()))));
2078 Args.push_back(
B.getInt32(
2080 Args.push_back(
B.getInt32(
2082 auto *NewI =
B.CreateIntrinsic(Intrinsic::spv_cmpxchg,
2083 {
I.getPointerOperand()->getType()}, {
Args});
2084 replaceMemInstrUses(&
I, NewI,
B);
2088Instruction *SPIRVEmitIntrinsics::visitUnreachableInst(UnreachableInst &
I) {
2090 B.SetInsertPoint(&
I);
2091 B.CreateIntrinsic(Intrinsic::spv_unreachable, {});
2095void SPIRVEmitIntrinsics::processGlobalValue(GlobalVariable &GV,
2098 static const StringSet<> ArtificialGlobals{
"llvm.global.annotations",
2099 "llvm.compiler.used"};
2109 deduceElementTypeHelper(&GV,
false);
2113 auto *InitInst =
B.CreateIntrinsic(Intrinsic::spv_init_global,
2115 InitInst->setArgOperand(1, Init);
2118 B.CreateIntrinsic(Intrinsic::spv_unref_global, GV.
getType(), &GV);
2124bool SPIRVEmitIntrinsics::insertAssignPtrTypeIntrs(Instruction *
I,
2126 bool UnknownElemTypeI8) {
2132 if (
Type *ElemTy = deduceElementType(
I, UnknownElemTypeI8)) {
2139void SPIRVEmitIntrinsics::insertAssignTypeIntrs(Instruction *
I,
2142 static StringMap<unsigned> ResTypeWellKnown = {
2143 {
"async_work_group_copy", WellKnownTypes::Event},
2144 {
"async_work_group_strided_copy", WellKnownTypes::Event},
2145 {
"__spirv_GroupAsyncCopy", WellKnownTypes::Event}};
2149 bool IsKnown =
false;
2154 std::string DemangledName =
2157 if (DemangledName.length() > 0)
2159 SPIRV::lookupBuiltinNameHelper(DemangledName, &DecorationId);
2160 auto ResIt = ResTypeWellKnown.
find(DemangledName);
2161 if (ResIt != ResTypeWellKnown.
end()) {
2164 switch (ResIt->second) {
2165 case WellKnownTypes::Event:
2172 switch (DecorationId) {
2175 case FPDecorationId::SAT:
2178 case FPDecorationId::RTE:
2180 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTE,
B);
2182 case FPDecorationId::RTZ:
2184 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTZ,
B);
2186 case FPDecorationId::RTP:
2188 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTP,
B);
2190 case FPDecorationId::RTN:
2192 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTN,
B);
2198 Type *Ty =
I->getType();
2201 Type *TypeToAssign = Ty;
2203 if (
II->getIntrinsicID() == Intrinsic::spv_const_composite ||
2204 II->getIntrinsicID() == Intrinsic::spv_undef) {
2205 auto It = AggrConstTypes.find(
II);
2206 if (It == AggrConstTypes.end())
2208 TypeToAssign = It->second;
2214 for (
const auto &
Op :
I->operands()) {
2221 Type *OpTy =
Op->getType();
2223 CallInst *AssignCI =
2228 Type *OpTy =
Op->getType();
2243 CallInst *AssignCI =
2253bool SPIRVEmitIntrinsics::shouldTryToAddMemAliasingDecoration(
2254 Instruction *Inst) {
2256 if (!STI->
canUseExtension(SPIRV::Extension::SPV_INTEL_memory_access_aliasing))
2267 case Intrinsic::spv_load:
2268 case Intrinsic::spv_store:
2275 const std::string
Prefix =
"__spirv_Atomic";
2276 const bool IsAtomic =
Name.find(Prefix) == 0;
2284void SPIRVEmitIntrinsics::insertSpirvDecorations(Instruction *
I,
2286 if (MDNode *MD =
I->getMetadata(
"spirv.Decorations")) {
2288 B.CreateIntrinsic(Intrinsic::spv_assign_decoration, {
I->getType()},
2293 auto processMemAliasingDecoration = [&](
unsigned Kind) {
2294 if (MDNode *AliasListMD =
I->getMetadata(Kind)) {
2295 if (shouldTryToAddMemAliasingDecoration(
I)) {
2296 uint32_t Dec =
Kind == LLVMContext::MD_alias_scope
2297 ? SPIRV::Decoration::AliasScopeINTEL
2298 : SPIRV::Decoration::NoAliasINTEL;
2300 I, ConstantInt::get(
B.getInt32Ty(), Dec),
2303 B.CreateIntrinsic(Intrinsic::spv_assign_aliasing_decoration,
2304 {
I->getType()}, {
Args});
2308 processMemAliasingDecoration(LLVMContext::MD_alias_scope);
2309 processMemAliasingDecoration(LLVMContext::MD_noalias);
2312 if (MDNode *MD =
I->getMetadata(LLVMContext::MD_fpmath)) {
2314 bool AllowFPMaxError =
2316 if (!AllowFPMaxError)
2320 B.CreateIntrinsic(Intrinsic::spv_assign_fpmaxerror_decoration,
2329 &FPFastMathDefaultInfoMap,
2331 auto it = FPFastMathDefaultInfoMap.
find(
F);
2332 if (it != FPFastMathDefaultInfoMap.
end())
2340 SPIRV::FPFastMathMode::None);
2342 SPIRV::FPFastMathMode::None);
2344 SPIRV::FPFastMathMode::None);
2345 return FPFastMathDefaultInfoMap[
F] = std::move(FPFastMathDefaultInfoVec);
2351 size_t BitWidth = Ty->getScalarSizeInBits();
2355 assert(Index >= 0 && Index < 3 &&
2356 "Expected FPFastMathDefaultInfo for half, float, or double");
2357 assert(FPFastMathDefaultInfoVec.
size() == 3 &&
2358 "Expected FPFastMathDefaultInfoVec to have exactly 3 elements");
2359 return FPFastMathDefaultInfoVec[Index];
2362void SPIRVEmitIntrinsics::insertConstantsForFPFastMathDefault(
Module &M) {
2364 if (!
ST->canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2))
2373 auto Node =
M.getNamedMetadata(
"spirv.ExecutionMode");
2375 if (!
M.getNamedMetadata(
"opencl.enable.FP_CONTRACT")) {
2383 ConstantInt::get(Type::getInt32Ty(
M.getContext()), 0);
2386 [[maybe_unused]] GlobalVariable *GV =
2387 new GlobalVariable(M,
2388 Type::getInt32Ty(
M.getContext()),
2402 DenseMap<Function *, SPIRV::FPFastMathDefaultInfoVector>
2403 FPFastMathDefaultInfoMap;
2405 for (
unsigned i = 0; i <
Node->getNumOperands(); i++) {
2414 if (EM == SPIRV::ExecutionMode::FPFastMathDefault) {
2416 "Expected 4 operands for FPFastMathDefault");
2422 SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
2424 SPIRV::FPFastMathDefaultInfo &
Info =
2427 Info.FPFastMathDefault =
true;
2428 }
else if (EM == SPIRV::ExecutionMode::ContractionOff) {
2430 "Expected no operands for ContractionOff");
2434 SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
2436 for (SPIRV::FPFastMathDefaultInfo &
Info : FPFastMathDefaultInfoVec) {
2437 Info.ContractionOff =
true;
2439 }
else if (EM == SPIRV::ExecutionMode::SignedZeroInfNanPreserve) {
2441 "Expected 1 operand for SignedZeroInfNanPreserve");
2442 unsigned TargetWidth =
2447 SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
2451 assert(Index >= 0 && Index < 3 &&
2452 "Expected FPFastMathDefaultInfo for half, float, or double");
2453 assert(FPFastMathDefaultInfoVec.
size() == 3 &&
2454 "Expected FPFastMathDefaultInfoVec to have exactly 3 elements");
2455 FPFastMathDefaultInfoVec[
Index].SignedZeroInfNanPreserve =
true;
2459 std::unordered_map<unsigned, GlobalVariable *> GlobalVars;
2460 for (
auto &[Func, FPFastMathDefaultInfoVec] : FPFastMathDefaultInfoMap) {
2461 if (FPFastMathDefaultInfoVec.
empty())
2464 for (
const SPIRV::FPFastMathDefaultInfo &
Info : FPFastMathDefaultInfoVec) {
2465 assert(
Info.Ty &&
"Expected target type for FPFastMathDefaultInfo");
2468 if (Flags == SPIRV::FPFastMathMode::None && !
Info.ContractionOff &&
2469 !
Info.SignedZeroInfNanPreserve && !
Info.FPFastMathDefault)
2473 if (
Info.ContractionOff && (Flags & SPIRV::FPFastMathMode::AllowContract))
2475 "and AllowContract");
2477 if (
Info.SignedZeroInfNanPreserve &&
2479 (SPIRV::FPFastMathMode::NotNaN | SPIRV::FPFastMathMode::NotInf |
2480 SPIRV::FPFastMathMode::NSZ))) {
2481 if (
Info.FPFastMathDefault)
2483 "SignedZeroInfNanPreserve but at least one of "
2484 "NotNaN/NotInf/NSZ is enabled.");
2487 if ((Flags & SPIRV::FPFastMathMode::AllowTransform) &&
2488 !((Flags & SPIRV::FPFastMathMode::AllowReassoc) &&
2489 (Flags & SPIRV::FPFastMathMode::AllowContract))) {
2491 "AllowTransform requires AllowReassoc and "
2492 "AllowContract to be set.");
2495 auto it = GlobalVars.find(Flags);
2496 GlobalVariable *GV =
nullptr;
2497 if (it != GlobalVars.end()) {
2503 ConstantInt::get(Type::getInt32Ty(
M.getContext()), Flags);
2506 GV =
new GlobalVariable(M,
2507 Type::getInt32Ty(
M.getContext()),
2512 GlobalVars[
Flags] = GV;
2518void SPIRVEmitIntrinsics::processInstrAfterVisit(Instruction *
I,
2521 bool IsConstComposite =
2522 II &&
II->getIntrinsicID() == Intrinsic::spv_const_composite;
2523 if (IsConstComposite && TrackConstants) {
2525 auto t = AggrConsts.find(
I);
2526 assert(t != AggrConsts.end());
2529 {
II->getType(),
II->getType()}, t->second,
I, {},
B);
2531 NewOp->setArgOperand(0,
I);
2534 for (
const auto &
Op :
I->operands()) {
2538 unsigned OpNo =
Op.getOperandNo();
2539 if (
II && ((
II->getIntrinsicID() == Intrinsic::spv_gep && OpNo == 0) ||
2540 (
II->paramHasAttr(OpNo, Attribute::ImmArg))))
2544 IsPhi ?
B.SetInsertPointPastAllocas(
I->getParent()->getParent())
2545 :
B.SetInsertPoint(
I);
2548 Type *OpTy =
Op->getType();
2556 {OpTy, OpTyVal->
getType()},
Op, OpTyVal, {},
B);
2558 if (!IsConstComposite &&
isPointerTy(OpTy) && OpElemTy !=
nullptr &&
2559 OpElemTy != IntegerType::getInt8Ty(
I->getContext())) {
2561 SmallVector<Value *, 2>
Args = {
2564 CallInst *PtrCasted =
2565 B.CreateIntrinsic(Intrinsic::spv_ptrcast, {
Types},
Args);
2570 I->setOperand(OpNo, NewOp);
2572 if (Named.insert(
I).second)
2576Type *SPIRVEmitIntrinsics::deduceFunParamElementType(Function *
F,
2578 std::unordered_set<Function *> FVisited;
2579 return deduceFunParamElementType(
F,
OpIdx, FVisited);
2582Type *SPIRVEmitIntrinsics::deduceFunParamElementType(
2583 Function *
F,
unsigned OpIdx, std::unordered_set<Function *> &FVisited) {
2585 if (!FVisited.insert(
F).second)
2588 std::unordered_set<Value *> Visited;
2591 for (User *U :
F->users()) {
2603 if (
Type *Ty = deduceElementTypeHelper(OpArg, Visited,
false))
2606 for (User *OpU : OpArg->
users()) {
2608 if (!Inst || Inst == CI)
2611 if (
Type *Ty = deduceElementTypeHelper(Inst, Visited,
false))
2618 if (FVisited.find(OuterF) != FVisited.end())
2620 for (
unsigned i = 0; i < OuterF->
arg_size(); ++i) {
2621 if (OuterF->
getArg(i) == OpArg) {
2622 Lookup.push_back(std::make_pair(OuterF, i));
2629 for (
auto &Pair :
Lookup) {
2630 if (
Type *Ty = deduceFunParamElementType(Pair.first, Pair.second, FVisited))
2637void SPIRVEmitIntrinsics::processParamTypesByFunHeader(Function *
F,
2639 B.SetInsertPointPastAllocas(
F);
2653 for (User *U :
F->users()) {
2669 for (User *U : Arg->
users()) {
2673 CI->
getParent()->getParent() == CurrF) {
2675 deduceOperandElementTypeFunctionPointer(CI,
Ops, ElemTy,
false);
2686void SPIRVEmitIntrinsics::processParamTypes(Function *
F,
IRBuilder<> &
B) {
2687 B.SetInsertPointPastAllocas(
F);
2693 if (!ElemTy && (ElemTy = deduceFunParamElementType(
F,
OpIdx)) !=
nullptr) {
2695 DenseSet<std::pair<Value *, Value *>> VisitedSubst;
2697 propagateElemType(Arg, IntegerType::getInt8Ty(
F->getContext()),
2709 bool IsNewFTy =
false;
2725bool SPIRVEmitIntrinsics::processFunctionPointers(
Module &M) {
2728 if (
F.isIntrinsic())
2730 if (
F.isDeclaration()) {
2731 for (User *U :
F.users()) {
2744 for (User *U :
F.users()) {
2746 if (!
II ||
II->arg_size() != 3 ||
II->getOperand(0) != &
F)
2748 if (
II->getIntrinsicID() == Intrinsic::spv_assign_ptr_type ||
2749 II->getIntrinsicID() == Intrinsic::spv_ptrcast) {
2756 if (Worklist.
empty())
2762 "cannot allocate a name for the internal service function");
2763 LLVMContext &Ctx =
M.getContext();
2771 for (Function *
F : Worklist) {
2773 for (
const auto &Arg :
F->args())
2775 IRB.CreateCall(
F, Args);
2777 IRB.CreateRetVoid();
2783void SPIRVEmitIntrinsics::applyDemangledPtrArgTypes(
IRBuilder<> &
B) {
2784 DenseMap<Function *, CallInst *> Ptrcasts;
2785 for (
auto It : FDeclPtrTys) {
2787 for (
auto *U :
F->users()) {
2792 for (
auto [Idx, ElemTy] : It.second) {
2800 B.SetInsertPointPastAllocas(Arg->
getParent());
2805 replaceUsesOfWithSpvPtrcast(Param,
normalizeType(ElemTy), CI,
2814 .getFirstNonPHIOrDbgOrAlloca());
2835SPIRVEmitIntrinsics::simplifyZeroLengthArrayGepInst(GetElementPtrInst *
GEP) {
2842 Type *SrcTy =
GEP->getSourceElementType();
2843 SmallVector<Value *, 8> Indices(
GEP->indices());
2845 if (ArrTy && ArrTy->getNumElements() == 0 &&
2847 Indices.erase(Indices.begin());
2848 SrcTy = ArrTy->getElementType();
2850 GEP->getNoWrapFlags(),
"",
2851 GEP->getIterator());
2856bool SPIRVEmitIntrinsics::runOnFunction(Function &Func) {
2857 if (
Func.isDeclaration())
2861 GR =
ST.getSPIRVGlobalRegistry();
2865 ST.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers);
2870 AggrConstTypes.clear();
2875 SmallPtrSet<Instruction *, 4> DeadInsts;
2878 if (!
Ref || GR->findDeducedElementType(
Ref))
2881 GetElementPtrInst *NewGEP = simplifyZeroLengthArrayGepInst(
Ref);
2883 Ref->replaceAllUsesWith(NewGEP);
2887 if (
Type *GepTy = getGEPType(
Ref))
2891 for (
auto *
I : DeadInsts) {
2892 assert(
I->use_empty() &&
"Dead instruction should not have any uses left");
2893 I->eraseFromParent();
2896 processParamTypesByFunHeader(CurrF,
B);
2905 Type *ElTy =
SI->getValueOperand()->getType();
2910 B.SetInsertPoint(&
Func.getEntryBlock(),
Func.getEntryBlock().begin());
2911 for (
auto &GV :
Func.getParent()->globals())
2912 processGlobalValue(GV,
B);
2914 preprocessUndefs(
B);
2915 preprocessCompositeConstants(
B);
2919 applyDemangledPtrArgTypes(
B);
2922 for (
auto &
I : Worklist) {
2924 if (isConvergenceIntrinsic(
I))
2927 bool Postpone = insertAssignPtrTypeIntrs(
I,
B,
false);
2929 insertAssignTypeIntrs(
I,
B);
2930 insertPtrCastOrAssignTypeInstr(
I,
B);
2934 if (Postpone && !GR->findAssignPtrTypeInstr(
I))
2935 insertAssignPtrTypeIntrs(
I,
B,
true);
2938 useRoundingMode(FPI,
B);
2943 SmallPtrSet<Instruction *, 4> IncompleteRets;
2945 deduceOperandElementType(&
I, &IncompleteRets);
2949 for (BasicBlock &BB : Func)
2950 for (PHINode &Phi : BB.
phis())
2952 deduceOperandElementType(&Phi,
nullptr);
2954 for (
auto *
I : Worklist) {
2955 TrackConstants =
true;
2965 if (isConvergenceIntrinsic(
I))
2969 processInstrAfterVisit(
I,
B);
2976bool SPIRVEmitIntrinsics::postprocessTypes(
Module &M) {
2977 if (!GR || TodoTypeSz == 0)
2980 unsigned SzTodo = TodoTypeSz;
2981 DenseMap<Value *, SmallPtrSet<Value *, 4>> ToProcess;
2986 CallInst *AssignCI = GR->findAssignPtrTypeInstr(
Op);
2987 Type *KnownTy = GR->findDeducedElementType(
Op);
2988 if (!KnownTy || !AssignCI)
2994 std::unordered_set<Value *> Visited;
2995 if (
Type *ElemTy = deduceElementTypeHelper(
Op, Visited,
false,
true)) {
2996 if (ElemTy != KnownTy) {
2997 DenseSet<std::pair<Value *, Value *>> VisitedSubst;
2998 propagateElemType(CI, ElemTy, VisitedSubst);
3005 if (
Op->hasUseList()) {
3006 for (User *U :
Op->users()) {
3013 if (TodoTypeSz == 0)
3018 SmallPtrSet<Instruction *, 4> IncompleteRets;
3020 auto It = ToProcess.
find(&
I);
3021 if (It == ToProcess.
end())
3023 It->second.remove_if([
this](
Value *V) {
return !isTodoType(V); });
3024 if (It->second.size() == 0)
3026 deduceOperandElementType(&
I, &IncompleteRets, &It->second,
true);
3027 if (TodoTypeSz == 0)
3032 return SzTodo > TodoTypeSz;
3036void SPIRVEmitIntrinsics::parseFunDeclarations(
Module &M) {
3038 if (!
F.isDeclaration() ||
F.isIntrinsic())
3042 if (DemangledName.empty())
3046 auto [Grp, Opcode, ExtNo] = SPIRV::mapBuiltinToOpcode(
3047 DemangledName,
ST.getPreferredInstructionSet());
3048 if (Opcode != SPIRV::OpGroupAsyncCopy)
3051 SmallVector<unsigned> Idxs;
3060 LLVMContext &Ctx =
F.getContext();
3062 SPIRV::parseBuiltinTypeStr(TypeStrs, DemangledName, Ctx);
3063 if (!TypeStrs.
size())
3066 for (
unsigned Idx : Idxs) {
3067 if (Idx >= TypeStrs.
size())
3070 SPIRV::parseBuiltinCallArgumentType(TypeStrs[Idx].trim(), Ctx))
3073 FDeclPtrTys[&
F].push_back(std::make_pair(Idx, ElemTy));
3078bool SPIRVEmitIntrinsics::runOnModule(
Module &M) {
3081 parseFunDeclarations(M);
3082 insertConstantsForFPFastMathDefault(M);
3092 if (!
F.isDeclaration() && !
F.isIntrinsic()) {
3094 processParamTypes(&
F,
B);
3098 CanTodoType =
false;
3099 Changed |= postprocessTypes(M);
3102 Changed |= processFunctionPointers(M);
3108 return new SPIRVEmitIntrinsics(TM);
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Expand Atomic instructions
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
static void replaceAllUsesWith(Value *Old, Value *New, SmallPtrSet< BasicBlock *, 32 > &FreshBBs, bool IsHuge)
Replace all old uses with new ones, and push the updated BBs into FreshBBs.
This file defines the DenseSet and SmallDenseSet classes.
static bool runOnFunction(Function &F, bool PostInlining)
iv Induction Variable Users
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
Machine Check Debug Module
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
static unsigned getNumElements(Type *Ty)
static bool isMemInstrToReplace(Instruction *I)
static bool isAggrConstForceInt32(const Value *V)
static SPIRV::FPFastMathDefaultInfoVector & getOrCreateFPFastMathDefaultInfoVec(const Module &M, DenseMap< Function *, SPIRV::FPFastMathDefaultInfoVector > &FPFastMathDefaultInfoMap, Function *F)
static Type * getAtomicElemTy(SPIRVGlobalRegistry *GR, Instruction *I, Value *PointerOperand)
static void reportFatalOnTokenType(const Instruction *I)
static void setInsertPointAfterDef(IRBuilder<> &B, Instruction *I)
static void emitAssignName(Instruction *I, IRBuilder<> &B)
static Type * getPointeeTypeByCallInst(StringRef DemangledName, Function *CalledF, unsigned OpIdx)
static void createRoundingModeDecoration(Instruction *I, unsigned RoundingModeDeco, IRBuilder<> &B)
static void createDecorationIntrinsic(Instruction *I, MDNode *Node, IRBuilder<> &B)
static SPIRV::FPFastMathDefaultInfo & getFPFastMathDefaultInfo(SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec, const Type *Ty)
static bool IsKernelArgInt8(Function *F, StoreInst *SI)
static void addSaturatedDecorationToIntrinsic(Instruction *I, IRBuilder<> &B)
static bool isFirstIndexZero(const GetElementPtrInst *GEP)
static void setInsertPointSkippingPhis(IRBuilder<> &B, Instruction *I)
static FunctionType * getFunctionPointerElemType(Function *F, SPIRVGlobalRegistry *GR)
static void createSaturatedConversionDecoration(Instruction *I, IRBuilder<> &B)
static Type * restoreMutatedType(SPIRVGlobalRegistry *GR, Instruction *I, Type *Ty)
static bool requireAssignType(Instruction *I)
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
static void insertSpirvDecorations(MachineFunction &MF, SPIRVGlobalRegistry *GR, MachineIRBuilder MIB)
#define SPIRV_BACKEND_SERVICE_FUN_NAME
StringSet - A set-like wrapper for the StringMap.
static SymbolRef::Type getType(const Symbol *Sym)
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
static int Lookup(ArrayRef< TableEntry > Table, unsigned Opcode)
This class represents an incoming formal argument to a Function.
const Function * getParent() const
static unsigned getPointerOperandIndex()
static unsigned getPointerOperandIndex()
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
const Function * getParent() const
Return the enclosing method, or null if none.
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
LLVM_ABI LLVMContext & getContext() const
Get the context in which this basic block lives.
static LLVM_ABI BlockAddress * get(Function *F, BasicBlock *BB)
Return a BlockAddress for the specified function and basic block.
bool isInlineAsm() const
Check if this call is an inline asm statement.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
LLVM_ABI bool isIndirectCall() const
Return true if the callsite is an indirect call.
Value * getCalledOperand() const
Value * getArgOperand(unsigned i) const
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI std::optional< RoundingMode > getRoundingMode() const
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
static LLVM_ABI FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
void addFnAttr(Attribute::AttrKind Kind)
Add function attributes to this function.
static Function * Create(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
const DataLayout & getDataLayout() const
Get the data layout of the module this function belongs to.
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Type * getReturnType() const
Returns the type of the ret val.
Argument * getArg(unsigned i) const
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
static LLVM_ABI Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static unsigned getPointerOperandIndex()
PointerType * getType() const
Global values are always pointers.
@ PrivateLinkage
Like Internal, but omit from symbol table.
@ InternalLinkage
Rename collisions when linking (static functions).
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
LLVM_ABI void addDestination(BasicBlock *Dest)
Add a destination.
Base class for instruction visitors.
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
LLVM_ABI void copyMetadata(const Instruction &SrcInst, ArrayRef< unsigned > WL=ArrayRef< unsigned >())
Copy metadata from SrcInst to this instruction.
This is an important class for using LLVM in a threaded context.
static unsigned getPointerOperandIndex()
const MDOperand & getOperand(unsigned I) const
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
unsigned getNumOperands() const
Return number of MDNode operands.
static LLVM_ABI MDString * get(LLVMContext &Context, StringRef Str)
Flags
Flags values. These may be or'd together.
ModulePass class - This class is used to implement unstructured interprocedural optimizations and ana...
A Module instance is used to store all the information related to an LLVM module.
void addAssignPtrTypeInstr(Value *Val, CallInst *AssignPtrTyCI)
void buildAssignPtr(IRBuilder<> &B, Type *ElemTy, Value *Arg)
Type * findDeducedCompositeType(const Value *Val)
void replaceAllUsesWith(Value *Old, Value *New, bool DeleteOld=true)
void addDeducedElementType(Value *Val, Type *Ty)
void addReturnType(const Function *ArgF, TypedPointerType *DerivedTy)
Type * findMutated(const Value *Val)
void addDeducedCompositeType(Value *Val, Type *Ty)
void buildAssignType(IRBuilder<> &B, Type *Ty, Value *Arg)
Type * findDeducedElementType(const Value *Val)
void updateAssignType(CallInst *AssignCI, Value *Arg, Value *OfType)
CallInst * findAssignPtrTypeInstr(const Value *Val)
const SPIRVTargetLowering * getTargetLowering() const override
bool isLogicalSPIRV() const
bool canUseExtension(SPIRV::Extension::Extension E) const
const SPIRVSubtarget * getSubtargetImpl() const
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
static unsigned getPointerOperandIndex()
iterator find(StringRef Key)
StringRef - Represent a constant reference to a string, i.e.
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
bool contains(StringRef key) const
Check if the set contains the given key.
static LLVM_ABI StructType * create(LLVMContext &Context, StringRef Name)
This creates an identified struct.
static LLVM_ABI TargetExtType * get(LLVMContext &Context, StringRef Name, ArrayRef< Type * > Types={}, ArrayRef< unsigned > Ints={})
Return a target extension type having the specified name and optional type and integer parameters.
const STC & getSubtarget(const Function &F) const
This method returns a pointer to the specified type of TargetSubtargetInfo.
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
bool isArrayTy() const
True if this is an instance of ArrayType.
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
bool isPointerTy() const
True if this is an instance of PointerType.
Type * getArrayElementType() const
LLVM_ABI StringRef getTargetExtName() const
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
bool isStructTy() const
True if this is an instance of StructType.
bool isTargetExtTy() const
Return true if this is a target extension type.
bool isAggregateType() const
Return true if the type is an aggregate type.
static LLVM_ABI Type * getDoubleTy(LLVMContext &C)
static LLVM_ABI Type * getFloatTy(LLVMContext &C)
static LLVM_ABI Type * getHalfTy(LLVMContext &C)
bool isVoidTy() const
Return true if this is 'void'.
static LLVM_ABI bool isValidElementType(Type *ElemTy)
Return true if the specified type is valid as a element type.
static LLVM_ABI TypedPointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
void setOperand(unsigned i, Value *Val)
LLVM_ABI bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
iterator_range< user_iterator > users()
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
std::pair< iterator, bool > insert(const ValueT &V)
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
const ParentTy * getParent() const
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
@ SPIR_KERNEL
Used for SPIR kernel functions.
@ BasicBlock
Various leaf nodes.
bool match(Val *V, const Pattern &P)
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
DenseSetImpl< ValueT, DenseMap< ValueT, DenseSetEmpty, ValueInfoT, DenseSetPair< ValueT > >, ValueInfoT > DenseSet
ElementType
The element type of an SRV or UAV resource.
@ User
could "use" a pointer
NodeAddr< PhiNode * > Phi
NodeAddr< NodeBase * > Node
NodeAddr< FuncNode * > Func
friend class Instruction
Iterator for Instructions in a `BasicBlock.
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
bool getVacantFunctionName(Module &M, std::string &Name)
FunctionAddr VTableAddr Value
bool isTypedPointerWrapper(const TargetExtType *ExtTy)
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
ModulePass * createSPIRVEmitIntrinsicsPass(SPIRVTargetMachine *TM)
unsigned getPointerAddressSpace(const Type *T)
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
FunctionAddr VTableAddr uintptr_t uintptr_t Int32Ty
CallInst * buildIntrWithMD(Intrinsic::ID IntrID, ArrayRef< Type * > Types, Value *Arg, Value *Arg2, ArrayRef< Constant * > Imms, IRBuilder<> &B)
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
bool isNestedPointer(const Type *Ty)
MetadataAsValue * buildMD(Value *Arg)
std::string getOclOrSpirvBuiltinDemangledName(StringRef Name)
auto reverse(ContainerTy &&C)
Type * getTypedPointerWrapper(Type *ElemTy, unsigned AS)
bool isPointerTy(const Type *T)
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
SPIRV::Scope::Scope getMemScope(LLVMContext &Ctx, SyncScope::ID Id)
@ Ref
The access may reference the value stored in memory.
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
DWARFExpression::Operation Op
Type * getPointeeTypeByAttr(Argument *Arg)
bool hasPointeeTypeAttr(Argument *Arg)
constexpr unsigned BitWidth
bool isEquivalentTypes(Type *Ty1, Type *Ty2)
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
iterator_range< pointer_iterator< WrappedIteratorT > > make_pointer_range(RangeT &&Range)
bool hasInitializer(const GlobalVariable *GV)
Type * normalizeType(Type *Ty)
@ Enabled
Convert any .debug_str_offsets tables to DWARF64 if needed.
bool isSpvIntrinsic(const MachineInstr &MI, Intrinsic::ID IntrinsicID)
Type * getPointeeType(const Type *Ty)
PoisonValue * getNormalizedPoisonValue(Type *Ty)
bool isUntypedPointerTy(const Type *T)
Type * reconstitutePeeledArrayType(Type *Ty)
SPIRV::MemorySemantics::MemorySemantics getMemSemantics(AtomicOrdering Ord)
static size_t computeFPFastMathDefaultInfoVecIndex(size_t BitWidth)