24#include "llvm/IR/IntrinsicsSPIRV.h"
32#include <unordered_set>
56 cl::desc(
"Emit OpName for all instructions"),
60#define GET_BuiltinGroup_DECL
61#include "SPIRVGenTables.inc"
66class GlobalVariableUsers {
67 template <
typename T1,
typename T2>
68 using OneToManyMapTy = DenseMap<T1, SmallPtrSet<T2, 4>>;
70 OneToManyMapTy<const GlobalVariable *, const Function *> GlobalIsUsedByFun;
72 void collectGlobalUsers(
73 const GlobalVariable *GV,
74 OneToManyMapTy<const GlobalVariable *, const GlobalVariable *>
75 &GlobalIsUsedByGlobal) {
77 while (!
Stack.empty()) {
81 GlobalIsUsedByFun[GV].insert(
I->getFunction());
86 GlobalIsUsedByGlobal[GV].insert(UserGV);
91 Stack.append(
C->user_begin(),
C->user_end());
95 bool propagateGlobalToGlobalUsers(
96 OneToManyMapTy<const GlobalVariable *, const GlobalVariable *>
97 &GlobalIsUsedByGlobal) {
100 for (
auto &[GV, UserGlobals] : GlobalIsUsedByGlobal) {
101 OldUsersGlobals.
assign(UserGlobals.begin(), UserGlobals.end());
102 for (
const GlobalVariable *UserGV : OldUsersGlobals) {
103 auto It = GlobalIsUsedByGlobal.find(UserGV);
104 if (It == GlobalIsUsedByGlobal.end())
112 void propagateGlobalToFunctionReferences(
113 OneToManyMapTy<const GlobalVariable *, const GlobalVariable *>
114 &GlobalIsUsedByGlobal) {
115 for (
auto &[GV, UserGlobals] : GlobalIsUsedByGlobal) {
116 auto &UserFunctions = GlobalIsUsedByFun[GV];
117 for (
const GlobalVariable *UserGV : UserGlobals) {
118 auto It = GlobalIsUsedByFun.find(UserGV);
119 if (It == GlobalIsUsedByFun.end())
130 OneToManyMapTy<const GlobalVariable *, const GlobalVariable *>
131 GlobalIsUsedByGlobal;
132 GlobalIsUsedByFun.clear();
133 for (GlobalVariable &GV :
M.globals())
134 collectGlobalUsers(&GV, GlobalIsUsedByGlobal);
137 while (propagateGlobalToGlobalUsers(GlobalIsUsedByGlobal))
140 propagateGlobalToFunctionReferences(GlobalIsUsedByGlobal);
143 using FunctionSetType =
typename decltype(GlobalIsUsedByFun)::mapped_type;
144 const FunctionSetType &
145 getTransitiveUserFunctions(
const GlobalVariable &GV)
const {
146 auto It = GlobalIsUsedByFun.find(&GV);
147 if (It != GlobalIsUsedByFun.end())
150 static const FunctionSetType
Empty{};
155static bool isaGEP(
const Value *V) {
159class SPIRVEmitIntrinsics
161 public InstVisitor<SPIRVEmitIntrinsics, Instruction *> {
162 SPIRVTargetMachine *TM =
nullptr;
163 SPIRVGlobalRegistry *GR =
nullptr;
165 bool TrackConstants =
true;
166 bool HaveFunPtrs =
false;
167 DenseMap<Instruction *, Constant *> AggrConsts;
168 DenseMap<Instruction *, Type *> AggrConstTypes;
169 DenseSet<Instruction *> AggrStores;
170 GlobalVariableUsers GVUsers;
171 std::unordered_set<Value *> Named;
174 DenseMap<Function *, SmallVector<std::pair<unsigned, Type *>>> FDeclPtrTys;
177 bool CanTodoType =
true;
178 unsigned TodoTypeSz = 0;
179 DenseMap<Value *, bool> TodoType;
180 void insertTodoType(
Value *
Op) {
182 if (CanTodoType && !isaGEP(
Op)) {
183 auto It = TodoType.try_emplace(
Op,
true);
188 void eraseTodoType(
Value *
Op) {
189 auto It = TodoType.find(
Op);
190 if (It != TodoType.end() && It->second) {
198 auto It = TodoType.find(
Op);
199 return It != TodoType.end() && It->second;
203 std::unordered_set<Instruction *> TypeValidated;
206 enum WellKnownTypes { Event };
209 Type *deduceElementType(
Value *
I,
bool UnknownElemTypeI8);
210 Type *deduceElementTypeHelper(
Value *
I,
bool UnknownElemTypeI8);
211 Type *deduceElementTypeHelper(
Value *
I, std::unordered_set<Value *> &Visited,
212 bool UnknownElemTypeI8,
213 bool IgnoreKnownType =
false);
214 Type *deduceElementTypeByValueDeep(
Type *ValueTy,
Value *Operand,
215 bool UnknownElemTypeI8);
216 Type *deduceElementTypeByValueDeep(
Type *ValueTy,
Value *Operand,
217 std::unordered_set<Value *> &Visited,
218 bool UnknownElemTypeI8);
220 std::unordered_set<Value *> &Visited,
221 bool UnknownElemTypeI8);
223 bool UnknownElemTypeI8);
226 Type *deduceNestedTypeHelper(User *U,
bool UnknownElemTypeI8);
227 Type *deduceNestedTypeHelper(User *U,
Type *Ty,
228 std::unordered_set<Value *> &Visited,
229 bool UnknownElemTypeI8);
232 void deduceOperandElementType(Instruction *
I,
233 SmallPtrSet<Instruction *, 4> *IncompleteRets,
234 const SmallPtrSet<Value *, 4> *AskOps =
nullptr,
235 bool IsPostprocessing =
false);
240 Type *reconstructType(
Value *
Op,
bool UnknownElemTypeI8,
241 bool IsPostprocessing);
243 void replaceMemInstrUses(Instruction *Old, Instruction *New,
IRBuilder<> &
B);
245 bool insertAssignPtrTypeIntrs(Instruction *
I,
IRBuilder<> &
B,
246 bool UnknownElemTypeI8);
248 void insertAssignPtrTypeTargetExt(TargetExtType *AssignedType,
Value *V,
250 void replacePointerOperandWithPtrCast(Instruction *
I,
Value *Pointer,
251 Type *ExpectedElementType,
252 unsigned OperandToReplace,
254 void insertPtrCastOrAssignTypeInstr(Instruction *
I,
IRBuilder<> &
B);
255 bool shouldTryToAddMemAliasingDecoration(Instruction *Inst);
257 void insertConstantsForFPFastMathDefault(
Module &M);
258 void processGlobalValue(GlobalVariable &GV,
IRBuilder<> &
B);
260 void processParamTypesByFunHeader(Function *
F,
IRBuilder<> &
B);
261 Type *deduceFunParamElementType(Function *
F,
unsigned OpIdx);
262 Type *deduceFunParamElementType(Function *
F,
unsigned OpIdx,
263 std::unordered_set<Function *> &FVisited);
265 bool deduceOperandElementTypeCalledFunction(
267 Type *&KnownElemTy,
bool &Incomplete);
268 void deduceOperandElementTypeFunctionPointer(
270 Type *&KnownElemTy,
bool IsPostprocessing);
271 bool deduceOperandElementTypeFunctionRet(
272 Instruction *
I, SmallPtrSet<Instruction *, 4> *IncompleteRets,
273 const SmallPtrSet<Value *, 4> *AskOps,
bool IsPostprocessing,
276 CallInst *buildSpvPtrcast(Function *
F,
Value *
Op,
Type *ElemTy);
277 void replaceUsesOfWithSpvPtrcast(
Value *
Op,
Type *ElemTy, Instruction *
I,
278 DenseMap<Function *, CallInst *> Ptrcasts);
280 DenseSet<std::pair<Value *, Value *>> &VisitedSubst);
283 DenseSet<std::pair<Value *, Value *>> &VisitedSubst);
284 void propagateElemTypeRec(
Value *
Op,
Type *PtrElemTy,
Type *CastElemTy,
285 DenseSet<std::pair<Value *, Value *>> &VisitedSubst,
286 std::unordered_set<Value *> &Visited,
287 DenseMap<Function *, CallInst *> Ptrcasts);
290 void replaceAllUsesWithAndErase(
IRBuilder<> &
B, Instruction *Src,
291 Instruction *Dest,
bool DeleteOld =
true);
295 GetElementPtrInst *simplifyZeroLengthArrayGepInst(GetElementPtrInst *
GEP);
298 bool postprocessTypes(
Module &M);
299 bool processFunctionPointers(
Module &M);
300 void parseFunDeclarations(
Module &M);
301 void useRoundingMode(ConstrainedFPIntrinsic *FPI,
IRBuilder<> &
B);
302 bool processMaskedMemIntrinsic(IntrinsicInst &
I);
303 bool convertMaskedMemIntrinsics(
Module &M);
305 void emitUnstructuredLoopControls(Function &
F,
IRBuilder<> &
B);
321 bool walkLogicalAccessChain(
322 GetElementPtrInst &
GEP,
323 const std::function<
void(
Type *PointedType, uint64_t Index)>
332 Type *getGEPType(GetElementPtrInst *
GEP);
339 Type *getGEPTypeLogical(GetElementPtrInst *
GEP);
341 Instruction *buildLogicalAccessChainFromGEP(GetElementPtrInst &
GEP);
345 SPIRVEmitIntrinsics(SPIRVTargetMachine *TM =
nullptr)
346 : ModulePass(ID), TM(TM) {}
349 Instruction *visitGetElementPtrInst(GetElementPtrInst &
I);
352 Instruction *visitInsertElementInst(InsertElementInst &
I);
353 Instruction *visitExtractElementInst(ExtractElementInst &
I);
355 Instruction *visitExtractValueInst(ExtractValueInst &
I);
359 Instruction *visitAtomicCmpXchgInst(AtomicCmpXchgInst &
I);
// Human-readable pass name reported by the legacy pass manager
// (e.g. in -debug-pass output and crash diagnostics).
 363 StringRef getPassName()
const override {
return "SPIRV emit intrinsics"; }
365 bool runOnModule(
Module &M)
override;
367 void getAnalysisUsage(AnalysisUsage &AU)
const override {
368 ModulePass::getAnalysisUsage(AU);
377 return II->getIntrinsicID() == Intrinsic::experimental_convergence_entry ||
378 II->getIntrinsicID() == Intrinsic::experimental_convergence_loop ||
379 II->getIntrinsicID() == Intrinsic::experimental_convergence_anchor;
382bool expectIgnoredInIRTranslation(
const Instruction *
I) {
386 switch (
II->getIntrinsicID()) {
387 case Intrinsic::invariant_start:
388 case Intrinsic::spv_resource_handlefrombinding:
389 case Intrinsic::spv_resource_getpointer:
399 if (
II->getIntrinsicID() == Intrinsic::spv_ptrcast) {
400 Value *V =
II->getArgOperand(0);
401 return getPointerRoot(V);
409char SPIRVEmitIntrinsics::ID = 0;
432 B.SetInsertPoint(
I->getParent()->getFirstNonPHIOrDbgOrAlloca());
438 B.SetCurrentDebugLocation(
I->getDebugLoc());
439 if (
I->getType()->isVoidTy())
440 B.SetInsertPoint(
I->getNextNode());
442 B.SetInsertPoint(*
I->getInsertionPointAfterDef());
447 switch (Intr->getIntrinsicID()) {
448 case Intrinsic::invariant_start:
449 case Intrinsic::invariant_end:
457 if (
I->getType()->isTokenTy())
459 "does not support token type",
464 if (!
I->hasName() ||
I->getType()->isAggregateType() ||
465 expectIgnoredInIRTranslation(
I))
476 if (
F &&
F->getName().starts_with(
"llvm.spv.alloca"))
487 std::vector<Value *> Args = {
490 B.CreateIntrinsic(Intrinsic::spv_assign_name, {
I->getType()}, Args);
493void SPIRVEmitIntrinsics::replaceAllUsesWith(
Value *Src,
Value *Dest,
497 if (isTodoType(Src)) {
500 insertTodoType(Dest);
504void SPIRVEmitIntrinsics::replaceAllUsesWithAndErase(
IRBuilder<> &
B,
509 std::string
Name = Src->hasName() ? Src->getName().str() :
"";
510 Src->eraseFromParent();
513 if (Named.insert(Dest).second)
538Type *SPIRVEmitIntrinsics::reconstructType(
Value *
Op,
bool UnknownElemTypeI8,
539 bool IsPostprocessing) {
554 if (UnknownElemTypeI8) {
555 if (!IsPostprocessing)
563CallInst *SPIRVEmitIntrinsics::buildSpvPtrcast(Function *
F,
Value *
Op,
571 B.SetInsertPointPastAllocas(OpA->getParent());
574 B.SetInsertPoint(
F->getEntryBlock().getFirstNonPHIOrDbgOrAlloca());
576 Type *OpTy =
Op->getType();
580 CallInst *PtrCasted =
581 B.CreateIntrinsic(Intrinsic::spv_ptrcast, {
Types},
Args);
586void SPIRVEmitIntrinsics::replaceUsesOfWithSpvPtrcast(
588 DenseMap<Function *, CallInst *> Ptrcasts) {
590 CallInst *PtrCastedI =
nullptr;
591 auto It = Ptrcasts.
find(
F);
592 if (It == Ptrcasts.
end()) {
593 PtrCastedI = buildSpvPtrcast(
F,
Op, ElemTy);
594 Ptrcasts[
F] = PtrCastedI;
596 PtrCastedI = It->second;
598 I->replaceUsesOfWith(
Op, PtrCastedI);
601void SPIRVEmitIntrinsics::propagateElemType(
603 DenseSet<std::pair<Value *, Value *>> &VisitedSubst) {
604 DenseMap<Function *, CallInst *> Ptrcasts;
606 for (
auto *U :
Users) {
609 if (!VisitedSubst.insert(std::make_pair(U,
Op)).second)
614 if (isaGEP(UI) || TypeValidated.find(UI) != TypeValidated.end())
615 replaceUsesOfWithSpvPtrcast(
Op, ElemTy, UI, Ptrcasts);
619void SPIRVEmitIntrinsics::propagateElemTypeRec(
621 DenseSet<std::pair<Value *, Value *>> &VisitedSubst) {
622 std::unordered_set<Value *> Visited;
623 DenseMap<Function *, CallInst *> Ptrcasts;
624 propagateElemTypeRec(
Op, PtrElemTy, CastElemTy, VisitedSubst, Visited,
625 std::move(Ptrcasts));
628void SPIRVEmitIntrinsics::propagateElemTypeRec(
630 DenseSet<std::pair<Value *, Value *>> &VisitedSubst,
631 std::unordered_set<Value *> &Visited,
632 DenseMap<Function *, CallInst *> Ptrcasts) {
633 if (!Visited.insert(
Op).second)
636 for (
auto *U :
Users) {
639 if (!VisitedSubst.insert(std::make_pair(U,
Op)).second)
644 if (isaGEP(UI) || TypeValidated.find(UI) != TypeValidated.end())
645 replaceUsesOfWithSpvPtrcast(
Op, CastElemTy, UI, Ptrcasts);
653SPIRVEmitIntrinsics::deduceElementTypeByValueDeep(
Type *ValueTy,
Value *Operand,
654 bool UnknownElemTypeI8) {
655 std::unordered_set<Value *> Visited;
656 return deduceElementTypeByValueDeep(ValueTy, Operand, Visited,
660Type *SPIRVEmitIntrinsics::deduceElementTypeByValueDeep(
661 Type *ValueTy,
Value *Operand, std::unordered_set<Value *> &Visited,
662 bool UnknownElemTypeI8) {
667 deduceElementTypeHelper(Operand, Visited, UnknownElemTypeI8))
678Type *SPIRVEmitIntrinsics::deduceElementTypeByUsersDeep(
679 Value *
Op, std::unordered_set<Value *> &Visited,
bool UnknownElemTypeI8) {
691 for (User *OpU :
Op->users()) {
693 if (
Type *Ty = deduceElementTypeHelper(Inst, Visited, UnknownElemTypeI8))
706 if ((DemangledName.
starts_with(
"__spirv_ocl_printf(") ||
715Type *SPIRVEmitIntrinsics::deduceElementTypeHelper(
Value *
I,
716 bool UnknownElemTypeI8) {
717 std::unordered_set<Value *> Visited;
718 return deduceElementTypeHelper(
I, Visited, UnknownElemTypeI8);
721void SPIRVEmitIntrinsics::maybeAssignPtrType(
Type *&Ty,
Value *
Op,
Type *RefTy,
722 bool UnknownElemTypeI8) {
724 if (!UnknownElemTypeI8)
731bool SPIRVEmitIntrinsics::walkLogicalAccessChain(
732 GetElementPtrInst &
GEP,
733 const std::function<
void(
Type *, uint64_t)> &OnLiteralIndexing,
734 const std::function<
void(
Type *,
Value *)> &OnDynamicIndexing) {
742 Value *Src = getPointerRoot(
GEP.getPointerOperand());
743 Type *CurType = deduceElementType(Src,
true);
752 OnDynamicIndexing(AT->getElementType(), Operand);
753 return AT ==
nullptr;
761 uint32_t EltTypeSize =
DL.getTypeSizeInBits(AT->getElementType()) / 8;
765 CurType = AT->getElementType();
766 OnLiteralIndexing(CurType, Index);
768 uint32_t StructSize =
DL.getTypeSizeInBits(ST) / 8;
771 const auto &STL =
DL.getStructLayout(ST);
772 unsigned Element = STL->getElementContainingOffset(
Offset);
773 Offset -= STL->getElementOffset(Element);
774 CurType =
ST->getElementType(Element);
775 OnLiteralIndexing(CurType, Element);
777 Type *EltTy = VT->getElementType();
778 TypeSize EltSizeBits =
DL.getTypeSizeInBits(EltTy);
779 assert(EltSizeBits % 8 == 0 &&
780 "Element type size in bits must be a multiple of 8.");
781 uint32_t EltTypeSize = EltSizeBits / 8;
786 OnLiteralIndexing(CurType, Index);
798SPIRVEmitIntrinsics::buildLogicalAccessChainFromGEP(GetElementPtrInst &
GEP) {
801 B.SetInsertPoint(&
GEP);
803 std::vector<Value *> Indices;
804 Indices.push_back(ConstantInt::get(
805 IntegerType::getInt32Ty(CurrF->
getContext()), 0,
false));
806 walkLogicalAccessChain(
808 [&Indices, &
B](
Type *EltType, uint64_t Index) {
810 ConstantInt::get(
B.getInt64Ty(), Index,
false));
813 uint32_t EltTypeSize =
DL.getTypeSizeInBits(EltType) / 8;
815 Offset, ConstantInt::get(
Offset->getType(), EltTypeSize,
817 Indices.push_back(Index);
821 SmallVector<Value *, 4>
Args;
822 Args.push_back(
B.getInt1(
GEP.isInBounds()));
823 Args.push_back(
GEP.getOperand(0));
825 auto *NewI =
B.CreateIntrinsic(Intrinsic::spv_gep, {
Types}, {
Args});
826 replaceAllUsesWithAndErase(
B, &
GEP, NewI);
830Type *SPIRVEmitIntrinsics::getGEPTypeLogical(GetElementPtrInst *
GEP) {
832 Type *CurType =
GEP->getResultElementType();
834 bool Interrupted = walkLogicalAccessChain(
835 *
GEP, [&CurType](
Type *EltType, uint64_t Index) { CurType = EltType; },
838 return Interrupted ?
GEP->getResultElementType() : CurType;
841Type *SPIRVEmitIntrinsics::getGEPType(GetElementPtrInst *
Ref) {
842 if (
Ref->getSourceElementType() ==
843 IntegerType::getInt8Ty(CurrF->
getContext()) &&
845 return getGEPTypeLogical(
Ref);
852 Ty =
Ref->getSourceElementType();
856 Ty =
Ref->getResultElementType();
861Type *SPIRVEmitIntrinsics::deduceElementTypeHelper(
862 Value *
I, std::unordered_set<Value *> &Visited,
bool UnknownElemTypeI8,
863 bool IgnoreKnownType) {
869 if (!IgnoreKnownType)
874 if (!Visited.insert(
I).second)
881 maybeAssignPtrType(Ty,
I,
Ref->getAllocatedType(), UnknownElemTypeI8);
883 Ty = getGEPType(
Ref);
885 Ty = SGEP->getResultElementType();
890 KnownTy =
Op->getType();
892 maybeAssignPtrType(Ty,
I, ElemTy, UnknownElemTypeI8);
895 Ty = SPIRV::getOriginalFunctionType(*Fn);
898 Ty = deduceElementTypeByValueDeep(
900 Ref->getNumOperands() > 0 ?
Ref->getOperand(0) :
nullptr, Visited,
904 Type *RefTy = deduceElementTypeHelper(
Ref->getPointerOperand(), Visited,
906 maybeAssignPtrType(Ty,
I, RefTy, UnknownElemTypeI8);
908 maybeAssignPtrType(Ty,
I,
Ref->getDestTy(), UnknownElemTypeI8);
910 if (
Type *Src =
Ref->getSrcTy(), *Dest =
Ref->getDestTy();
912 Ty = deduceElementTypeHelper(
Ref->getOperand(0), Visited,
917 Ty = deduceElementTypeHelper(
Op, Visited, UnknownElemTypeI8);
921 Ty = deduceElementTypeHelper(
Op, Visited, UnknownElemTypeI8);
923 Type *BestTy =
nullptr;
925 DenseMap<Type *, unsigned> PhiTys;
926 for (
int i =
Ref->getNumIncomingValues() - 1; i >= 0; --i) {
927 Ty = deduceElementTypeByUsersDeep(
Ref->getIncomingValue(i), Visited,
934 if (It.first->second > MaxN) {
935 MaxN = It.first->second;
943 for (
Value *
Op : {
Ref->getTrueValue(),
Ref->getFalseValue()}) {
944 Ty = deduceElementTypeByUsersDeep(
Op, Visited, UnknownElemTypeI8);
949 static StringMap<unsigned> ResTypeByArg = {
953 {
"__spirv_GenericCastToPtr_ToGlobal", 0},
954 {
"__spirv_GenericCastToPtr_ToLocal", 0},
955 {
"__spirv_GenericCastToPtr_ToPrivate", 0},
956 {
"__spirv_GenericCastToPtrExplicit_ToGlobal", 0},
957 {
"__spirv_GenericCastToPtrExplicit_ToLocal", 0},
958 {
"__spirv_GenericCastToPtrExplicit_ToPrivate", 0}};
962 if (
II &&
II->getIntrinsicID() == Intrinsic::spv_resource_getpointer) {
964 if (HandleType->getTargetExtName() ==
"spirv.Image" ||
965 HandleType->getTargetExtName() ==
"spirv.SignedImage") {
966 for (User *U :
II->users()) {
971 }
else if (HandleType->getTargetExtName() ==
"spirv.VulkanBuffer") {
973 Ty = HandleType->getTypeParameter(0);
985 }
else if (
II &&
II->getIntrinsicID() ==
986 Intrinsic::spv_generic_cast_to_ptr_explicit) {
987 Ty = deduceElementTypeHelper(CI->getArgOperand(0), Visited,
989 }
else if (Function *CalledF = CI->getCalledFunction()) {
990 std::string DemangledName =
992 if (DemangledName.length() > 0)
993 DemangledName = SPIRV::lookupBuiltinNameHelper(DemangledName);
994 auto AsArgIt = ResTypeByArg.
find(DemangledName);
995 if (AsArgIt != ResTypeByArg.
end())
996 Ty = deduceElementTypeHelper(CI->getArgOperand(AsArgIt->second),
997 Visited, UnknownElemTypeI8);
1004 if (Ty && !IgnoreKnownType) {
1015Type *SPIRVEmitIntrinsics::deduceNestedTypeHelper(User *U,
1016 bool UnknownElemTypeI8) {
1017 std::unordered_set<Value *> Visited;
1018 return deduceNestedTypeHelper(U,
U->getType(), Visited, UnknownElemTypeI8);
1021Type *SPIRVEmitIntrinsics::deduceNestedTypeHelper(
1022 User *U,
Type *OrigTy, std::unordered_set<Value *> &Visited,
1023 bool UnknownElemTypeI8) {
1032 if (!Visited.insert(U).second)
1037 bool Change =
false;
1038 for (
unsigned i = 0; i <
U->getNumOperands(); ++i) {
1040 assert(
Op &&
"Operands should not be null.");
1041 Type *OpTy =
Op->getType();
1044 if (
Type *NestedTy =
1045 deduceElementTypeHelper(
Op, Visited, UnknownElemTypeI8))
1052 Change |= Ty != OpTy;
1060 if (
Value *
Op =
U->getNumOperands() > 0 ?
U->getOperand(0) :
nullptr) {
1061 Type *OpTy = ArrTy->getElementType();
1064 if (
Type *NestedTy =
1065 deduceElementTypeHelper(
Op, Visited, UnknownElemTypeI8))
1072 Type *NewTy = ArrayType::get(Ty, ArrTy->getNumElements());
1078 if (
Value *
Op =
U->getNumOperands() > 0 ?
U->getOperand(0) :
nullptr) {
1079 Type *OpTy = VecTy->getElementType();
1082 if (
Type *NestedTy =
1083 deduceElementTypeHelper(
Op, Visited, UnknownElemTypeI8))
1090 Type *NewTy = VectorType::get(Ty, VecTy->getElementCount());
1100Type *SPIRVEmitIntrinsics::deduceElementType(
Value *
I,
bool UnknownElemTypeI8) {
1101 if (
Type *Ty = deduceElementTypeHelper(
I, UnknownElemTypeI8))
1103 if (!UnknownElemTypeI8)
1106 return IntegerType::getInt8Ty(
I->getContext());
1110 Value *PointerOperand) {
1116 return I->getType();
1124bool SPIRVEmitIntrinsics::deduceOperandElementTypeCalledFunction(
1126 Type *&KnownElemTy,
bool &Incomplete) {
1130 std::string DemangledName =
1132 if (DemangledName.length() > 0 &&
1134 const SPIRVSubtarget &
ST = TM->
getSubtarget<SPIRVSubtarget>(*CalledF);
1135 auto [Grp, Opcode, ExtNo] = SPIRV::mapBuiltinToOpcode(
1136 DemangledName,
ST.getPreferredInstructionSet());
1137 if (Opcode == SPIRV::OpGroupAsyncCopy) {
1138 for (
unsigned i = 0, PtrCnt = 0; i < CI->
arg_size() && PtrCnt < 2; ++i) {
1144 KnownElemTy = ElemTy;
1145 Ops.push_back(std::make_pair(
Op, i));
1147 }
else if (Grp == SPIRV::Atomic || Grp == SPIRV::AtomicFloating) {
1154 case SPIRV::OpAtomicFAddEXT:
1155 case SPIRV::OpAtomicFMinEXT:
1156 case SPIRV::OpAtomicFMaxEXT:
1157 case SPIRV::OpAtomicLoad:
1158 case SPIRV::OpAtomicCompareExchangeWeak:
1159 case SPIRV::OpAtomicCompareExchange:
1160 case SPIRV::OpAtomicExchange:
1161 case SPIRV::OpAtomicIAdd:
1162 case SPIRV::OpAtomicISub:
1163 case SPIRV::OpAtomicOr:
1164 case SPIRV::OpAtomicXor:
1165 case SPIRV::OpAtomicAnd:
1166 case SPIRV::OpAtomicUMin:
1167 case SPIRV::OpAtomicUMax:
1168 case SPIRV::OpAtomicSMin:
1169 case SPIRV::OpAtomicSMax: {
1174 Incomplete = isTodoType(
Op);
1175 Ops.push_back(std::make_pair(
Op, 0));
1177 case SPIRV::OpAtomicStore: {
1186 Incomplete = isTodoType(
Op);
1187 Ops.push_back(std::make_pair(
Op, 0));
1196void SPIRVEmitIntrinsics::deduceOperandElementTypeFunctionPointer(
1198 Type *&KnownElemTy,
bool IsPostprocessing) {
1202 Ops.push_back(std::make_pair(
Op, std::numeric_limits<unsigned>::max()));
1203 FunctionType *FTy = SPIRV::getOriginalFunctionType(*CI);
1204 bool IsNewFTy =
false, IsIncomplete =
false;
1207 Type *ArgTy = Arg->getType();
1212 if (isTodoType(Arg))
1213 IsIncomplete =
true;
1215 IsIncomplete =
true;
1218 ArgTy = FTy->getFunctionParamType(ParmIdx);
1222 Type *RetTy = FTy->getReturnType();
1229 IsIncomplete =
true;
1231 IsIncomplete =
true;
1234 if (!IsPostprocessing && IsIncomplete)
1237 IsNewFTy ? FunctionType::get(RetTy, ArgTys, FTy->isVarArg()) : FTy;
1240bool SPIRVEmitIntrinsics::deduceOperandElementTypeFunctionRet(
1241 Instruction *
I, SmallPtrSet<Instruction *, 4> *IncompleteRets,
1242 const SmallPtrSet<Value *, 4> *AskOps,
bool IsPostprocessing,
1254 DenseSet<std::pair<Value *, Value *>> VisitedSubst{std::make_pair(
I,
Op)};
1255 for (User *U :
F->users()) {
1263 propagateElemType(CI, PrevElemTy, VisitedSubst);
1273 for (Instruction *IncompleteRetI : *IncompleteRets)
1274 deduceOperandElementType(IncompleteRetI,
nullptr, AskOps,
1276 }
else if (IncompleteRets) {
1279 TypeValidated.insert(
I);
1287void SPIRVEmitIntrinsics::deduceOperandElementType(
1288 Instruction *
I, SmallPtrSet<Instruction *, 4> *IncompleteRets,
1289 const SmallPtrSet<Value *, 4> *AskOps,
bool IsPostprocessing) {
1291 Type *KnownElemTy =
nullptr;
1292 bool Incomplete =
false;
1298 Incomplete = isTodoType(
I);
1299 for (
unsigned i = 0; i <
Ref->getNumIncomingValues(); i++) {
1302 Ops.push_back(std::make_pair(
Op, i));
1308 Incomplete = isTodoType(
I);
1309 Ops.push_back(std::make_pair(
Ref->getPointerOperand(), 0));
1316 Incomplete = isTodoType(
I);
1317 Ops.push_back(std::make_pair(
Ref->getOperand(0), 0));
1321 KnownElemTy =
Ref->getSourceElementType();
1322 Ops.push_back(std::make_pair(
Ref->getPointerOperand(),
1327 KnownElemTy =
Ref->getBaseType();
1328 Ops.push_back(std::make_pair(
Ref->getPointerOperand(),
1331 KnownElemTy =
I->getType();
1337 Ops.push_back(std::make_pair(
Ref->getPointerOperand(),
1341 reconstructType(
Ref->getValueOperand(),
false, IsPostprocessing)))
1346 Ops.push_back(std::make_pair(
Ref->getPointerOperand(),
1354 Incomplete = isTodoType(
Ref->getPointerOperand());
1355 Ops.push_back(std::make_pair(
Ref->getPointerOperand(),
1363 Incomplete = isTodoType(
Ref->getPointerOperand());
1364 Ops.push_back(std::make_pair(
Ref->getPointerOperand(),
1370 Incomplete = isTodoType(
I);
1371 for (
unsigned i = 0; i <
Ref->getNumOperands(); i++) {
1374 Ops.push_back(std::make_pair(
Op, i));
1382 if (deduceOperandElementTypeFunctionRet(
I, IncompleteRets, AskOps,
1383 IsPostprocessing, KnownElemTy,
Op,
1386 Incomplete = isTodoType(CurrF);
1387 Ops.push_back(std::make_pair(
Op, 0));
1393 bool Incomplete0 = isTodoType(Op0);
1394 bool Incomplete1 = isTodoType(Op1);
1396 Type *ElemTy0 = (Incomplete0 && !Incomplete1 && ElemTy1)
1398 : GR->findDeducedElementType(Op0);
1400 KnownElemTy = ElemTy0;
1401 Incomplete = Incomplete0;
1402 Ops.push_back(std::make_pair(Op1, 1));
1403 }
else if (ElemTy1) {
1404 KnownElemTy = ElemTy1;
1405 Incomplete = Incomplete1;
1406 Ops.push_back(std::make_pair(Op0, 0));
1410 deduceOperandElementTypeCalledFunction(CI,
Ops, KnownElemTy, Incomplete);
1411 else if (HaveFunPtrs)
1412 deduceOperandElementTypeFunctionPointer(CI,
Ops, KnownElemTy,
1417 if (!KnownElemTy ||
Ops.size() == 0)
1422 for (
auto &OpIt :
Ops) {
1426 Type *AskTy =
nullptr;
1427 CallInst *AskCI =
nullptr;
1428 if (IsPostprocessing && AskOps) {
1434 if (Ty == KnownElemTy)
1437 Type *OpTy =
Op->getType();
1438 if (
Op->hasUseList() &&
1445 else if (!IsPostprocessing)
1449 if (AssignCI ==
nullptr) {
1458 DenseSet<std::pair<Value *, Value *>> VisitedSubst{
1459 std::make_pair(
I,
Op)};
1460 propagateElemTypeRec(
Op, KnownElemTy, PrevElemTy, VisitedSubst);
1464 CallInst *PtrCastI =
1465 buildSpvPtrcast(
I->getParent()->getParent(),
Op, KnownElemTy);
1466 if (OpIt.second == std::numeric_limits<unsigned>::max())
1469 I->setOperand(OpIt.second, PtrCastI);
1472 TypeValidated.insert(
I);
1475void SPIRVEmitIntrinsics::replaceMemInstrUses(Instruction *Old,
1480 if (isAssignTypeInstr(U)) {
1481 B.SetInsertPoint(U);
1482 SmallVector<Value *, 2>
Args = {
New,
U->getOperand(1)};
1483 CallInst *AssignCI =
1484 B.CreateIntrinsic(Intrinsic::spv_assign_type, {
New->getType()},
Args);
1486 U->eraseFromParent();
1489 U->replaceUsesOfWith(Old, New);
1494 New->copyMetadata(*Old);
1498void SPIRVEmitIntrinsics::preprocessUndefs(
IRBuilder<> &
B) {
1499 std::queue<Instruction *> Worklist;
1503 while (!Worklist.empty()) {
1505 bool BPrepared =
false;
1508 for (
auto &
Op :
I->operands()) {
1510 if (!AggrUndef || !
Op->getType()->isAggregateType())
1517 auto *IntrUndef =
B.CreateIntrinsic(Intrinsic::spv_undef, {});
1518 Worklist.push(IntrUndef);
1519 I->replaceUsesOfWith(
Op, IntrUndef);
1520 AggrConsts[IntrUndef] = AggrUndef;
1521 AggrConstTypes[IntrUndef] = AggrUndef->getType();
1526void SPIRVEmitIntrinsics::preprocessCompositeConstants(
IRBuilder<> &
B) {
1527 std::queue<Instruction *> Worklist;
1531 while (!Worklist.empty()) {
1532 auto *
I = Worklist.front();
1535 bool KeepInst =
false;
1536 for (
const auto &
Op :
I->operands()) {
1538 Type *ResTy =
nullptr;
1541 ResTy = COp->getType();
1553 ResTy =
Op->getType()->isVectorTy() ? COp->getType() :
B.getInt32Ty();
1558 for (
unsigned i = 0; i < COp->getNumElements(); ++i)
1559 Args.push_back(COp->getElementAsConstant(i));
1563 IsPhi ?
B.SetInsertPointPastAllocas(
I->getParent()->getParent())
1564 :
B.SetInsertPoint(
I);
1568 B.CreateIntrinsic(Intrinsic::spv_const_composite, {ResTy}, {
Args});
1572 AggrConsts[CI] = AggrConst;
1573 AggrConstTypes[CI] = deduceNestedTypeHelper(AggrConst,
false);
1585 B.CreateIntrinsic(Intrinsic::spv_assign_decoration, {
I->getType()},
1590 unsigned RoundingModeDeco,
1597 ConstantInt::get(
Int32Ty, SPIRV::Decoration::FPRoundingMode)),
1606 MDNode *SaturatedConversionNode =
1608 Int32Ty, SPIRV::Decoration::SaturatedConversion))});
1615 if (Fu->isIntrinsic()) {
1616 unsigned const int IntrinsicId = Fu->getIntrinsicID();
1617 switch (IntrinsicId) {
1618 case Intrinsic::fptosi_sat:
1619 case Intrinsic::fptoui_sat:
1638 MDString *ConstraintString =
MDString::get(Ctx,
IA->getConstraintString());
1646 B.SetInsertPoint(&
Call);
1647 B.CreateIntrinsic(Intrinsic::spv_inline_asm, {
Args});
1652void SPIRVEmitIntrinsics::useRoundingMode(ConstrainedFPIntrinsic *FPI,
1655 if (!
RM.has_value())
1657 unsigned RoundingModeDeco = std::numeric_limits<unsigned>::max();
1658 switch (
RM.value()) {
1662 case RoundingMode::NearestTiesToEven:
1663 RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTE;
1665 case RoundingMode::TowardNegative:
1666 RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTN;
1668 case RoundingMode::TowardPositive:
1669 RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTP;
1671 case RoundingMode::TowardZero:
1672 RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTZ;
1674 case RoundingMode::Dynamic:
1675 case RoundingMode::NearestTiesToAway:
1679 if (RoundingModeDeco == std::numeric_limits<unsigned>::max())
1685Instruction *SPIRVEmitIntrinsics::visitSwitchInst(SwitchInst &
I) {
1689 B.SetInsertPoint(&
I);
1690 SmallVector<Value *, 4>
Args;
1692 Args.push_back(
I.getCondition());
1695 for (
auto &Case :
I.cases()) {
1696 Args.push_back(Case.getCaseValue());
1697 BBCases.
push_back(Case.getCaseSuccessor());
1700 CallInst *NewI =
B.CreateIntrinsic(Intrinsic::spv_switch,
1701 {
I.getOperand(0)->getType()}, {
Args});
1705 I.eraseFromParent();
1708 B.SetInsertPoint(ParentBB);
1709 IndirectBrInst *BrI =
B.CreateIndirectBr(
1712 for (BasicBlock *BBCase : BBCases)
1718 if (
GEP->getNumIndices() == 0)
1721 return CI->getZExtValue() == 0;
1726Instruction *SPIRVEmitIntrinsics::visitIntrinsicInst(IntrinsicInst &
I) {
1732 B.SetInsertPoint(&
I);
1734 SmallVector<Value *, 4>
Args;
1735 Args.push_back(
B.getInt1(
true));
1736 Args.push_back(
I.getOperand(0));
1737 Args.push_back(
B.getInt32(0));
1738 for (
unsigned J = 0; J < SGEP->getNumIndices(); ++J)
1739 Args.push_back(SGEP->getIndexOperand(J));
1741 auto *NewI =
B.CreateIntrinsic(Intrinsic::spv_gep, Types, Args);
1742 replaceAllUsesWithAndErase(
B, &
I, NewI);
1746Instruction *SPIRVEmitIntrinsics::visitGetElementPtrInst(GetElementPtrInst &
I) {
1748 B.SetInsertPoint(&
I);
1756 if (
I.getSourceElementType() ==
1757 IntegerType::getInt8Ty(CurrF->
getContext())) {
1758 return buildLogicalAccessChainFromGEP(
I);
1763 Value *PtrOp =
I.getPointerOperand();
1764 Type *SrcElemTy =
I.getSourceElementType();
1765 Type *DeducedPointeeTy = deduceElementType(PtrOp,
true);
1768 if (ArrTy->getElementType() == SrcElemTy) {
1770 Type *FirstIdxType =
I.getOperand(1)->getType();
1771 NewIndices.
push_back(ConstantInt::get(FirstIdxType, 0));
1772 for (
Value *Idx :
I.indices())
1776 SmallVector<Value *, 4>
Args;
1777 Args.push_back(
B.getInt1(
I.isInBounds()));
1778 Args.push_back(
I.getPointerOperand());
1781 auto *NewI =
B.CreateIntrinsic(Intrinsic::spv_gep, {
Types}, {
Args});
1782 replaceAllUsesWithAndErase(
B, &
I, NewI);
1789 SmallVector<Value *, 4>
Args;
1790 Args.push_back(
B.getInt1(
I.isInBounds()));
1792 auto *NewI =
B.CreateIntrinsic(Intrinsic::spv_gep, {
Types}, {
Args});
1793 replaceAllUsesWithAndErase(
B, &
I, NewI);
1797Instruction *SPIRVEmitIntrinsics::visitBitCastInst(BitCastInst &
I) {
1799 B.SetInsertPoint(&
I);
1808 I.eraseFromParent();
1814 auto *NewI =
B.CreateIntrinsic(Intrinsic::spv_bitcast, {
Types}, {
Args});
1815 replaceAllUsesWithAndErase(
B, &
I, NewI);
1819void SPIRVEmitIntrinsics::insertAssignPtrTypeTargetExt(
1821 Type *VTy =
V->getType();
1826 if (ElemTy != AssignedType)
1839 if (CurrentType == AssignedType)
1846 " for value " +
V->getName(),
1854void SPIRVEmitIntrinsics::replacePointerOperandWithPtrCast(
1855 Instruction *
I,
Value *Pointer,
Type *ExpectedElementType,
1857 TypeValidated.insert(
I);
1860 Type *PointerElemTy = deduceElementTypeHelper(Pointer,
false);
1861 if (PointerElemTy == ExpectedElementType ||
1867 MetadataAsValue *VMD =
buildMD(ExpectedElementVal);
1869 bool FirstPtrCastOrAssignPtrType =
true;
1875 for (
auto User :
Pointer->users()) {
1878 (
II->getIntrinsicID() != Intrinsic::spv_assign_ptr_type &&
1879 II->getIntrinsicID() != Intrinsic::spv_ptrcast) ||
1880 II->getOperand(0) != Pointer)
1885 FirstPtrCastOrAssignPtrType =
false;
1886 if (
II->getOperand(1) != VMD ||
1893 if (
II->getIntrinsicID() != Intrinsic::spv_ptrcast)
1898 if (
II->getParent() !=
I->getParent())
1901 I->setOperand(OperandToReplace,
II);
1907 if (FirstPtrCastOrAssignPtrType) {
1912 }
else if (isTodoType(Pointer)) {
1913 eraseTodoType(Pointer);
1921 DenseSet<std::pair<Value *, Value *>> VisitedSubst{
1922 std::make_pair(
I, Pointer)};
1924 propagateElemType(Pointer, PrevElemTy, VisitedSubst);
1936 auto *PtrCastI =
B.CreateIntrinsic(Intrinsic::spv_ptrcast, {
Types},
Args);
1942void SPIRVEmitIntrinsics::insertPtrCastOrAssignTypeInstr(Instruction *
I,
1947 replacePointerOperandWithPtrCast(
1948 I,
SI->getValueOperand(), IntegerType::getInt8Ty(CurrF->
getContext()),
1954 Type *OpTy =
Op->getType();
1957 if (OpTy ==
Op->getType())
1958 OpTy = deduceElementTypeByValueDeep(OpTy,
Op,
false);
1959 replacePointerOperandWithPtrCast(
I, Pointer, OpTy, 1,
B);
1964 Type *OpTy = LI->getType();
1969 Type *NewOpTy = OpTy;
1970 OpTy = deduceElementTypeByValueDeep(OpTy, LI,
false);
1971 if (OpTy == NewOpTy)
1972 insertTodoType(Pointer);
1975 replacePointerOperandWithPtrCast(
I, Pointer, OpTy, 0,
B);
1980 Type *OpTy =
nullptr;
1992 OpTy = GEPI->getSourceElementType();
1994 replacePointerOperandWithPtrCast(
I, Pointer, OpTy, 0,
B);
1996 insertTodoType(Pointer);
2008 std::string DemangledName =
2012 bool HaveTypes =
false;
2030 for (User *U : CalledArg->
users()) {
2032 if ((ElemTy = deduceElementTypeHelper(Inst,
false)) !=
nullptr)
2038 HaveTypes |= ElemTy !=
nullptr;
2043 if (DemangledName.empty() && !HaveTypes)
2061 Type *ExpectedType =
2063 if (!ExpectedType && !DemangledName.empty())
2064 ExpectedType = SPIRV::parseBuiltinCallArgumentBaseType(
2065 DemangledName,
OpIdx,
I->getContext());
2066 if (!ExpectedType || ExpectedType->
isVoidTy())
2074 replacePointerOperandWithPtrCast(CI, ArgOperand, ExpectedType,
OpIdx,
B);
2078Instruction *SPIRVEmitIntrinsics::visitInsertElementInst(InsertElementInst &
I) {
2085 I.getOperand(1)->getType(),
2086 I.getOperand(2)->getType()};
2088 B.SetInsertPoint(&
I);
2090 auto *NewI =
B.CreateIntrinsic(Intrinsic::spv_insertelt, {
Types}, {
Args});
2091 replaceAllUsesWithAndErase(
B, &
I, NewI);
2096SPIRVEmitIntrinsics::visitExtractElementInst(ExtractElementInst &
I) {
2103 B.SetInsertPoint(&
I);
2105 I.getIndexOperand()->getType()};
2106 SmallVector<Value *, 2>
Args = {
I.getVectorOperand(),
I.getIndexOperand()};
2107 auto *NewI =
B.CreateIntrinsic(Intrinsic::spv_extractelt, {
Types}, {
Args});
2108 replaceAllUsesWithAndErase(
B, &
I, NewI);
2112Instruction *SPIRVEmitIntrinsics::visitInsertValueInst(InsertValueInst &
I) {
2114 B.SetInsertPoint(&
I);
2117 Value *AggregateOp =
I.getAggregateOperand();
2121 Args.push_back(AggregateOp);
2122 Args.push_back(
I.getInsertedValueOperand());
2123 for (
auto &
Op :
I.indices())
2124 Args.push_back(
B.getInt32(
Op));
2126 B.CreateIntrinsic(Intrinsic::spv_insertv, {
Types}, {
Args});
2127 replaceMemInstrUses(&
I, NewI,
B);
2131Instruction *SPIRVEmitIntrinsics::visitExtractValueInst(ExtractValueInst &
I) {
2132 if (
I.getAggregateOperand()->getType()->isAggregateType())
2135 B.SetInsertPoint(&
I);
2137 for (
auto &
Op :
I.indices())
2138 Args.push_back(
B.getInt32(
Op));
2140 B.CreateIntrinsic(Intrinsic::spv_extractv, {
I.getType()}, {
Args});
2141 replaceAllUsesWithAndErase(
B, &
I, NewI);
2145Instruction *SPIRVEmitIntrinsics::visitLoadInst(LoadInst &
I) {
2146 if (!
I.getType()->isAggregateType())
2149 B.SetInsertPoint(&
I);
2150 TrackConstants =
false;
2155 B.CreateIntrinsic(Intrinsic::spv_load, {
I.getOperand(0)->getType()},
2156 {
I.getPointerOperand(),
B.getInt16(Flags),
2157 B.getInt32(
I.getAlign().value())});
2158 replaceMemInstrUses(&
I, NewI,
B);
2162Instruction *SPIRVEmitIntrinsics::visitStoreInst(StoreInst &
I) {
2166 B.SetInsertPoint(&
I);
2167 TrackConstants =
false;
2171 auto *PtrOp =
I.getPointerOperand();
2173 if (
I.getValueOperand()->getType()->isAggregateType()) {
2181 "Unexpected argument of aggregate type, should be spv_extractv!");
2185 auto *NewI =
B.CreateIntrinsic(
2186 Intrinsic::spv_store, {
I.getValueOperand()->getType(), PtrOp->
getType()},
2187 {
I.getValueOperand(), PtrOp,
B.getInt16(Flags),
2188 B.getInt32(
I.getAlign().value())});
2190 I.eraseFromParent();
2194Instruction *SPIRVEmitIntrinsics::visitAllocaInst(AllocaInst &
I) {
2195 Value *ArraySize =
nullptr;
2196 if (
I.isArrayAllocation()) {
2199 SPIRV::Extension::SPV_INTEL_variable_length_array))
2201 "array allocation: this instruction requires the following "
2202 "SPIR-V extension: SPV_INTEL_variable_length_array",
2204 ArraySize =
I.getArraySize();
2207 B.SetInsertPoint(&
I);
2208 TrackConstants =
false;
2209 Type *PtrTy =
I.getType();
2212 ?
B.CreateIntrinsic(Intrinsic::spv_alloca_array,
2213 {PtrTy, ArraySize->
getType()},
2214 {ArraySize,
B.getInt32(
I.getAlign().value())})
2215 :
B.CreateIntrinsic(
Intrinsic::spv_alloca, {PtrTy},
2216 {
B.getInt32(
I.getAlign().value())});
2217 replaceAllUsesWithAndErase(
B, &
I, NewI);
2221Instruction *SPIRVEmitIntrinsics::visitAtomicCmpXchgInst(AtomicCmpXchgInst &
I) {
2222 assert(
I.getType()->isAggregateType() &&
"Aggregate result is expected");
2224 B.SetInsertPoint(&
I);
2226 Args.push_back(
B.getInt32(
2227 static_cast<uint32_t
>(
getMemScope(
I.getContext(),
I.getSyncScopeID()))));
2228 Args.push_back(
B.getInt32(
2230 Args.push_back(
B.getInt32(
2232 auto *NewI =
B.CreateIntrinsic(Intrinsic::spv_cmpxchg,
2233 {
I.getPointerOperand()->getType()}, {
Args});
2234 replaceMemInstrUses(&
I, NewI,
B);
2238Instruction *SPIRVEmitIntrinsics::visitUnreachableInst(UnreachableInst &
I) {
2240 B.SetInsertPoint(&
I);
2241 B.CreateIntrinsic(Intrinsic::spv_unreachable, {});
2250 static const StringSet<> ArtificialGlobals{
"llvm.global.annotations",
2251 "llvm.compiler.used",
"llvm.used"};
2256 auto &UserFunctions = GVUsers.getTransitiveUserFunctions(GV);
2257 if (UserFunctions.contains(
F))
2262 if (!UserFunctions.empty())
2267 const Module &M = *
F->getParent();
2268 const Function &FirstDefinition = *M.getFunctionDefs().
begin();
2269 return F == &FirstDefinition;
2272void SPIRVEmitIntrinsics::processGlobalValue(GlobalVariable &GV,
2283 deduceElementTypeHelper(&GV,
false);
2287 auto *InitInst =
B.CreateIntrinsic(Intrinsic::spv_init_global,
2289 InitInst->setArgOperand(1, Init);
2292 B.CreateIntrinsic(Intrinsic::spv_unref_global, GV.
getType(), &GV);
2298bool SPIRVEmitIntrinsics::insertAssignPtrTypeIntrs(Instruction *
I,
2300 bool UnknownElemTypeI8) {
2306 if (
Type *ElemTy = deduceElementType(
I, UnknownElemTypeI8)) {
2313void SPIRVEmitIntrinsics::insertAssignTypeIntrs(Instruction *
I,
2316 static StringMap<unsigned> ResTypeWellKnown = {
2317 {
"async_work_group_copy", WellKnownTypes::Event},
2318 {
"async_work_group_strided_copy", WellKnownTypes::Event},
2319 {
"__spirv_GroupAsyncCopy", WellKnownTypes::Event}};
2323 bool IsKnown =
false;
2328 std::string DemangledName =
2331 if (DemangledName.length() > 0)
2333 SPIRV::lookupBuiltinNameHelper(DemangledName, &DecorationId);
2334 auto ResIt = ResTypeWellKnown.
find(DemangledName);
2335 if (ResIt != ResTypeWellKnown.
end()) {
2338 switch (ResIt->second) {
2339 case WellKnownTypes::Event:
2346 switch (DecorationId) {
2349 case FPDecorationId::SAT:
2352 case FPDecorationId::RTE:
2354 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTE,
B);
2356 case FPDecorationId::RTZ:
2358 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTZ,
B);
2360 case FPDecorationId::RTP:
2362 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTP,
B);
2364 case FPDecorationId::RTN:
2366 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTN,
B);
2372 Type *Ty =
I->getType();
2375 Type *TypeToAssign = Ty;
2377 if (
II->getIntrinsicID() == Intrinsic::spv_const_composite ||
2378 II->getIntrinsicID() == Intrinsic::spv_undef) {
2379 auto It = AggrConstTypes.
find(
II);
2380 if (It == AggrConstTypes.
end())
2382 TypeToAssign = It->second;
2388 for (
const auto &
Op :
I->operands()) {
2395 Type *OpTy =
Op->getType();
2397 CallInst *AssignCI =
2402 Type *OpTy =
Op->getType();
2417 CallInst *AssignCI =
2427bool SPIRVEmitIntrinsics::shouldTryToAddMemAliasingDecoration(
2428 Instruction *Inst) {
2430 if (!STI->
canUseExtension(SPIRV::Extension::SPV_INTEL_memory_access_aliasing))
2441 case Intrinsic::spv_load:
2442 case Intrinsic::spv_store:
2449 const std::string
Prefix =
"__spirv_Atomic";
2450 const bool IsAtomic =
Name.find(Prefix) == 0;
2458void SPIRVEmitIntrinsics::insertSpirvDecorations(Instruction *
I,
2460 if (MDNode *MD =
I->getMetadata(
"spirv.Decorations")) {
2462 B.CreateIntrinsic(Intrinsic::spv_assign_decoration, {
I->getType()},
2467 auto processMemAliasingDecoration = [&](
unsigned Kind) {
2468 if (MDNode *AliasListMD =
I->getMetadata(Kind)) {
2469 if (shouldTryToAddMemAliasingDecoration(
I)) {
2470 uint32_t Dec =
Kind == LLVMContext::MD_alias_scope
2471 ? SPIRV::Decoration::AliasScopeINTEL
2472 : SPIRV::Decoration::NoAliasINTEL;
2474 I, ConstantInt::get(
B.getInt32Ty(), Dec),
2477 B.CreateIntrinsic(Intrinsic::spv_assign_aliasing_decoration,
2478 {
I->getType()}, {
Args});
2482 processMemAliasingDecoration(LLVMContext::MD_alias_scope);
2483 processMemAliasingDecoration(LLVMContext::MD_noalias);
2486 if (MDNode *MD =
I->getMetadata(LLVMContext::MD_fpmath)) {
2488 bool AllowFPMaxError =
2490 if (!AllowFPMaxError)
2494 B.CreateIntrinsic(Intrinsic::spv_assign_fpmaxerror_decoration,
2503 &FPFastMathDefaultInfoMap,
2505 auto it = FPFastMathDefaultInfoMap.
find(
F);
2506 if (it != FPFastMathDefaultInfoMap.
end())
2514 SPIRV::FPFastMathMode::None);
2516 SPIRV::FPFastMathMode::None);
2518 SPIRV::FPFastMathMode::None);
2519 return FPFastMathDefaultInfoMap[
F] = std::move(FPFastMathDefaultInfoVec);
2525 size_t BitWidth = Ty->getScalarSizeInBits();
2529 assert(Index >= 0 && Index < 3 &&
2530 "Expected FPFastMathDefaultInfo for half, float, or double");
2531 assert(FPFastMathDefaultInfoVec.
size() == 3 &&
2532 "Expected FPFastMathDefaultInfoVec to have exactly 3 elements");
2533 return FPFastMathDefaultInfoVec[Index];
2536void SPIRVEmitIntrinsics::insertConstantsForFPFastMathDefault(
Module &M) {
2538 if (!
ST->canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2))
2547 auto Node =
M.getNamedMetadata(
"spirv.ExecutionMode");
2549 if (!
M.getNamedMetadata(
"opencl.enable.FP_CONTRACT")) {
2557 ConstantInt::get(Type::getInt32Ty(
M.getContext()), 0);
2560 [[maybe_unused]] GlobalVariable *GV =
2561 new GlobalVariable(M,
2562 Type::getInt32Ty(
M.getContext()),
2576 DenseMap<Function *, SPIRV::FPFastMathDefaultInfoVector>
2577 FPFastMathDefaultInfoMap;
2579 for (
unsigned i = 0; i <
Node->getNumOperands(); i++) {
2588 if (EM == SPIRV::ExecutionMode::FPFastMathDefault) {
2590 "Expected 4 operands for FPFastMathDefault");
2596 SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
2598 SPIRV::FPFastMathDefaultInfo &
Info =
2601 Info.FPFastMathDefault =
true;
2602 }
else if (EM == SPIRV::ExecutionMode::ContractionOff) {
2604 "Expected no operands for ContractionOff");
2608 SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
2610 for (SPIRV::FPFastMathDefaultInfo &Info : FPFastMathDefaultInfoVec) {
2611 Info.ContractionOff =
true;
2613 }
else if (EM == SPIRV::ExecutionMode::SignedZeroInfNanPreserve) {
2615 "Expected 1 operand for SignedZeroInfNanPreserve");
2616 unsigned TargetWidth =
2621 SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
2625 assert(Index >= 0 && Index < 3 &&
2626 "Expected FPFastMathDefaultInfo for half, float, or double");
2627 assert(FPFastMathDefaultInfoVec.
size() == 3 &&
2628 "Expected FPFastMathDefaultInfoVec to have exactly 3 elements");
2629 FPFastMathDefaultInfoVec[
Index].SignedZeroInfNanPreserve =
true;
2633 std::unordered_map<unsigned, GlobalVariable *> GlobalVars;
2634 for (
auto &[Func, FPFastMathDefaultInfoVec] : FPFastMathDefaultInfoMap) {
2635 if (FPFastMathDefaultInfoVec.
empty())
2638 for (
const SPIRV::FPFastMathDefaultInfo &Info : FPFastMathDefaultInfoVec) {
2639 assert(
Info.Ty &&
"Expected target type for FPFastMathDefaultInfo");
2642 if (Flags == SPIRV::FPFastMathMode::None && !
Info.ContractionOff &&
2643 !
Info.SignedZeroInfNanPreserve && !
Info.FPFastMathDefault)
2647 if (
Info.ContractionOff && (Flags & SPIRV::FPFastMathMode::AllowContract))
2649 "and AllowContract");
2651 if (
Info.SignedZeroInfNanPreserve &&
2653 (SPIRV::FPFastMathMode::NotNaN | SPIRV::FPFastMathMode::NotInf |
2654 SPIRV::FPFastMathMode::NSZ))) {
2655 if (
Info.FPFastMathDefault)
2657 "SignedZeroInfNanPreserve but at least one of "
2658 "NotNaN/NotInf/NSZ is enabled.");
2661 if ((Flags & SPIRV::FPFastMathMode::AllowTransform) &&
2662 !((Flags & SPIRV::FPFastMathMode::AllowReassoc) &&
2663 (Flags & SPIRV::FPFastMathMode::AllowContract))) {
2665 "AllowTransform requires AllowReassoc and "
2666 "AllowContract to be set.");
2669 auto it = GlobalVars.find(Flags);
2670 GlobalVariable *GV =
nullptr;
2671 if (it != GlobalVars.end()) {
2677 ConstantInt::get(Type::getInt32Ty(
M.getContext()), Flags);
2680 GV =
new GlobalVariable(M,
2681 Type::getInt32Ty(
M.getContext()),
2686 GlobalVars[
Flags] = GV;
2692void SPIRVEmitIntrinsics::processInstrAfterVisit(Instruction *
I,
2695 bool IsConstComposite =
2696 II &&
II->getIntrinsicID() == Intrinsic::spv_const_composite;
2697 if (IsConstComposite && TrackConstants) {
2699 auto t = AggrConsts.
find(
I);
2703 {
II->getType(),
II->getType()}, t->second,
I, {},
B);
2705 NewOp->setArgOperand(0,
I);
2708 for (
const auto &
Op :
I->operands()) {
2712 unsigned OpNo =
Op.getOperandNo();
2713 if (
II && ((
II->getIntrinsicID() == Intrinsic::spv_gep && OpNo == 0) ||
2714 (!
II->isBundleOperand(OpNo) &&
2715 II->paramHasAttr(OpNo, Attribute::ImmArg))))
2719 IsPhi ?
B.SetInsertPointPastAllocas(
I->getParent()->getParent())
2720 :
B.SetInsertPoint(
I);
2723 Type *OpTy =
Op->getType();
2731 {OpTy, OpTyVal->
getType()},
Op, OpTyVal, {},
B);
2733 if (!IsConstComposite &&
isPointerTy(OpTy) && OpElemTy !=
nullptr &&
2734 OpElemTy != IntegerType::getInt8Ty(
I->getContext())) {
2736 SmallVector<Value *, 2>
Args = {
2739 CallInst *PtrCasted =
2740 B.CreateIntrinsic(Intrinsic::spv_ptrcast, {
Types},
Args);
2745 I->setOperand(OpNo, NewOp);
2747 if (Named.insert(
I).second)
2751Type *SPIRVEmitIntrinsics::deduceFunParamElementType(Function *
F,
2753 std::unordered_set<Function *> FVisited;
2754 return deduceFunParamElementType(
F,
OpIdx, FVisited);
2757Type *SPIRVEmitIntrinsics::deduceFunParamElementType(
2758 Function *
F,
unsigned OpIdx, std::unordered_set<Function *> &FVisited) {
2760 if (!FVisited.insert(
F).second)
2763 std::unordered_set<Value *> Visited;
2766 for (User *U :
F->users()) {
2778 if (
Type *Ty = deduceElementTypeHelper(OpArg, Visited,
false))
2781 for (User *OpU : OpArg->
users()) {
2783 if (!Inst || Inst == CI)
2786 if (
Type *Ty = deduceElementTypeHelper(Inst, Visited,
false))
2793 if (FVisited.find(OuterF) != FVisited.end())
2795 for (
unsigned i = 0; i < OuterF->
arg_size(); ++i) {
2796 if (OuterF->
getArg(i) == OpArg) {
2797 Lookup.push_back(std::make_pair(OuterF, i));
2804 for (
auto &Pair :
Lookup) {
2805 if (
Type *Ty = deduceFunParamElementType(Pair.first, Pair.second, FVisited))
2812void SPIRVEmitIntrinsics::processParamTypesByFunHeader(Function *
F,
2814 B.SetInsertPointPastAllocas(
F);
2828 for (User *U :
F->users()) {
2844 for (User *U : Arg->
users()) {
2848 CI->
getParent()->getParent() == CurrF) {
2850 deduceOperandElementTypeFunctionPointer(CI,
Ops, ElemTy,
false);
2861void SPIRVEmitIntrinsics::processParamTypes(Function *
F,
IRBuilder<> &
B) {
2862 B.SetInsertPointPastAllocas(
F);
2868 if (!ElemTy && (ElemTy = deduceFunParamElementType(
F,
OpIdx)) !=
nullptr) {
2870 DenseSet<std::pair<Value *, Value *>> VisitedSubst;
2872 propagateElemType(Arg, IntegerType::getInt8Ty(
F->getContext()),
2884 bool IsNewFTy =
false;
2900bool SPIRVEmitIntrinsics::processFunctionPointers(
Module &M) {
2903 if (
F.isIntrinsic())
2905 if (
F.isDeclaration()) {
2906 for (User *U :
F.users()) {
2919 for (User *U :
F.users()) {
2921 if (!
II ||
II->arg_size() != 3 ||
II->getOperand(0) != &
F)
2923 if (
II->getIntrinsicID() == Intrinsic::spv_assign_ptr_type ||
2924 II->getIntrinsicID() == Intrinsic::spv_ptrcast) {
2931 if (Worklist.
empty())
2934 LLVMContext &Ctx =
M.getContext();
2939 for (Function *
F : Worklist) {
2941 for (
const auto &Arg :
F->args())
2943 IRB.CreateCall(
F, Args);
2945 IRB.CreateRetVoid();
2951void SPIRVEmitIntrinsics::applyDemangledPtrArgTypes(
IRBuilder<> &
B) {
2952 DenseMap<Function *, CallInst *> Ptrcasts;
2953 for (
auto It : FDeclPtrTys) {
2955 for (
auto *U :
F->users()) {
2960 for (
auto [Idx, ElemTy] : It.second) {
2968 B.SetInsertPointPastAllocas(Arg->
getParent());
2972 }
else if (isaGEP(Param)) {
2973 replaceUsesOfWithSpvPtrcast(Param,
normalizeType(ElemTy), CI,
2982 .getFirstNonPHIOrDbgOrAlloca());
3003SPIRVEmitIntrinsics::simplifyZeroLengthArrayGepInst(GetElementPtrInst *
GEP) {
3010 Type *SrcTy =
GEP->getSourceElementType();
3011 SmallVector<Value *, 8> Indices(
GEP->indices());
3013 if (ArrTy && ArrTy->getNumElements() == 0 &&
3015 Indices.erase(Indices.begin());
3016 SrcTy = ArrTy->getElementType();
3018 GEP->getNoWrapFlags(),
"",
3019 GEP->getIterator());
3024void SPIRVEmitIntrinsics::emitUnstructuredLoopControls(Function &
F,
3030 if (!
ST->canUseExtension(
3031 SPIRV::Extension::SPV_INTEL_unstructured_loop_controls))
3034 for (BasicBlock &BB :
F) {
3036 MDNode *LoopMD =
Term->getMetadata(LLVMContext::MD_loop);
3042 unsigned LC =
Ops[0];
3043 if (LC == SPIRV::LoopControl::None)
3047 B.SetInsertPoint(Term);
3048 SmallVector<Value *, 4> IntrArgs;
3050 for (
unsigned I = 1;
I <
Ops.size(); ++
I)
3052 B.CreateIntrinsic(Intrinsic::spv_loop_control_intel, IntrArgs);
3056bool SPIRVEmitIntrinsics::runOnFunction(Function &Func) {
3057 if (
Func.isDeclaration())
3061 GR =
ST.getSPIRVGlobalRegistry();
3065 ST.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers);
3070 AggrConstTypes.
clear();
3075 SmallPtrSet<Instruction *, 4> DeadInsts;
3080 if ((!
GEP && !SGEP) || GR->findDeducedElementType(&
I))
3084 GR->addDeducedElementType(SGEP,
3089 GetElementPtrInst *NewGEP = simplifyZeroLengthArrayGepInst(
GEP);
3091 GEP->replaceAllUsesWith(NewGEP);
3095 if (
Type *GepTy = getGEPType(
GEP))
3099 for (
auto *
I : DeadInsts) {
3100 assert(
I->use_empty() &&
"Dead instruction should not have any uses left");
3101 I->eraseFromParent();
3104 processParamTypesByFunHeader(CurrF,
B);
3113 Type *ElTy =
SI->getValueOperand()->getType();
3118 B.SetInsertPoint(&
Func.getEntryBlock(),
Func.getEntryBlock().begin());
3119 for (
auto &GV :
Func.getParent()->globals())
3120 processGlobalValue(GV,
B);
3122 preprocessUndefs(
B);
3123 preprocessCompositeConstants(
B);
3127 applyDemangledPtrArgTypes(
B);
3130 for (
auto &
I : Worklist) {
3132 if (isConvergenceIntrinsic(
I))
3135 bool Postpone = insertAssignPtrTypeIntrs(
I,
B,
false);
3137 insertAssignTypeIntrs(
I,
B);
3138 insertPtrCastOrAssignTypeInstr(
I,
B);
3142 if (Postpone && !GR->findAssignPtrTypeInstr(
I))
3143 insertAssignPtrTypeIntrs(
I,
B,
true);
3146 useRoundingMode(FPI,
B);
3151 SmallPtrSet<Instruction *, 4> IncompleteRets;
3153 deduceOperandElementType(&
I, &IncompleteRets);
3157 for (BasicBlock &BB : Func)
3158 for (PHINode &Phi : BB.
phis())
3160 deduceOperandElementType(&Phi,
nullptr);
3162 for (
auto *
I : Worklist) {
3163 TrackConstants =
true;
3173 if (isConvergenceIntrinsic(
I))
3177 processInstrAfterVisit(
I,
B);
3180 emitUnstructuredLoopControls(Func,
B);
3186bool SPIRVEmitIntrinsics::postprocessTypes(
Module &M) {
3187 if (!GR || TodoTypeSz == 0)
3190 unsigned SzTodo = TodoTypeSz;
3191 DenseMap<Value *, SmallPtrSet<Value *, 4>> ToProcess;
3196 CallInst *AssignCI = GR->findAssignPtrTypeInstr(
Op);
3197 Type *KnownTy = GR->findDeducedElementType(
Op);
3198 if (!KnownTy || !AssignCI)
3204 std::unordered_set<Value *> Visited;
3205 if (
Type *ElemTy = deduceElementTypeHelper(
Op, Visited,
false,
true)) {
3206 if (ElemTy != KnownTy) {
3207 DenseSet<std::pair<Value *, Value *>> VisitedSubst;
3208 propagateElemType(CI, ElemTy, VisitedSubst);
3215 if (
Op->hasUseList()) {
3216 for (User *U :
Op->users()) {
3223 if (TodoTypeSz == 0)
3228 SmallPtrSet<Instruction *, 4> IncompleteRets;
3230 auto It = ToProcess.
find(&
I);
3231 if (It == ToProcess.
end())
3233 It->second.remove_if([
this](
Value *V) {
return !isTodoType(V); });
3234 if (It->second.size() == 0)
3236 deduceOperandElementType(&
I, &IncompleteRets, &It->second,
true);
3237 if (TodoTypeSz == 0)
3242 return SzTodo > TodoTypeSz;
3246void SPIRVEmitIntrinsics::parseFunDeclarations(
Module &M) {
3248 if (!
F.isDeclaration() ||
F.isIntrinsic())
3252 if (DemangledName.empty())
3256 auto [Grp, Opcode, ExtNo] = SPIRV::mapBuiltinToOpcode(
3257 DemangledName,
ST.getPreferredInstructionSet());
3258 if (Opcode != SPIRV::OpGroupAsyncCopy)
3261 SmallVector<unsigned> Idxs;
3270 LLVMContext &Ctx =
F.getContext();
3272 SPIRV::parseBuiltinTypeStr(TypeStrs, DemangledName, Ctx);
3273 if (!TypeStrs.
size())
3276 for (
unsigned Idx : Idxs) {
3277 if (Idx >= TypeStrs.
size())
3280 SPIRV::parseBuiltinCallArgumentType(TypeStrs[Idx].trim(), Ctx))
3283 FDeclPtrTys[&
F].push_back(std::make_pair(Idx, ElemTy));
3288bool SPIRVEmitIntrinsics::processMaskedMemIntrinsic(IntrinsicInst &
I) {
3289 const SPIRVSubtarget &
ST = TM->
getSubtarget<SPIRVSubtarget>(*
I.getFunction());
3291 if (
I.getIntrinsicID() == Intrinsic::masked_gather) {
3292 if (!
ST.canUseExtension(
3293 SPIRV::Extension::SPV_INTEL_masked_gather_scatter)) {
3294 I.getContext().emitError(
3295 &
I,
"llvm.masked.gather requires SPV_INTEL_masked_gather_scatter "
3299 I.eraseFromParent();
3305 Value *Ptrs =
I.getArgOperand(0);
3307 Value *Passthru =
I.getArgOperand(2);
3310 uint32_t Alignment =
I.getParamAlign(0).valueOrOne().value();
3312 SmallVector<Value *, 4>
Args = {Ptrs,
B.getInt32(Alignment),
Mask,
3317 auto *NewI =
B.CreateIntrinsic(Intrinsic::spv_masked_gather, Types, Args);
3319 I.eraseFromParent();
3323 if (
I.getIntrinsicID() == Intrinsic::masked_scatter) {
3324 if (!
ST.canUseExtension(
3325 SPIRV::Extension::SPV_INTEL_masked_gather_scatter)) {
3326 I.getContext().emitError(
3327 &
I,
"llvm.masked.scatter requires SPV_INTEL_masked_gather_scatter "
3330 I.eraseFromParent();
3336 Value *Values =
I.getArgOperand(0);
3337 Value *Ptrs =
I.getArgOperand(1);
3342 uint32_t Alignment =
I.getParamAlign(1).valueOrOne().value();
3344 SmallVector<Value *, 4>
Args = {Values, Ptrs,
B.getInt32(Alignment),
Mask};
3348 B.CreateIntrinsic(Intrinsic::spv_masked_scatter, Types, Args);
3349 I.eraseFromParent();
3356bool SPIRVEmitIntrinsics::convertMaskedMemIntrinsics(
Module &M) {
3360 if (!
F.isIntrinsic())
3363 if (IID != Intrinsic::masked_gather && IID != Intrinsic::masked_scatter)
3368 Changed |= processMaskedMemIntrinsic(*
II);
3372 F.eraseFromParent();
3378bool SPIRVEmitIntrinsics::runOnModule(
Module &M) {
3381 Changed |= convertMaskedMemIntrinsics(M);
3383 parseFunDeclarations(M);
3384 insertConstantsForFPFastMathDefault(M);
3395 if (!
F.isDeclaration() && !
F.isIntrinsic()) {
3397 processParamTypes(&
F,
B);
3401 CanTodoType =
false;
3402 Changed |= postprocessTypes(M);
3405 Changed |= processFunctionPointers(M);
3411 return new SPIRVEmitIntrinsics(TM);
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Expand Atomic instructions
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static void replaceAllUsesWith(Value *Old, Value *New, SmallPtrSet< BasicBlock *, 32 > &FreshBBs, bool IsHuge)
Replace all old uses with new ones, and push the updated BBs into FreshBBs.
static Type * getPointeeType(Value *Ptr, const DataLayout &DL)
This file defines the DenseSet and SmallDenseSet classes.
static bool runOnFunction(Function &F, bool PostInlining)
iv Induction Variable Users
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
Machine Check Debug Module
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
static unsigned getNumElements(Type *Ty)
static bool isMemInstrToReplace(Instruction *I)
static bool isAggrConstForceInt32(const Value *V)
static SPIRV::FPFastMathDefaultInfoVector & getOrCreateFPFastMathDefaultInfoVec(const Module &M, DenseMap< Function *, SPIRV::FPFastMathDefaultInfoVector > &FPFastMathDefaultInfoMap, Function *F)
static Type * getAtomicElemTy(SPIRVGlobalRegistry *GR, Instruction *I, Value *PointerOperand)
static void reportFatalOnTokenType(const Instruction *I)
static void setInsertPointAfterDef(IRBuilder<> &B, Instruction *I)
static void emitAssignName(Instruction *I, IRBuilder<> &B)
static Type * getPointeeTypeByCallInst(StringRef DemangledName, Function *CalledF, unsigned OpIdx)
static void createRoundingModeDecoration(Instruction *I, unsigned RoundingModeDeco, IRBuilder<> &B)
static void createDecorationIntrinsic(Instruction *I, MDNode *Node, IRBuilder<> &B)
static SPIRV::FPFastMathDefaultInfo & getFPFastMathDefaultInfo(SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec, const Type *Ty)
static cl::opt< bool > SpirvEmitOpNames("spirv-emit-op-names", cl::desc("Emit OpName for all instructions"), cl::init(false))
static bool IsKernelArgInt8(Function *F, StoreInst *SI)
static void addSaturatedDecorationToIntrinsic(Instruction *I, IRBuilder<> &B)
static bool isFirstIndexZero(const GetElementPtrInst *GEP)
static void setInsertPointSkippingPhis(IRBuilder<> &B, Instruction *I)
static FunctionType * getFunctionPointerElemType(Function *F, SPIRVGlobalRegistry *GR)
static void createSaturatedConversionDecoration(Instruction *I, IRBuilder<> &B)
static bool shouldEmitIntrinsicsForGlobalValue(const GlobalVariableUsers &GVUsers, const GlobalVariable &GV, const Function *F)
static Type * restoreMutatedType(SPIRVGlobalRegistry *GR, Instruction *I, Type *Ty)
static bool requireAssignType(Instruction *I)
static void insertSpirvDecorations(MachineFunction &MF, SPIRVGlobalRegistry *GR, MachineIRBuilder MIB)
static void visit(BasicBlock &Start, std::function< bool(BasicBlock *)> op)
StringSet - A set-like wrapper for the StringMap.
static SymbolRef::Type getType(const Symbol *Sym)
LocallyHashedType DenseMapInfo< LocallyHashedType >::Empty
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
static int Lookup(ArrayRef< TableEntry > Table, unsigned Opcode)
This class represents an incoming formal argument to a Function.
const Function * getParent() const
static unsigned getPointerOperandIndex()
static unsigned getPointerOperandIndex()
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
const Function * getParent() const
Return the enclosing method, or null if none.
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
LLVM_ABI LLVMContext & getContext() const
Get the context in which this basic block lives.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
static LLVM_ABI BlockAddress * get(Function *F, BasicBlock *BB)
Return a BlockAddress for the specified function and basic block.
bool isInlineAsm() const
Check if this call is an inline asm statement.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
LLVM_ABI bool isIndirectCall() const
Return true if the callsite is an indirect call.
Value * getCalledOperand() const
Value * getArgOperand(unsigned i) const
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI std::optional< RoundingMode > getRoundingMode() const
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
static LLVM_ABI FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
const DataLayout & getDataLayout() const
Get the data layout of the module this function belongs to.
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Type * getReturnType() const
Returns the type of the ret val.
Argument * getArg(unsigned i) const
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
static LLVM_ABI Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static unsigned getPointerOperandIndex()
PointerType * getType() const
Global values are always pointers.
@ InternalLinkage
Rename collisions when linking (static functions).
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
LLVM_ABI void addDestination(BasicBlock *Dest)
Add a destination.
Base class for instruction visitors.
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
LLVM_ABI void copyMetadata(const Instruction &SrcInst, ArrayRef< unsigned > WL=ArrayRef< unsigned >())
Copy metadata from SrcInst to this instruction.
This is an important class for using LLVM in a threaded context.
static unsigned getPointerOperandIndex()
const MDOperand & getOperand(unsigned I) const
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
unsigned getNumOperands() const
Return number of MDNode operands.
static LLVM_ABI MDString * get(LLVMContext &Context, StringRef Str)
Flags
Flags values. These may be or'd together.
ModulePass class - This class is used to implement unstructured interprocedural optimizations and ana...
A Module instance is used to store all the information related to an LLVM module.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
void addAssignPtrTypeInstr(Value *Val, CallInst *AssignPtrTyCI)
void buildAssignPtr(IRBuilder<> &B, Type *ElemTy, Value *Arg)
Type * findDeducedCompositeType(const Value *Val)
void replaceAllUsesWith(Value *Old, Value *New, bool DeleteOld=true)
void addDeducedElementType(Value *Val, Type *Ty)
void addReturnType(const Function *ArgF, TypedPointerType *DerivedTy)
Type * findMutated(const Value *Val)
void addDeducedCompositeType(Value *Val, Type *Ty)
void buildAssignType(IRBuilder<> &B, Type *Ty, Value *Arg)
Type * findDeducedElementType(const Value *Val)
void updateAssignType(CallInst *AssignCI, Value *Arg, Value *OfType)
CallInst * findAssignPtrTypeInstr(const Value *Val)
const SPIRVTargetLowering * getTargetLowering() const override
bool isLogicalSPIRV() const
bool canUseExtension(SPIRV::Extension::Extension E) const
const SPIRVSubtarget * getSubtargetImpl() const
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
void assign(size_type NumElts, ValueParamT Elt)
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
static unsigned getPointerOperandIndex()
iterator find(StringRef Key)
StringRef - Represent a constant reference to a string, i.e.
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
StringSet - A wrapper for StringMap that provides set-like functionality.
bool contains(StringRef key) const
Check if the set contains the given key.
static LLVM_ABI StructType * create(LLVMContext &Context, StringRef Name)
This creates an identified struct.
static unsigned getPointerOperandIndex()
static LLVM_ABI TargetExtType * get(LLVMContext &Context, StringRef Name, ArrayRef< Type * > Types={}, ArrayRef< unsigned > Ints={})
Return a target extension type having the specified name and optional type and integer parameters.
const STC & getSubtarget(const Function &F) const
This method returns a pointer to the specified type of TargetSubtargetInfo.
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
bool isArrayTy() const
True if this is an instance of ArrayType.
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
bool isPointerTy() const
True if this is an instance of PointerType.
Type * getArrayElementType() const
LLVM_ABI StringRef getTargetExtName() const
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
bool isStructTy() const
True if this is an instance of StructType.
bool isTargetExtTy() const
Return true if this is a target extension type.
bool isAggregateType() const
Return true if the type is an aggregate type.
static LLVM_ABI Type * getDoubleTy(LLVMContext &C)
static LLVM_ABI Type * getFloatTy(LLVMContext &C)
static LLVM_ABI Type * getHalfTy(LLVMContext &C)
bool isVoidTy() const
Return true if this is 'void'.
static LLVM_ABI bool isValidElementType(Type *ElemTy)
Return true if the specified type is valid as a element type.
static LLVM_ABI TypedPointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
void setOperand(unsigned i, Value *Val)
LLVM_ABI bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
user_iterator user_begin()
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
iterator_range< user_iterator > users()
void mutateType(Type *Ty)
Mutate the type of this Value to be of the specified type.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
std::pair< iterator, bool > insert(const ValueT &V)
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
const ParentTy * getParent() const
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ SPIR_KERNEL
Used for SPIR kernel functions.
@ C
The default llvm calling convention, compatible with C.
@ BasicBlock
Various leaf nodes.
bool match(Val *V, const Pattern &P)
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
initializer< Ty > init(const Ty &Val)
@ User
could "use" a pointer
NodeAddr< PhiNode * > Phi
NodeAddr< NodeBase * > Node
NodeAddr< FuncNode * > Func
friend class Instruction
Iterator for Instructions in a `BasicBlock.
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
FunctionAddr VTableAddr Value
bool isTypedPointerWrapper(const TargetExtType *ExtTy)
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
ModulePass * createSPIRVEmitIntrinsicsPass(SPIRVTargetMachine *TM)
unsigned getPointerAddressSpace(const Type *T)
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
FunctionAddr VTableAddr uintptr_t uintptr_t Int32Ty
CallInst * buildIntrWithMD(Intrinsic::ID IntrID, ArrayRef< Type * > Types, Value *Arg, Value *Arg2, ArrayRef< Constant * > Imms, IRBuilder<> &B)
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
bool isNestedPointer(const Type *Ty)
Function * getOrCreateBackendServiceFunction(Module &M)
MetadataAsValue * buildMD(Value *Arg)
std::string getOclOrSpirvBuiltinDemangledName(StringRef Name)
SmallVector< unsigned, 1 > getSpirvLoopControlOperandsFromLoopMetadata(MDNode *LoopMD)
auto reverse(ContainerTy &&C)
Type * getTypedPointerWrapper(Type *ElemTy, unsigned AS)
bool isPointerTy(const Type *T)
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
bool set_union(S1Ty &S1, const S2Ty &S2)
set_union(A, B) - Compute A := A u B, return whether A changed.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
SPIRV::Scope::Scope getMemScope(LLVMContext &Ctx, SyncScope::ID Id)
@ Ref
The access may reference the value stored in memory.
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
DWARFExpression::Operation Op
Type * getPointeeTypeByAttr(Argument *Arg)
bool hasPointeeTypeAttr(Argument *Arg)
constexpr unsigned BitWidth
bool isEquivalentTypes(Type *Ty1, Type *Ty2)
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
iterator_range< pointer_iterator< WrappedIteratorT > > make_pointer_range(RangeT &&Range)
bool hasInitializer(const GlobalVariable *GV)
Type * normalizeType(Type *Ty)
@ Enabled
Convert any .debug_str_offsets tables to DWARF64 if needed.
bool isSpvIntrinsic(const MachineInstr &MI, Intrinsic::ID IntrinsicID)
PoisonValue * getNormalizedPoisonValue(Type *Ty)
bool isUntypedPointerTy(const Type *T)
Type * reconstitutePeeledArrayType(Type *Ty)
SPIRV::MemorySemantics::MemorySemantics getMemSemantics(AtomicOrdering Ord)
static size_t computeFPFastMathDefaultInfoVecIndex(size_t BitWidth)