#include "llvm/IR/IntrinsicsSPIRV.h"
#include <unordered_set>

    cl::desc("Emit OpName for all instructions"),

#define GET_BuiltinGroup_DECL
#include "SPIRVGenTables.inc"
class GlobalVariableUsers {
  template <typename T1, typename T2>
  using OneToManyMapTy = DenseMap<T1, SmallPtrSet<T2, 4>>;

  OneToManyMapTy<const GlobalVariable *, const Function *> GlobalIsUsedByFun;

  void collectGlobalUsers(
      const GlobalVariable *GV,
      OneToManyMapTy<const GlobalVariable *, const GlobalVariable *>
          &GlobalIsUsedByGlobal) {
    while (!Stack.empty()) {
          GlobalIsUsedByFun[GV].insert(I->getFunction());
          GlobalIsUsedByGlobal[GV].insert(UserGV);
        Stack.append(C->user_begin(), C->user_end());
  bool propagateGlobalToGlobalUsers(
      OneToManyMapTy<const GlobalVariable *, const GlobalVariable *>
          &GlobalIsUsedByGlobal) {
    for (auto &[GV, UserGlobals] : GlobalIsUsedByGlobal) {
      OldUsersGlobals.assign(UserGlobals.begin(), UserGlobals.end());
      for (const GlobalVariable *UserGV : OldUsersGlobals) {
        auto It = GlobalIsUsedByGlobal.find(UserGV);
        if (It == GlobalIsUsedByGlobal.end())
  void propagateGlobalToFunctionReferences(
      OneToManyMapTy<const GlobalVariable *, const GlobalVariable *>
          &GlobalIsUsedByGlobal) {
    for (auto &[GV, UserGlobals] : GlobalIsUsedByGlobal) {
      auto &UserFunctions = GlobalIsUsedByFun[GV];
      for (const GlobalVariable *UserGV : UserGlobals) {
        auto It = GlobalIsUsedByFun.find(UserGV);
        if (It == GlobalIsUsedByFun.end())
    OneToManyMapTy<const GlobalVariable *, const GlobalVariable *>
        GlobalIsUsedByGlobal;
    GlobalIsUsedByFun.clear();
    for (GlobalVariable &GV : M.globals())
      collectGlobalUsers(&GV, GlobalIsUsedByGlobal);

    while (propagateGlobalToGlobalUsers(GlobalIsUsedByGlobal))

    propagateGlobalToFunctionReferences(GlobalIsUsedByGlobal);
  using FunctionSetType = typename decltype(GlobalIsUsedByFun)::mapped_type;

  const FunctionSetType &
  getTransitiveUserFunctions(const GlobalVariable &GV) const {
    auto It = GlobalIsUsedByFun.find(&GV);
    if (It != GlobalIsUsedByFun.end())

    static const FunctionSetType Empty{};
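// NOTE: judging from the fragments above, GlobalVariableUsers appears to
// compute, for each global variable, the set of functions that transitively
// reference it: collectGlobalUsers() records direct function users and
// global-to-global references, propagateGlobalToGlobalUsers() closes that
// relation to a fixed point, and propagateGlobalToFunctionReferences() folds
// the result into the map queried by getTransitiveUserFunctions().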
static bool isaGEP(const Value *V) {

static std::optional<uint64_t> getByteAddressingMultiplier(Type *Ty) {
    return AT->getNumElements();
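// NOTE: from its uses further down (e.g. the "We only rewrite byte-addressing
// GEP" assertion in walkLogicalAccessChain), getByteAddressingMultiplier()
// appears to return how many bytes one GEP index step covers for
// byte-addressed source element types -- for instance an [N x i8] array type
// would likely yield a multiplier of N -- and std::nullopt otherwise.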
class SPIRVEmitIntrinsics
    : public ModulePass,
      public InstVisitor<SPIRVEmitIntrinsics, Instruction *> {
  const SPIRVTargetMachine &TM;
  SPIRVGlobalRegistry *GR = nullptr;
  bool TrackConstants = true;
  bool HaveFunPtrs = false;
  DenseMap<Instruction *, Constant *> AggrConsts;
  DenseMap<Instruction *, Type *> AggrConstTypes;
  DenseSet<Instruction *> AggrStores;
  SmallPtrSet<Instruction *, 8> DeletedInstrs;
  GlobalVariableUsers GVUsers;
  std::unordered_set<Value *> Named;
  DenseMap<Function *, SmallVector<std::pair<unsigned, Type *>>> FDeclPtrTys;

  bool CanTodoType = true;
  unsigned TodoTypeSz = 0;
  DenseMap<Value *, bool> TodoType;
  void insertTodoType(Value *Op) {
    if (CanTodoType && !isaGEP(Op)) {
      auto It = TodoType.try_emplace(Op, true);

  void eraseTodoType(Value *Op) {
    auto It = TodoType.find(Op);
    if (It != TodoType.end() && It->second) {

    auto It = TodoType.find(Op);
    return It != TodoType.end() && It->second;

  std::unordered_set<Instruction *> TypeValidated;

  enum WellKnownTypes { Event };
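// NOTE: TodoType appears to act as a worklist of pointer values whose pointee
// element type could not be deduced on the first pass; TodoTypeSz counts the
// live entries, and postprocessTypes() further below keeps retrying deduction
// for them until no more progress is made.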
  Type *deduceElementType(Value *I, bool UnknownElemTypeI8);
  Type *deduceElementTypeHelper(Value *I, bool UnknownElemTypeI8);
  Type *deduceElementTypeHelper(Value *I, std::unordered_set<Value *> &Visited,
                                bool UnknownElemTypeI8,
                                bool IgnoreKnownType = false);
  Type *deduceElementTypeByValueDeep(Type *ValueTy, Value *Operand,
                                     bool UnknownElemTypeI8);
  Type *deduceElementTypeByValueDeep(Type *ValueTy, Value *Operand,
                                     std::unordered_set<Value *> &Visited,
                                     bool UnknownElemTypeI8);
  Type *deduceElementTypeByUsersDeep(Value *Op,
                                     std::unordered_set<Value *> &Visited,
                                     bool UnknownElemTypeI8);
                                     bool UnknownElemTypeI8);
  Type *deduceNestedTypeHelper(User *U, bool UnknownElemTypeI8);
  Type *deduceNestedTypeHelper(User *U, Type *Ty,
                               std::unordered_set<Value *> &Visited,
                               bool UnknownElemTypeI8);
  void deduceOperandElementType(Instruction *I,
                                SmallPtrSet<Instruction *, 4> *IncompleteRets,
                                const SmallPtrSet<Value *, 4> *AskOps = nullptr,
                                bool IsPostprocessing = false);

  void simplifyNullAddrSpaceCasts();
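// NOTE: the UnknownElemTypeI8 flag threaded through the deduce* helpers
// appears to select the fallback behaviour when no pointee type can be
// inferred: deduceElementType() (defined below) then answers with i8 and the
// value is queued in TodoType so the deduction can be retried during
// postprocessing.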
262 Type *reconstructType(
Value *
Op,
bool UnknownElemTypeI8,
263 bool IsPostprocessing);
265 void replaceMemInstrUses(Instruction *Old, Instruction *New,
IRBuilder<> &
B);
267 bool insertAssignPtrTypeIntrs(Instruction *
I,
IRBuilder<> &
B,
268 bool UnknownElemTypeI8);
270 void insertAssignPtrTypeTargetExt(TargetExtType *AssignedType,
Value *V,
272 void replacePointerOperandWithPtrCast(Instruction *
I,
Value *Pointer,
273 Type *ExpectedElementType,
274 unsigned OperandToReplace,
276 void insertPtrCastOrAssignTypeInstr(Instruction *
I,
IRBuilder<> &
B);
277 bool shouldTryToAddMemAliasingDecoration(Instruction *Inst);
279 void insertConstantsForFPFastMathDefault(
Module &M);
281 void processGlobalValue(GlobalVariable &GV,
IRBuilder<> &
B);
283 void processParamTypesByFunHeader(Function *
F,
IRBuilder<> &
B);
284 Type *deduceFunParamElementType(Function *
F,
unsigned OpIdx);
285 Type *deduceFunParamElementType(Function *
F,
unsigned OpIdx,
286 std::unordered_set<Function *> &FVisited);
288 bool deduceOperandElementTypeCalledFunction(
290 Type *&KnownElemTy,
bool &Incomplete);
291 void deduceOperandElementTypeFunctionPointer(
293 Type *&KnownElemTy,
bool IsPostprocessing);
294 bool deduceOperandElementTypeFunctionRet(
295 Instruction *
I, SmallPtrSet<Instruction *, 4> *IncompleteRets,
296 const SmallPtrSet<Value *, 4> *AskOps,
bool IsPostprocessing,
299 CallInst *buildSpvPtrcast(Function *
F,
Value *
Op,
Type *ElemTy);
300 void replaceUsesOfWithSpvPtrcast(
Value *
Op,
Type *ElemTy, Instruction *
I,
301 DenseMap<Function *, CallInst *> Ptrcasts);
303 DenseSet<std::pair<Value *, Value *>> &VisitedSubst);
306 DenseSet<std::pair<Value *, Value *>> &VisitedSubst);
307 void propagateElemTypeRec(
Value *
Op,
Type *PtrElemTy,
Type *CastElemTy,
308 DenseSet<std::pair<Value *, Value *>> &VisitedSubst,
309 std::unordered_set<Value *> &Visited,
310 DenseMap<Function *, CallInst *> Ptrcasts);
313 void replaceAllUsesWithAndErase(
IRBuilder<> &
B, Instruction *Src,
314 Instruction *Dest,
bool DeleteOld =
true);
318 GetElementPtrInst *simplifyZeroLengthArrayGepInst(GetElementPtrInst *
GEP);
321 bool postprocessTypes(
Module &M);
322 bool processFunctionPointers(
Module &M);
323 void parseFunDeclarations(
Module &M);
324 void useRoundingMode(ConstrainedFPIntrinsic *FPI,
IRBuilder<> &
B);
325 bool processMaskedMemIntrinsic(IntrinsicInst &
I);
326 bool convertMaskedMemIntrinsics(
Module &M);
327 void preprocessBoolVectorBitcasts(Function &
F);
329 void emitUnstructuredLoopControls(Function &
F,
IRBuilder<> &
B);
346 bool walkLogicalAccessChain(
347 GetElementPtrInst &
GEP,
348 const std::function<
void(
Type *PointedType, uint64_t Index)>
351 uint64_t Multiplier)> &OnDynamicIndexing);
353 bool walkLogicalAccessChainDynamic(
354 Type *CurType,
Value *Operand, uint64_t Multiplier,
355 const std::function<
void(
Type *, uint64_t)> &OnLiteralIndexing,
356 const std::function<
void(
Type *,
Value *, uint64_t)> &OnDynamicIndexing);
358 bool walkLogicalAccessChainConstant(
360 const std::function<
void(
Type *, uint64_t)> &OnLiteralIndexing);
366 Type *getGEPType(GetElementPtrInst *
GEP);
373 Type *getGEPTypeLogical(GetElementPtrInst *
GEP);
375 Instruction *buildLogicalAccessChainFromGEP(GetElementPtrInst &
GEP);
  SPIRVEmitIntrinsics(const SPIRVTargetMachine &TM) : ModulePass(ID), TM(TM) {}

  Instruction *visitGetElementPtrInst(GetElementPtrInst &I);
  Instruction *visitInsertElementInst(InsertElementInst &I);
  Instruction *visitExtractElementInst(ExtractElementInst &I);
  Instruction *visitExtractValueInst(ExtractValueInst &I);
  Instruction *visitAtomicCmpXchgInst(AtomicCmpXchgInst &I);

  StringRef getPassName() const override { return "SPIRV emit intrinsics"; }

  bool runOnModule(Module &M) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    ModulePass::getAnalysisUsage(AU);

                     Intrinsic::experimental_convergence_loop,
                     Intrinsic::experimental_convergence_anchor>());
bool expectIgnoredInIRTranslation(const Instruction *I) {
             Intrinsic::spv_resource_handlefrombinding,
             Intrinsic::spv_resource_getbasepointer,
             Intrinsic::spv_resource_getpointer>());

    return getPointerRoot(V);

char SPIRVEmitIntrinsics::ID = 0;

                "SPIRV emit intrinsics", false, false)
445 bool IsUndefAggregate =
isa<UndefValue>(V) && V->getType()->isAggregateType();
452 B.SetInsertPoint(
I->getParent()->getFirstNonPHIOrDbgOrAlloca());
458 B.SetCurrentDebugLocation(
I->getDebugLoc());
459 if (
I->getType()->isVoidTy())
460 B.SetInsertPoint(
I->getNextNode());
462 B.SetInsertPoint(*
I->getInsertionPointAfterDef());
472 if (
I->getType()->isTokenTy())
474 "does not support token type",
479 if (!
I->hasName() ||
I->getType()->isAggregateType() ||
480 expectIgnoredInIRTranslation(
I))
491 if (
F &&
F->getName().starts_with(
"llvm.spv.alloca"))
502 std::vector<Value *> Args = {
505 B.CreateIntrinsic(Intrinsic::spv_assign_name, {
I->getType()}, Args);
508void SPIRVEmitIntrinsics::replaceAllUsesWith(
Value *Src,
Value *Dest,
512 if (isTodoType(Src)) {
515 insertTodoType(Dest);
519void SPIRVEmitIntrinsics::replaceAllUsesWithAndErase(
IRBuilder<> &
B,
524 std::string
Name = Src->hasName() ? Src->getName().str() :
"";
525 Src->eraseFromParent();
528 if (Named.insert(Dest).second)
553Type *SPIRVEmitIntrinsics::reconstructType(
Value *
Op,
bool UnknownElemTypeI8,
554 bool IsPostprocessing) {
563 if (
auto It = AggrConstTypes.
find(OpI); It != AggrConstTypes.
end())
577 if (UnknownElemTypeI8) {
578 if (!IsPostprocessing)
586CallInst *SPIRVEmitIntrinsics::buildSpvPtrcast(Function *
F,
Value *
Op,
594 B.SetInsertPointPastAllocas(OpA->getParent());
597 B.SetInsertPoint(
F->getEntryBlock().getFirstNonPHIOrDbgOrAlloca());
599 Type *OpTy =
Op->getType();
603 CallInst *PtrCasted =
604 B.CreateIntrinsic(Intrinsic::spv_ptrcast, {
Types},
Args);
609void SPIRVEmitIntrinsics::replaceUsesOfWithSpvPtrcast(
611 DenseMap<Function *, CallInst *> Ptrcasts) {
613 CallInst *PtrCastedI =
nullptr;
614 auto It = Ptrcasts.
find(
F);
615 if (It == Ptrcasts.
end()) {
616 PtrCastedI = buildSpvPtrcast(
F,
Op, ElemTy);
617 Ptrcasts[
F] = PtrCastedI;
619 PtrCastedI = It->second;
621 I->replaceUsesOfWith(
Op, PtrCastedI);
624void SPIRVEmitIntrinsics::propagateElemType(
626 DenseSet<std::pair<Value *, Value *>> &VisitedSubst) {
627 DenseMap<Function *, CallInst *> Ptrcasts;
629 for (
auto *U :
Users) {
632 if (!VisitedSubst.insert(std::make_pair(U,
Op)).second)
637 if (isaGEP(UI) || TypeValidated.find(UI) != TypeValidated.end())
638 replaceUsesOfWithSpvPtrcast(
Op, ElemTy, UI, Ptrcasts);
642void SPIRVEmitIntrinsics::propagateElemTypeRec(
644 DenseSet<std::pair<Value *, Value *>> &VisitedSubst) {
645 std::unordered_set<Value *> Visited;
646 DenseMap<Function *, CallInst *> Ptrcasts;
647 propagateElemTypeRec(
Op, PtrElemTy, CastElemTy, VisitedSubst, Visited,
648 std::move(Ptrcasts));
651void SPIRVEmitIntrinsics::propagateElemTypeRec(
653 DenseSet<std::pair<Value *, Value *>> &VisitedSubst,
654 std::unordered_set<Value *> &Visited,
655 DenseMap<Function *, CallInst *> Ptrcasts) {
656 if (!Visited.insert(
Op).second)
659 for (
auto *U :
Users) {
662 if (!VisitedSubst.insert(std::make_pair(U,
Op)).second)
667 if (isaGEP(UI) || TypeValidated.find(UI) != TypeValidated.end())
668 replaceUsesOfWithSpvPtrcast(
Op, CastElemTy, UI, Ptrcasts);
676SPIRVEmitIntrinsics::deduceElementTypeByValueDeep(
Type *ValueTy,
Value *Operand,
677 bool UnknownElemTypeI8) {
678 std::unordered_set<Value *> Visited;
679 return deduceElementTypeByValueDeep(ValueTy, Operand, Visited,
683Type *SPIRVEmitIntrinsics::deduceElementTypeByValueDeep(
684 Type *ValueTy,
Value *Operand, std::unordered_set<Value *> &Visited,
685 bool UnknownElemTypeI8) {
690 deduceElementTypeHelper(Operand, Visited, UnknownElemTypeI8))
701Type *SPIRVEmitIntrinsics::deduceElementTypeByUsersDeep(
702 Value *
Op, std::unordered_set<Value *> &Visited,
bool UnknownElemTypeI8) {
714 for (User *OpU :
Op->users()) {
716 if (
Type *Ty = deduceElementTypeHelper(Inst, Visited, UnknownElemTypeI8))
729 if ((DemangledName.
starts_with(
"__spirv_ocl_printf(") ||
738Type *SPIRVEmitIntrinsics::deduceElementTypeHelper(
Value *
I,
739 bool UnknownElemTypeI8) {
740 std::unordered_set<Value *> Visited;
741 return deduceElementTypeHelper(
I, Visited, UnknownElemTypeI8);
744void SPIRVEmitIntrinsics::maybeAssignPtrType(
Type *&Ty,
Value *
Op,
Type *RefTy,
745 bool UnknownElemTypeI8) {
747 if (!UnknownElemTypeI8)
756bool SPIRVEmitIntrinsics::walkLogicalAccessChainDynamic(
757 Type *CurType,
Value *Operand, uint64_t Multiplier,
758 const std::function<
void(
Type *, uint64_t)> &OnLiteralIndexing,
759 const std::function<
void(
Type *,
Value *, uint64_t)> &OnDynamicIndexing) {
765 if (
ST->getNumElements() > 0) {
766 CurType =
ST->getElementType(0);
767 OnLiteralIndexing(CurType, 0);
777 OnDynamicIndexing(AT->getElementType(), Operand, Multiplier);
778 return AT ==
nullptr;
781bool SPIRVEmitIntrinsics::walkLogicalAccessChainConstant(
783 const std::function<
void(
Type *, uint64_t)> &OnLiteralIndexing) {
788 uint32_t EltTypeSize =
DL.getTypeSizeInBits(AT->getElementType()) / 8;
792 CurType = AT->getElementType();
793 OnLiteralIndexing(CurType, Index);
795 uint32_t StructSize =
DL.getTypeSizeInBits(ST) / 8;
798 const auto &STL =
DL.getStructLayout(ST);
799 unsigned Element = STL->getElementContainingOffset(
Offset);
800 Offset -= STL->getElementOffset(Element);
801 CurType =
ST->getElementType(Element);
802 OnLiteralIndexing(CurType, Element);
804 Type *EltTy = VT->getElementType();
805 TypeSize EltSizeBits =
DL.getTypeSizeInBits(EltTy);
806 assert(EltSizeBits % 8 == 0 &&
807 "Element type size in bits must be a multiple of 8.");
808 uint32_t EltTypeSize = EltSizeBits / 8;
813 OnLiteralIndexing(CurType, Index);
823bool SPIRVEmitIntrinsics::walkLogicalAccessChain(
824 GetElementPtrInst &
GEP,
825 const std::function<
void(
Type *, uint64_t)> &OnLiteralIndexing,
826 const std::function<
void(
Type *,
Value *, uint64_t)> &OnDynamicIndexing) {
829 std::optional<uint64_t> MultiplierOpt =
830 getByteAddressingMultiplier(
GEP.getSourceElementType());
831 assert(MultiplierOpt &&
"We only rewrite byte-addressing GEP");
832 uint64_t Multiplier = *MultiplierOpt;
835 Value *Src = getPointerRoot(
GEP.getPointerOperand());
836 Type *CurType = deduceElementType(Src,
true);
840 return walkLogicalAccessChainConstant(
841 CurType, CI->getZExtValue() * Multiplier, OnLiteralIndexing);
843 return walkLogicalAccessChainDynamic(CurType, Operand, Multiplier,
844 OnLiteralIndexing, OnDynamicIndexing);
848SPIRVEmitIntrinsics::buildLogicalAccessChainFromGEP(GetElementPtrInst &
GEP) {
851 B.SetInsertPoint(&
GEP);
853 std::vector<Value *> Indices;
854 Indices.push_back(ConstantInt::get(
855 IntegerType::getInt32Ty(CurrF->
getContext()), 0,
false));
856 walkLogicalAccessChain(
858 [&Indices, &
B](
Type *EltType, uint64_t Index) {
860 ConstantInt::get(
B.getInt64Ty(), Index,
false));
863 uint64_t Multiplier) {
865 uint32_t EltTypeSize =
DL.getTypeSizeInBits(EltType) / 8;
867 if (Multiplier == EltTypeSize) {
869 }
else if (EltTypeSize % Multiplier == 0) {
872 EltTypeSize / Multiplier,
876 ConstantInt::get(
Offset->getType(), Multiplier,
879 Index =
B.CreateUDiv(Index,
880 ConstantInt::get(
Offset->getType(), EltTypeSize,
884 Indices.push_back(Index);
888 SmallVector<Value *, 4>
Args;
889 Args.push_back(
B.getInt1(
GEP.isInBounds()));
890 Args.push_back(
GEP.getOperand(0));
892 auto *NewI =
B.CreateIntrinsic(Intrinsic::spv_gep, {
Types}, {
Args});
893 replaceAllUsesWithAndErase(
B, &
GEP, NewI);
897Type *SPIRVEmitIntrinsics::getGEPTypeLogical(GetElementPtrInst *
GEP) {
899 Type *CurType =
GEP->getResultElementType();
901 bool Interrupted = walkLogicalAccessChain(
902 *
GEP, [&CurType](
Type *EltType, uint64_t Index) { CurType = EltType; },
903 [&CurType](
Type *EltType,
Value *
Index, uint64_t) { CurType = EltType; });
905 return Interrupted ?
GEP->getResultElementType() : CurType;
908Type *SPIRVEmitIntrinsics::getGEPType(GetElementPtrInst *
Ref) {
909 if (getByteAddressingMultiplier(
Ref->getSourceElementType()) &&
911 return getGEPTypeLogical(
Ref);
918 Ty =
Ref->getSourceElementType();
922 Ty =
Ref->getResultElementType();
927Type *SPIRVEmitIntrinsics::deduceElementTypeHelper(
928 Value *
I, std::unordered_set<Value *> &Visited,
bool UnknownElemTypeI8,
929 bool IgnoreKnownType) {
935 if (!IgnoreKnownType)
940 if (!Visited.insert(
I).second)
947 maybeAssignPtrType(Ty,
I,
Ref->getAllocatedType(), UnknownElemTypeI8);
949 Ty = getGEPType(
Ref);
951 Ty = SGEP->getResultElementType();
956 KnownTy =
Op->getType();
958 maybeAssignPtrType(Ty,
I, ElemTy, UnknownElemTypeI8);
961 Ty = SPIRV::getOriginalFunctionType(*Fn);
964 Ty = deduceElementTypeByValueDeep(
966 Ref->getNumOperands() > 0 ?
Ref->getOperand(0) :
nullptr, Visited,
970 Type *RefTy = deduceElementTypeHelper(
Ref->getPointerOperand(), Visited,
972 maybeAssignPtrType(Ty,
I, RefTy, UnknownElemTypeI8);
974 maybeAssignPtrType(Ty,
I,
Ref->getDestTy(), UnknownElemTypeI8);
976 if (
Type *Src =
Ref->getSrcTy(), *Dest =
Ref->getDestTy();
978 Ty = deduceElementTypeHelper(
Ref->getOperand(0), Visited,
983 Ty = deduceElementTypeHelper(
Op, Visited, UnknownElemTypeI8);
987 Ty = deduceElementTypeHelper(
Op, Visited, UnknownElemTypeI8);
989 Type *BestTy =
nullptr;
991 DenseMap<Type *, unsigned> PhiTys;
992 for (
int i =
Ref->getNumIncomingValues() - 1; i >= 0; --i) {
993 Ty = deduceElementTypeByUsersDeep(
Ref->getIncomingValue(i), Visited,
1000 if (It.first->second > MaxN) {
1001 MaxN = It.first->second;
1009 for (
Value *
Op : {
Ref->getTrueValue(),
Ref->getFalseValue()}) {
1010 Ty = deduceElementTypeByUsersDeep(
Op, Visited, UnknownElemTypeI8);
1015 static StringMap<unsigned> ResTypeByArg = {
1019 {
"__spirv_GenericCastToPtr_ToGlobal", 0},
1020 {
"__spirv_GenericCastToPtr_ToLocal", 0},
1021 {
"__spirv_GenericCastToPtr_ToPrivate", 0},
1022 {
"__spirv_GenericCastToPtrExplicit_ToGlobal", 0},
1023 {
"__spirv_GenericCastToPtrExplicit_ToLocal", 0},
1024 {
"__spirv_GenericCastToPtrExplicit_ToPrivate", 0}};
1028 if (
II && (
II->getIntrinsicID() == Intrinsic::spv_resource_getbasepointer ||
1029 II->getIntrinsicID() == Intrinsic::spv_resource_getpointer)) {
1031 if (HandleType->getTargetExtName() ==
"spirv.Image" ||
1032 HandleType->getTargetExtName() ==
"spirv.SignedImage") {
1033 for (User *U :
II->users()) {
1038 }
else if (HandleType->getTargetExtName() ==
"spirv.VulkanBuffer") {
1040 Ty = HandleType->getTypeParameter(0);
1041 if (
II->getIntrinsicID() == Intrinsic::spv_resource_getpointer) {
1055 }
else if (
II &&
II->getIntrinsicID() ==
1056 Intrinsic::spv_generic_cast_to_ptr_explicit) {
1060 std::string DemangledName =
1062 if (DemangledName.length() > 0)
1063 DemangledName = SPIRV::lookupBuiltinNameHelper(DemangledName);
1064 auto AsArgIt = ResTypeByArg.
find(DemangledName);
1065 if (AsArgIt != ResTypeByArg.
end())
1066 Ty = deduceElementTypeHelper(CI->
getArgOperand(AsArgIt->second),
1067 Visited, UnknownElemTypeI8);
1074 if (Ty && !IgnoreKnownType) {
1085Type *SPIRVEmitIntrinsics::deduceNestedTypeHelper(User *U,
1086 bool UnknownElemTypeI8) {
1087 std::unordered_set<Value *> Visited;
1088 return deduceNestedTypeHelper(U,
U->getType(), Visited, UnknownElemTypeI8);
1091Type *SPIRVEmitIntrinsics::deduceNestedTypeHelper(
1092 User *U,
Type *OrigTy, std::unordered_set<Value *> &Visited,
1093 bool UnknownElemTypeI8) {
1102 if (!Visited.insert(U).second)
1107 bool Change =
false;
1108 for (
unsigned i = 0; i <
U->getNumOperands(); ++i) {
1110 assert(
Op &&
"Operands should not be null.");
1111 Type *OpTy =
Op->getType();
1114 if (
Type *NestedTy =
1115 deduceElementTypeHelper(
Op, Visited, UnknownElemTypeI8))
1122 Change |= Ty != OpTy;
1130 if (
Value *
Op =
U->getNumOperands() > 0 ?
U->getOperand(0) :
nullptr) {
1131 Type *OpTy = ArrTy->getElementType();
1134 if (
Type *NestedTy =
1135 deduceElementTypeHelper(
Op, Visited, UnknownElemTypeI8))
1142 Type *NewTy = ArrayType::get(Ty, ArrTy->getNumElements());
1148 if (
Value *
Op =
U->getNumOperands() > 0 ?
U->getOperand(0) :
nullptr) {
1149 Type *OpTy = VecTy->getElementType();
1152 if (
Type *NestedTy =
1153 deduceElementTypeHelper(
Op, Visited, UnknownElemTypeI8))
1160 Type *NewTy = VectorType::get(Ty, VecTy->getElementCount());
1170Type *SPIRVEmitIntrinsics::deduceElementType(
Value *
I,
bool UnknownElemTypeI8) {
1171 if (
Type *Ty = deduceElementTypeHelper(
I, UnknownElemTypeI8))
1173 if (!UnknownElemTypeI8)
1176 return IntegerType::getInt8Ty(
I->getContext());
1180 Value *PointerOperand) {
1186 return I->getType();
1194bool SPIRVEmitIntrinsics::deduceOperandElementTypeCalledFunction(
1196 Type *&KnownElemTy,
bool &Incomplete) {
1200 std::string DemangledName =
1202 if (DemangledName.length() > 0 &&
1204 const SPIRVSubtarget &
ST = TM.
getSubtarget<SPIRVSubtarget>(*CalledF);
1205 auto [Grp, Opcode, ExtNo] = SPIRV::mapBuiltinToOpcode(
1206 DemangledName,
ST.getPreferredInstructionSet());
1207 if (Opcode == SPIRV::OpGroupAsyncCopy) {
1208 for (
unsigned i = 0, PtrCnt = 0; i < CI->
arg_size() && PtrCnt < 2; ++i) {
1214 KnownElemTy = ElemTy;
1215 Ops.push_back(std::make_pair(
Op, i));
1217 }
else if (Grp == SPIRV::Atomic || Grp == SPIRV::AtomicFloating) {
1224 case SPIRV::OpAtomicFAddEXT:
1225 case SPIRV::OpAtomicFMinEXT:
1226 case SPIRV::OpAtomicFMaxEXT:
1227 case SPIRV::OpAtomicLoad:
1228 case SPIRV::OpAtomicCompareExchangeWeak:
1229 case SPIRV::OpAtomicCompareExchange:
1230 case SPIRV::OpAtomicExchange:
1231 case SPIRV::OpAtomicIAdd:
1232 case SPIRV::OpAtomicISub:
1233 case SPIRV::OpAtomicOr:
1234 case SPIRV::OpAtomicXor:
1235 case SPIRV::OpAtomicAnd:
1236 case SPIRV::OpAtomicUMin:
1237 case SPIRV::OpAtomicUMax:
1238 case SPIRV::OpAtomicSMin:
1239 case SPIRV::OpAtomicSMax: {
1244 Incomplete = isTodoType(
Op);
1245 Ops.push_back(std::make_pair(
Op, 0));
1247 case SPIRV::OpAtomicStore: {
1256 Incomplete = isTodoType(
Op);
1257 Ops.push_back(std::make_pair(
Op, 0));
1266void SPIRVEmitIntrinsics::deduceOperandElementTypeFunctionPointer(
1268 Type *&KnownElemTy,
bool IsPostprocessing) {
1272 Ops.push_back(std::make_pair(
Op, std::numeric_limits<unsigned>::max()));
1273 FunctionType *FTy = SPIRV::getOriginalFunctionType(*CI);
1274 bool IsNewFTy =
false, IsIncomplete =
false;
1277 Type *ArgTy = Arg->getType();
1282 if (isTodoType(Arg))
1283 IsIncomplete =
true;
1285 IsIncomplete =
true;
1288 ArgTy = FTy->getFunctionParamType(ParmIdx);
1292 Type *RetTy = FTy->getReturnType();
1299 IsIncomplete =
true;
1301 IsIncomplete =
true;
1304 if (!IsPostprocessing && IsIncomplete)
1307 IsNewFTy ? FunctionType::get(RetTy, ArgTys, FTy->isVarArg()) : FTy;
1310bool SPIRVEmitIntrinsics::deduceOperandElementTypeFunctionRet(
1311 Instruction *
I, SmallPtrSet<Instruction *, 4> *IncompleteRets,
1312 const SmallPtrSet<Value *, 4> *AskOps,
bool IsPostprocessing,
1324 DenseSet<std::pair<Value *, Value *>> VisitedSubst{std::make_pair(
I,
Op)};
1325 for (User *U :
F->users()) {
1333 propagateElemType(CI, PrevElemTy, VisitedSubst);
1343 for (Instruction *IncompleteRetI : *IncompleteRets)
1344 deduceOperandElementType(IncompleteRetI,
nullptr, AskOps,
1346 }
else if (IncompleteRets) {
1349 TypeValidated.insert(
I);
1357void SPIRVEmitIntrinsics::deduceOperandElementType(
1358 Instruction *
I, SmallPtrSet<Instruction *, 4> *IncompleteRets,
1359 const SmallPtrSet<Value *, 4> *AskOps,
bool IsPostprocessing) {
1361 Type *KnownElemTy =
nullptr;
1362 bool Incomplete =
false;
1368 Incomplete = isTodoType(
I);
1369 for (
unsigned i = 0; i <
Ref->getNumIncomingValues(); i++) {
1372 Ops.push_back(std::make_pair(
Op, i));
1378 Incomplete = isTodoType(
I);
1379 Ops.push_back(std::make_pair(
Ref->getPointerOperand(), 0));
1386 Incomplete = isTodoType(
I);
1387 Ops.push_back(std::make_pair(
Ref->getOperand(0), 0));
1391 KnownElemTy =
Ref->getSourceElementType();
1392 Ops.push_back(std::make_pair(
Ref->getPointerOperand(),
1397 KnownElemTy =
Ref->getBaseType();
1398 Ops.push_back(std::make_pair(
Ref->getPointerOperand(),
1401 KnownElemTy =
I->getType();
1407 Ops.push_back(std::make_pair(
Ref->getPointerOperand(),
1411 reconstructType(
Ref->getValueOperand(),
false, IsPostprocessing)))
1416 Ops.push_back(std::make_pair(
Ref->getPointerOperand(),
1424 Incomplete = isTodoType(
Ref->getPointerOperand());
1425 Ops.push_back(std::make_pair(
Ref->getPointerOperand(),
1433 Incomplete = isTodoType(
Ref->getPointerOperand());
1434 Ops.push_back(std::make_pair(
Ref->getPointerOperand(),
1440 Incomplete = isTodoType(
I);
1441 for (
unsigned i = 0; i <
Ref->getNumOperands(); i++) {
1444 Ops.push_back(std::make_pair(
Op, i));
1452 if (deduceOperandElementTypeFunctionRet(
I, IncompleteRets, AskOps,
1453 IsPostprocessing, KnownElemTy,
Op,
1456 Incomplete = isTodoType(CurrF);
1457 Ops.push_back(std::make_pair(
Op, 0));
1463 bool Incomplete0 = isTodoType(Op0);
1464 bool Incomplete1 = isTodoType(Op1);
1466 Type *ElemTy0 = (Incomplete0 && !Incomplete1 && ElemTy1)
1468 : GR->findDeducedElementType(Op0);
1470 KnownElemTy = ElemTy0;
1471 Incomplete = Incomplete0;
1472 Ops.push_back(std::make_pair(Op1, 1));
1473 }
else if (ElemTy1) {
1474 KnownElemTy = ElemTy1;
1475 Incomplete = Incomplete1;
1476 Ops.push_back(std::make_pair(Op0, 0));
1480 deduceOperandElementTypeCalledFunction(CI,
Ops, KnownElemTy, Incomplete);
1481 else if (HaveFunPtrs)
1482 deduceOperandElementTypeFunctionPointer(CI,
Ops, KnownElemTy,
1487 if (!KnownElemTy ||
Ops.size() == 0)
1492 for (
auto &OpIt :
Ops) {
1496 Type *AskTy =
nullptr;
1497 CallInst *AskCI =
nullptr;
1498 if (IsPostprocessing && AskOps) {
1504 if (Ty == KnownElemTy)
1507 Type *OpTy =
Op->getType();
1508 if (
Op->hasUseList() &&
1515 else if (!IsPostprocessing)
1519 if (AssignCI ==
nullptr) {
1528 DenseSet<std::pair<Value *, Value *>> VisitedSubst{
1529 std::make_pair(
I,
Op)};
1530 propagateElemTypeRec(
Op, KnownElemTy, PrevElemTy, VisitedSubst);
1534 CallInst *PtrCastI =
1535 buildSpvPtrcast(
I->getParent()->getParent(),
Op, KnownElemTy);
1536 if (OpIt.second == std::numeric_limits<unsigned>::max())
1539 I->setOperand(OpIt.second, PtrCastI);
1542 TypeValidated.insert(
I);
1545void SPIRVEmitIntrinsics::replaceMemInstrUses(Instruction *Old,
1550 if (isAssignTypeInstr(U)) {
1551 B.SetInsertPoint(U);
1552 SmallVector<Value *, 2>
Args = {
New,
U->getOperand(1)};
1553 CallInst *AssignCI =
1554 B.CreateIntrinsic(Intrinsic::spv_assign_type, {
New->getType()},
Args);
1556 U->eraseFromParent();
1559 U->replaceUsesOfWith(Old, New);
1567 Type *NewArgTy =
New->getType();
1569 if (NewArgTy != ExpectedArgTy) {
1572 M, Intrinsic::spv_abort, {NewArgTy});
1577 if (
Phi->getType() !=
New->getType()) {
1578 Phi->mutateType(
New->getType());
1579 Phi->replaceUsesOfWith(Old, New);
1582 for (User *PhiUser :
Phi->users())
1585 for (ExtractValueInst *EV : EVUsers) {
1586 B.SetInsertPoint(EV);
1588 for (
unsigned Idx : EV->indices())
1589 Args.push_back(
B.getInt32(Idx));
1591 B.CreateIntrinsic(Intrinsic::spv_extractv, {EV->getType()},
Args);
1592 EV->replaceAllUsesWith(NewEV);
1593 DeletedInstrs.
insert(EV);
1594 EV->eraseFromParent();
1597 Phi->replaceUsesOfWith(Old, New);
1603 New->copyMetadata(*Old);
1607void SPIRVEmitIntrinsics::preprocessUndefs(
IRBuilder<> &
B) {
1608 std::queue<Instruction *> Worklist;
1612 while (!Worklist.empty()) {
1614 bool BPrepared =
false;
1617 for (
auto &
Op :
I->operands()) {
1619 if (!AggrUndef || !
Op->getType()->isAggregateType())
1626 auto *IntrUndef =
B.CreateIntrinsic(Intrinsic::spv_undef, {});
1627 Worklist.push(IntrUndef);
1628 I->replaceUsesOfWith(
Op, IntrUndef);
1629 AggrConsts[IntrUndef] = AggrUndef;
1630 AggrConstTypes[IntrUndef] = AggrUndef->getType();
1639void SPIRVEmitIntrinsics::simplifyNullAddrSpaceCasts() {
1643 ASC->replaceAllUsesWith(
1645 ASC->eraseFromParent();
1649void SPIRVEmitIntrinsics::preprocessCompositeConstants(
IRBuilder<> &
B) {
1650 std::queue<Instruction *> Worklist;
1654 while (!Worklist.empty()) {
1655 auto *
I = Worklist.front();
1658 bool KeepInst =
false;
1659 for (
const auto &
Op :
I->operands()) {
1661 Type *ResTy =
nullptr;
1664 ResTy = COp->getType();
1676 ResTy =
Op->getType()->isVectorTy() ? COp->getType() :
B.getInt32Ty();
1681 for (
unsigned i = 0; i < COp->getNumElements(); ++i)
1682 Args.push_back(COp->getElementAsConstant(i));
1688 CE &&
CE->getOpcode() == Instruction::AddrSpaceCast &&
1694 IsPhi ?
B.SetInsertPointPastAllocas(
I->getParent()->getParent())
1695 :
B.SetInsertPoint(
I);
1699 B.CreateIntrinsic(Intrinsic::spv_const_composite, {ResTy}, {
Args});
1703 AggrConsts[CI] = AggrConst;
1704 AggrConstTypes[CI] = deduceNestedTypeHelper(AggrConst,
false);
1716 B.CreateIntrinsic(Intrinsic::spv_assign_decoration, {
I->getType()},
1721 unsigned RoundingModeDeco,
1728 ConstantInt::get(
Int32Ty, SPIRV::Decoration::FPRoundingMode)),
1737 MDNode *SaturatedConversionNode =
1739 Int32Ty, SPIRV::Decoration::SaturatedConversion))});
1759 MDString *ConstraintString =
1768 B.SetInsertPoint(&
Call);
1769 B.CreateIntrinsic(Intrinsic::spv_inline_asm, {
Args});
void SPIRVEmitIntrinsics::useRoundingMode(ConstrainedFPIntrinsic *FPI,
  if (!RM.has_value())

  unsigned RoundingModeDeco = std::numeric_limits<unsigned>::max();
  switch (RM.value()) {
  case RoundingMode::NearestTiesToEven:
    RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTE;
    break;
  case RoundingMode::TowardNegative:
    RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTN;
    break;
  case RoundingMode::TowardPositive:
    RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTP;
    break;
  case RoundingMode::TowardZero:
    RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTZ;
    break;
  case RoundingMode::Dynamic:
  case RoundingMode::NearestTiesToAway:
    break;

  if (RoundingModeDeco == std::numeric_limits<unsigned>::max())
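  // NOTE: RoundingMode::Dynamic and NearestTiesToAway have no SPIR-V
  // FPRoundingMode counterpart, so RoundingModeDeco keeps its sentinel value
  // for them and the check above leaves the call undecorated.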
Instruction *SPIRVEmitIntrinsics::visitSwitchInst(SwitchInst &I) {
  B.SetInsertPoint(&I);
  SmallVector<Value *, 4> Args;
  Args.push_back(I.getCondition());
  for (auto &Case : I.cases()) {
    Args.push_back(Case.getCaseValue());
    BBCases.push_back(Case.getCaseSuccessor());

  CallInst *NewI = B.CreateIntrinsic(Intrinsic::spv_switch,
                                     {I.getOperand(0)->getType()}, {Args});

  I.eraseFromParent();

  B.SetInsertPoint(ParentBB);
  IndirectBrInst *BrI = B.CreateIndirectBr(
  for (BasicBlock *BBCase : BBCases)
1843Instruction *SPIRVEmitIntrinsics::visitIntrinsicInst(IntrinsicInst &
I) {
1849 B.SetInsertPoint(&
I);
1851 SmallVector<Value *, 4>
Args;
1852 Args.push_back(
B.getInt1(
true));
1853 Args.push_back(
I.getOperand(0));
1854 Args.push_back(
B.getInt32(0));
1855 for (
unsigned J = 0; J < SGEP->getNumIndices(); ++J)
1856 Args.push_back(SGEP->getIndexOperand(J));
1858 auto *NewI =
B.CreateIntrinsic(Intrinsic::spv_gep, Types, Args);
1859 replaceAllUsesWithAndErase(
B, &
I, NewI);
1863Instruction *SPIRVEmitIntrinsics::visitGetElementPtrInst(GetElementPtrInst &
I) {
1865 B.SetInsertPoint(&
I);
1873 if (getByteAddressingMultiplier(
I.getSourceElementType())) {
1874 return buildLogicalAccessChainFromGEP(
I);
1879 Value *PtrOp =
I.getPointerOperand();
1880 Type *SrcElemTy =
I.getSourceElementType();
1881 Type *DeducedPointeeTy = deduceElementType(PtrOp,
true);
1884 if (ArrTy->getElementType() == SrcElemTy) {
1886 Type *FirstIdxType =
I.getOperand(1)->getType();
1887 NewIndices.
push_back(ConstantInt::get(FirstIdxType, 0));
1888 for (
Value *Idx :
I.indices())
1892 SmallVector<Value *, 4>
Args;
1893 Args.push_back(
B.getInt1(
I.isInBounds()));
1894 Args.push_back(
I.getPointerOperand());
1897 auto *NewI =
B.CreateIntrinsic(Intrinsic::spv_gep, {
Types}, {
Args});
1898 replaceAllUsesWithAndErase(
B, &
I, NewI);
1905 SmallVector<Value *, 4>
Args;
1906 Args.push_back(
B.getInt1(
I.isInBounds()));
1908 auto *NewI =
B.CreateIntrinsic(Intrinsic::spv_gep, {
Types}, {
Args});
1909 replaceAllUsesWithAndErase(
B, &
I, NewI);
1913Instruction *SPIRVEmitIntrinsics::visitBitCastInst(BitCastInst &
I) {
1915 B.SetInsertPoint(&
I);
1924 I.eraseFromParent();
1930 auto *NewI =
B.CreateIntrinsic(Intrinsic::spv_bitcast, {
Types}, {
Args});
1931 replaceAllUsesWithAndErase(
B, &
I, NewI);
1935void SPIRVEmitIntrinsics::insertAssignPtrTypeTargetExt(
1937 Type *VTy =
V->getType();
1942 if (ElemTy != AssignedType)
1955 if (CurrentType == AssignedType)
1962 " for value " +
V->getName(),
1970void SPIRVEmitIntrinsics::replacePointerOperandWithPtrCast(
1971 Instruction *
I,
Value *Pointer,
Type *ExpectedElementType,
1973 TypeValidated.insert(
I);
1976 Type *PointerElemTy = deduceElementTypeHelper(Pointer,
false);
1977 if (PointerElemTy == ExpectedElementType ||
1983 MetadataAsValue *VMD =
buildMD(ExpectedElementVal);
1985 bool FirstPtrCastOrAssignPtrType =
true;
1991 for (
auto User :
Pointer->users()) {
1994 (
II->getIntrinsicID() != Intrinsic::spv_assign_ptr_type &&
1995 II->getIntrinsicID() != Intrinsic::spv_ptrcast) ||
1996 II->getOperand(0) != Pointer)
2001 FirstPtrCastOrAssignPtrType =
false;
2002 if (
II->getOperand(1) != VMD ||
2009 if (
II->getIntrinsicID() != Intrinsic::spv_ptrcast)
2014 if (
II->getParent() !=
I->getParent())
2017 I->setOperand(OperandToReplace,
II);
2023 if (FirstPtrCastOrAssignPtrType) {
2028 }
else if (isTodoType(Pointer)) {
2029 eraseTodoType(Pointer);
2037 DenseSet<std::pair<Value *, Value *>> VisitedSubst{
2038 std::make_pair(
I, Pointer)};
2040 propagateElemType(Pointer, PrevElemTy, VisitedSubst);
2052 auto *PtrCastI =
B.CreateIntrinsic(Intrinsic::spv_ptrcast, {
Types},
Args);
2058void SPIRVEmitIntrinsics::insertPtrCastOrAssignTypeInstr(Instruction *
I,
2063 replacePointerOperandWithPtrCast(
2064 I,
SI->getValueOperand(), IntegerType::getInt8Ty(CurrF->
getContext()),
2070 Type *OpTy =
Op->getType();
2080 It != AggrConstTypes.
end())
2082 if (OpTy ==
Op->getType())
2083 OpTy = deduceElementTypeByValueDeep(OpTy,
Op,
false);
2084 replacePointerOperandWithPtrCast(
I, Pointer, OpTy, 1,
B);
2089 Type *OpTy = LI->getType();
2094 Type *NewOpTy = OpTy;
2095 OpTy = deduceElementTypeByValueDeep(OpTy, LI,
false);
2096 if (OpTy == NewOpTy)
2097 insertTodoType(Pointer);
2100 replacePointerOperandWithPtrCast(
I, Pointer, OpTy, 0,
B);
2105 Type *OpTy =
nullptr;
2117 OpTy = GEPI->getSourceElementType();
2119 replacePointerOperandWithPtrCast(
I, Pointer, OpTy, 0,
B);
2121 insertTodoType(Pointer);
2133 std::string DemangledName =
2137 bool HaveTypes =
false;
2155 for (User *U : CalledArg->
users()) {
2157 if ((ElemTy = deduceElementTypeHelper(Inst,
false)) !=
nullptr)
2163 HaveTypes |= ElemTy !=
nullptr;
2168 if (DemangledName.empty() && !HaveTypes)
2186 Type *ExpectedType =
2188 if (!ExpectedType && !DemangledName.empty())
2189 ExpectedType = SPIRV::parseBuiltinCallArgumentBaseType(
2190 DemangledName,
OpIdx,
I->getContext());
2191 if (!ExpectedType || ExpectedType->
isVoidTy())
2199 replacePointerOperandWithPtrCast(CI, ArgOperand, ExpectedType,
OpIdx,
B);
2203Instruction *SPIRVEmitIntrinsics::visitInsertElementInst(InsertElementInst &
I) {
2210 I.getOperand(1)->getType(),
2211 I.getOperand(2)->getType()};
2213 B.SetInsertPoint(&
I);
2215 auto *NewI =
B.CreateIntrinsic(Intrinsic::spv_insertelt, {
Types}, {
Args});
2216 replaceAllUsesWithAndErase(
B, &
I, NewI);
2221SPIRVEmitIntrinsics::visitExtractElementInst(ExtractElementInst &
I) {
2228 B.SetInsertPoint(&
I);
2230 I.getIndexOperand()->getType()};
2231 SmallVector<Value *, 2>
Args = {
I.getVectorOperand(),
I.getIndexOperand()};
2232 auto *NewI =
B.CreateIntrinsic(Intrinsic::spv_extractelt, {
Types}, {
Args});
2233 replaceAllUsesWithAndErase(
B, &
I, NewI);
2237Instruction *SPIRVEmitIntrinsics::visitInsertValueInst(InsertValueInst &
I) {
2239 B.SetInsertPoint(&
I);
2242 Value *AggregateOp =
I.getAggregateOperand();
2246 Args.push_back(AggregateOp);
2247 Args.push_back(
I.getInsertedValueOperand());
2248 for (
auto &
Op :
I.indices())
2249 Args.push_back(
B.getInt32(
Op));
2251 B.CreateIntrinsic(Intrinsic::spv_insertv, {
Types}, {
Args});
2252 replaceMemInstrUses(&
I, NewI,
B);
2256Instruction *SPIRVEmitIntrinsics::visitExtractValueInst(ExtractValueInst &
I) {
2257 if (
I.getAggregateOperand()->getType()->isAggregateType())
2260 B.SetInsertPoint(&
I);
2262 for (
auto &
Op :
I.indices())
2263 Args.push_back(
B.getInt32(
Op));
2265 B.CreateIntrinsic(Intrinsic::spv_extractv, {
I.getType()}, {
Args});
2266 replaceAllUsesWithAndErase(
B, &
I, NewI);
Instruction *SPIRVEmitIntrinsics::visitLoadInst(LoadInst &I) {
  if (!I.getType()->isAggregateType())

  B.SetInsertPoint(&I);
  TrackConstants = false;
  unsigned IntrinsicId;
  SmallVector<Value *, 4> Args = {I.getPointerOperand(), B.getInt16(Flags)};
  if (!I.isAtomic()) {
    IntrinsicId = Intrinsic::spv_load;
    Args.push_back(B.getInt32(I.getAlign().value()));
  } else {
    IntrinsicId = Intrinsic::spv_atomic_load;
    Args.push_back(B.getInt8(static_cast<uint8_t>(I.getOrdering())));
  }
  auto *NewI =
      B.CreateIntrinsic(IntrinsicId, {I.getOperand(0)->getType()}, Args);
  replaceMemInstrUses(&I, NewI, B);
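// NOTE: aggregate-typed loads are rewritten here into spv_load (or
// spv_atomic_load) calls; the memory-operand flags, the alignment and, for
// atomic loads, the ordering are passed as extra scalar arguments, and
// replaceMemInstrUses() rewires the aggregate users to the new call.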
Instruction *SPIRVEmitIntrinsics::visitStoreInst(StoreInst &I) {
  B.SetInsertPoint(&I);
  TrackConstants = false;
  auto *PtrOp = I.getPointerOperand();
  if (I.getValueOperand()->getType()->isAggregateType()) {
           "Unexpected argument of aggregate type, should be spv_extractv!");

  unsigned IntrinsicId;
  SmallVector<Value *, 4> Args = {I.getValueOperand(), PtrOp,
  if (!I.isAtomic()) {
    IntrinsicId = Intrinsic::spv_store;
    Args.push_back(B.getInt32(I.getAlign().value()));
  } else {
    IntrinsicId = Intrinsic::spv_atomic_store;
    Args.push_back(B.getInt8(static_cast<uint8_t>(I.getOrdering())));
  }
  auto *NewI = B.CreateIntrinsic(
      IntrinsicId, {I.getValueOperand()->getType(), PtrOp->getType()}, Args);
  I.eraseFromParent();
Instruction *SPIRVEmitIntrinsics::visitAllocaInst(AllocaInst &I) {
  Value *ArraySize = nullptr;
  if (I.isArrayAllocation()) {
            SPIRV::Extension::SPV_INTEL_variable_length_array))
          "array allocation: this instruction requires the following "
          "SPIR-V extension: SPV_INTEL_variable_length_array",
    ArraySize = I.getArraySize();

  B.SetInsertPoint(&I);
  TrackConstants = false;
  Type *PtrTy = I.getType();
          ? B.CreateIntrinsic(Intrinsic::spv_alloca_array,
                              {PtrTy, ArraySize->getType()},
                              {ArraySize, B.getInt32(I.getAlign().value())})
          : B.CreateIntrinsic(Intrinsic::spv_alloca, {PtrTy},
                              {B.getInt32(I.getAlign().value())});
  replaceAllUsesWithAndErase(B, &I, NewI);
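// NOTE: a variable-length (array) alloca is only accepted when the
// SPV_INTEL_variable_length_array extension is available; it is then lowered
// to spv_alloca_array carrying the dynamic size, while fixed-size allocas
// become plain spv_alloca calls that only carry the alignment.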
Instruction *SPIRVEmitIntrinsics::visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
  assert(I.getType()->isAggregateType() && "Aggregate result is expected");
  B.SetInsertPoint(&I);
  Args.push_back(B.getInt32(
      static_cast<uint32_t>(getMemScope(I.getContext(), I.getSyncScopeID()))));

  const SPIRVSubtarget &ST = TM.getSubtarget<SPIRVSubtarget>(*I.getFunction());
  unsigned AS = I.getPointerOperand()->getType()->getPointerAddressSpace();
  uint32_t ScSem = static_cast<uint32_t>(
  Args.push_back(B.getInt32(
  Args.push_back(B.getInt32(
  auto *NewI = B.CreateIntrinsic(Intrinsic::spv_cmpxchg,
                                 {I.getPointerOperand()->getType()}, {Args});
  replaceMemInstrUses(&I, NewI, B);
2391 case Intrinsic::spv_abort:
2393 case Intrinsic::trap:
2394 case Intrinsic::ubsantrap:
2396 return ST.canUseExtension(SPIRV::Extension::SPV_KHR_abort);
2416 [&ST](
const Instruction &
II) { return isAbortCall(II, ST); }) &&
2417 "abort-like call must be the last non-debug instruction before its "
2418 "block's terminator");
2422Instruction *SPIRVEmitIntrinsics::visitUnreachableInst(UnreachableInst &
I) {
2423 const SPIRVSubtarget &
ST = TM.
getSubtarget<SPIRVSubtarget>(*
I.getFunction());
2427 B.CreateIntrinsic(Intrinsic::spv_unreachable, {});
2436 static const StringSet<> ArtificialGlobals{
"llvm.global.annotations",
2437 "llvm.compiler.used",
"llvm.used"};
2442 auto &UserFunctions = GVUsers.getTransitiveUserFunctions(GV);
2443 if (UserFunctions.contains(
F))
2448 if (!UserFunctions.empty())
2453 const Module &M = *
F->getParent();
2454 const Function &FirstDefinition = *M.getFunctionDefs().
begin();
2455 return F == &FirstDefinition;
2458Value *SPIRVEmitIntrinsics::buildSpvUndefComposite(
Type *AggrTy,
2460 SmallVector<Value *, 4> Elems;
2462 Type *ElemTy = ArrTy->getElementType();
2463 auto *UI =
B.CreateIntrinsic(Intrinsic::spv_undef, {});
2465 AggrConstTypes[UI] = ElemTy;
2466 Elems.
assign(ArrTy->getNumElements(), UI);
2469 DenseMap<Type *, Instruction *> UndefByType;
2470 for (
unsigned I = 0;
I < StructTy->getNumElements(); ++
I) {
2472 auto &
Entry = UndefByType[ElemTy];
2474 Entry =
B.CreateIntrinsic(Intrinsic::spv_undef, {});
2476 AggrConstTypes[
Entry] = ElemTy;
2481 auto *Composite =
B.CreateIntrinsic(Intrinsic::spv_const_composite,
2482 {
B.getInt32Ty()}, Elems);
2484 AggrConstTypes[Composite] = AggrTy;
2488void SPIRVEmitIntrinsics::processGlobalValue(GlobalVariable &GV,
2499 deduceElementTypeHelper(&GV,
false);
2501 Value *InitOp = Init;
2503 InitOp = buildSpvUndefComposite(Init->
getType(),
B);
2506 auto *InitInst =
B.CreateIntrinsic(Intrinsic::spv_init_global,
2508 InitInst->setArgOperand(1, InitOp);
2511 B.CreateIntrinsic(Intrinsic::spv_unref_global, GV.
getType(), &GV);
2517bool SPIRVEmitIntrinsics::insertAssignPtrTypeIntrs(Instruction *
I,
2519 bool UnknownElemTypeI8) {
2525 if (
Type *ElemTy = deduceElementType(
I, UnknownElemTypeI8)) {
2532void SPIRVEmitIntrinsics::insertAssignTypeIntrs(Instruction *
I,
2535 static StringMap<unsigned> ResTypeWellKnown = {
2536 {
"async_work_group_copy", WellKnownTypes::Event},
2537 {
"async_work_group_strided_copy", WellKnownTypes::Event},
2538 {
"__spirv_GroupAsyncCopy", WellKnownTypes::Event}};
2542 bool IsKnown =
false;
2547 std::string DemangledName =
2550 if (DemangledName.length() > 0)
2552 SPIRV::lookupBuiltinNameHelper(DemangledName, &DecorationId);
2553 auto ResIt = ResTypeWellKnown.
find(DemangledName);
2554 if (ResIt != ResTypeWellKnown.
end()) {
2557 switch (ResIt->second) {
2558 case WellKnownTypes::Event:
2565 switch (DecorationId) {
2568 case FPDecorationId::SAT:
2571 case FPDecorationId::RTE:
2573 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTE,
B);
2575 case FPDecorationId::RTZ:
2577 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTZ,
B);
2579 case FPDecorationId::RTP:
2581 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTP,
B);
2583 case FPDecorationId::RTN:
2585 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTN,
B);
2591 Type *Ty =
I->getType();
2594 Type *TypeToAssign = Ty;
2596 if (
II->getIntrinsicID() == Intrinsic::spv_const_composite ||
2597 II->getIntrinsicID() == Intrinsic::spv_undef) {
2598 auto It = AggrConstTypes.
find(
II);
2599 if (It == AggrConstTypes.
end())
2601 TypeToAssign = It->second;
2603 }
else if (
auto It = AggrConstTypes.
find(
I); It != AggrConstTypes.
end())
2604 TypeToAssign = It->second;
2608 for (
const auto &
Op :
I->operands()) {
2615 Type *OpTy =
Op->getType();
2617 CallInst *AssignCI =
2622 Type *OpTy =
Op->getType();
2637 CallInst *AssignCI =
2647bool SPIRVEmitIntrinsics::shouldTryToAddMemAliasingDecoration(
2648 Instruction *Inst) {
2650 if (!STI->
canUseExtension(SPIRV::Extension::SPV_INTEL_memory_access_aliasing))
2660void SPIRVEmitIntrinsics::insertSpirvDecorations(Instruction *
I,
2662 if (MDNode *MD =
I->getMetadata(
"spirv.Decorations")) {
2664 B.CreateIntrinsic(Intrinsic::spv_assign_decoration, {
I->getType()},
2669 auto processMemAliasingDecoration = [&](
unsigned Kind) {
2670 if (MDNode *AliasListMD =
I->getMetadata(Kind)) {
2671 if (shouldTryToAddMemAliasingDecoration(
I)) {
2672 uint32_t Dec =
Kind == LLVMContext::MD_alias_scope
2673 ? SPIRV::Decoration::AliasScopeINTEL
2674 : SPIRV::Decoration::NoAliasINTEL;
2676 I, ConstantInt::get(
B.getInt32Ty(), Dec),
2679 B.CreateIntrinsic(Intrinsic::spv_assign_aliasing_decoration,
2680 {
I->getType()}, {
Args});
2684 processMemAliasingDecoration(LLVMContext::MD_alias_scope);
2685 processMemAliasingDecoration(LLVMContext::MD_noalias);
2688 if (MDNode *MD =
I->getMetadata(LLVMContext::MD_fpmath)) {
2690 bool AllowFPMaxError =
2692 if (!AllowFPMaxError)
2696 B.CreateIntrinsic(Intrinsic::spv_assign_fpmaxerror_decoration,
2700 if (
I->getModule()->getTargetTriple().getVendor() ==
Triple::AMD &&
2704 auto &Ctx =
B.getContext();
2706 ConstantInt::get(
B.getInt32Ty(), SPIRV::Decoration::UserSemantic));
2709 if (
I->hasMetadata(
"amdgpu.no.fine.grained.memory"))
2711 Ctx, {US,
MDString::get(Ctx,
"amdgpu.no.fine.grained.memory")}));
2712 if (
I->hasMetadata(
"amdgpu.no.remote.memory"))
2715 if (
I->hasMetadata(
"amdgpu.ignore.denormal.mode"))
2717 Ctx, {US,
MDString::get(Ctx,
"amdgpu.ignore.denormal.mode")}));
2719 B.CreateIntrinsic(Intrinsic::spv_assign_decoration, {
I->getType()},
2727 &FPFastMathDefaultInfoMap,
2729 auto it = FPFastMathDefaultInfoMap.
find(
F);
2730 if (it != FPFastMathDefaultInfoMap.
end())
2738 SPIRV::FPFastMathMode::None);
2740 SPIRV::FPFastMathMode::None);
2742 SPIRV::FPFastMathMode::None);
2743 return FPFastMathDefaultInfoMap[
F] = std::move(FPFastMathDefaultInfoVec);
2749 size_t BitWidth = Ty->getScalarSizeInBits();
2753 assert(Index >= 0 && Index < 3 &&
2754 "Expected FPFastMathDefaultInfo for half, float, or double");
2755 assert(FPFastMathDefaultInfoVec.
size() == 3 &&
2756 "Expected FPFastMathDefaultInfoVec to have exactly 3 elements");
2757 return FPFastMathDefaultInfoVec[Index];
2760void SPIRVEmitIntrinsics::insertConstantsForFPFastMathDefault(
Module &M) {
2762 if (!
ST->canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2))
2771 auto Node =
M.getNamedMetadata(
"spirv.ExecutionMode");
2773 if (!
M.getNamedMetadata(
"opencl.enable.FP_CONTRACT")) {
2781 ConstantInt::get(Type::getInt32Ty(
M.getContext()), 0);
2784 [[maybe_unused]] GlobalVariable *GV =
2785 new GlobalVariable(M,
2786 Type::getInt32Ty(
M.getContext()),
2800 DenseMap<Function *, SPIRV::FPFastMathDefaultInfoVector>
2801 FPFastMathDefaultInfoMap;
2803 for (
unsigned i = 0; i <
Node->getNumOperands(); i++) {
2812 if (EM == SPIRV::ExecutionMode::FPFastMathDefault) {
2814 "Expected 4 operands for FPFastMathDefault");
2820 SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
2822 SPIRV::FPFastMathDefaultInfo &
Info =
2825 Info.FPFastMathDefault =
true;
2826 }
else if (EM == SPIRV::ExecutionMode::ContractionOff) {
2828 "Expected no operands for ContractionOff");
2832 SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
2834 for (SPIRV::FPFastMathDefaultInfo &Info : FPFastMathDefaultInfoVec) {
2835 Info.ContractionOff =
true;
2837 }
else if (EM == SPIRV::ExecutionMode::SignedZeroInfNanPreserve) {
2839 "Expected 1 operand for SignedZeroInfNanPreserve");
2840 unsigned TargetWidth =
2845 SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
2849 assert(Index >= 0 && Index < 3 &&
2850 "Expected FPFastMathDefaultInfo for half, float, or double");
2851 assert(FPFastMathDefaultInfoVec.
size() == 3 &&
2852 "Expected FPFastMathDefaultInfoVec to have exactly 3 elements");
2853 FPFastMathDefaultInfoVec[
Index].SignedZeroInfNanPreserve =
true;
2857 std::unordered_map<unsigned, GlobalVariable *> GlobalVars;
2858 for (
auto &[Func, FPFastMathDefaultInfoVec] : FPFastMathDefaultInfoMap) {
2859 if (FPFastMathDefaultInfoVec.
empty())
2862 for (
const SPIRV::FPFastMathDefaultInfo &Info : FPFastMathDefaultInfoVec) {
2863 assert(
Info.Ty &&
"Expected target type for FPFastMathDefaultInfo");
2866 if (Flags == SPIRV::FPFastMathMode::None && !
Info.ContractionOff &&
2867 !
Info.SignedZeroInfNanPreserve && !
Info.FPFastMathDefault)
2871 if (
Info.ContractionOff && (Flags & SPIRV::FPFastMathMode::AllowContract))
2873 "and AllowContract");
2875 if (
Info.SignedZeroInfNanPreserve &&
2877 (SPIRV::FPFastMathMode::NotNaN | SPIRV::FPFastMathMode::NotInf |
2878 SPIRV::FPFastMathMode::NSZ))) {
2879 if (
Info.FPFastMathDefault)
2881 "SignedZeroInfNanPreserve but at least one of "
2882 "NotNaN/NotInf/NSZ is enabled.");
2885 if ((Flags & SPIRV::FPFastMathMode::AllowTransform) &&
2886 !((Flags & SPIRV::FPFastMathMode::AllowReassoc) &&
2887 (Flags & SPIRV::FPFastMathMode::AllowContract))) {
2889 "AllowTransform requires AllowReassoc and "
2890 "AllowContract to be set.");
2893 auto it = GlobalVars.find(Flags);
2894 GlobalVariable *GV =
nullptr;
2895 if (it != GlobalVars.end()) {
2901 ConstantInt::get(Type::getInt32Ty(
M.getContext()), Flags);
2904 GV =
new GlobalVariable(M,
2905 Type::getInt32Ty(
M.getContext()),
2910 GlobalVars[
Flags] = GV;
2916void SPIRVEmitIntrinsics::processInstrAfterVisit(Instruction *
I,
2919 bool IsConstComposite =
2920 II &&
II->getIntrinsicID() == Intrinsic::spv_const_composite;
2921 if (IsConstComposite && TrackConstants) {
2923 auto t = AggrConsts.
find(
I);
2927 {
II->getType(),
II->getType()}, t->second,
I, {},
B);
2929 NewOp->setArgOperand(0,
I);
2932 for (
const auto &
Op :
I->operands()) {
2936 unsigned OpNo =
Op.getOperandNo();
2937 if (
II && ((
II->getIntrinsicID() == Intrinsic::spv_gep && OpNo == 0) ||
2938 (!
II->isBundleOperand(OpNo) &&
2939 II->paramHasAttr(OpNo, Attribute::ImmArg))))
2943 IsPhi ?
B.SetInsertPointPastAllocas(
I->getParent()->getParent())
2944 :
B.SetInsertPoint(
I);
2947 Type *OpTy =
Op->getType();
2955 {OpTy, OpTyVal->
getType()},
Op, OpTyVal, {},
B);
2957 if (!IsConstComposite &&
isPointerTy(OpTy) && OpElemTy !=
nullptr &&
2958 OpElemTy != IntegerType::getInt8Ty(
I->getContext())) {
2960 SmallVector<Value *, 2>
Args = {
2963 CallInst *PtrCasted =
2964 B.CreateIntrinsic(Intrinsic::spv_ptrcast, {
Types},
Args);
2969 I->setOperand(OpNo, NewOp);
2971 if (Named.insert(
I).second)
2975Type *SPIRVEmitIntrinsics::deduceFunParamElementType(Function *
F,
2977 std::unordered_set<Function *> FVisited;
2978 return deduceFunParamElementType(
F,
OpIdx, FVisited);
2981Type *SPIRVEmitIntrinsics::deduceFunParamElementType(
2982 Function *
F,
unsigned OpIdx, std::unordered_set<Function *> &FVisited) {
2984 if (!FVisited.insert(
F).second)
2987 std::unordered_set<Value *> Visited;
2990 for (User *U :
F->users()) {
3002 if (
Type *Ty = deduceElementTypeHelper(OpArg, Visited,
false))
3005 for (User *OpU : OpArg->
users()) {
3007 if (!Inst || Inst == CI)
3010 if (
Type *Ty = deduceElementTypeHelper(Inst, Visited,
false))
3017 if (FVisited.find(OuterF) != FVisited.end())
3019 for (
unsigned i = 0; i < OuterF->
arg_size(); ++i) {
3020 if (OuterF->
getArg(i) == OpArg) {
3021 Lookup.push_back(std::make_pair(OuterF, i));
3028 for (
auto &Pair :
Lookup) {
3029 if (
Type *Ty = deduceFunParamElementType(Pair.first, Pair.second, FVisited))
3036void SPIRVEmitIntrinsics::processParamTypesByFunHeader(Function *
F,
3038 B.SetInsertPointPastAllocas(
F);
3052 for (User *U :
F->users()) {
3068 for (User *U : Arg->
users()) {
3072 CI->
getParent()->getParent() == CurrF) {
3074 deduceOperandElementTypeFunctionPointer(CI,
Ops, ElemTy,
false);
3085void SPIRVEmitIntrinsics::processParamTypes(Function *
F,
IRBuilder<> &
B) {
3086 B.SetInsertPointPastAllocas(
F);
3092 if (!ElemTy && (ElemTy = deduceFunParamElementType(
F,
OpIdx)) !=
nullptr) {
3094 DenseSet<std::pair<Value *, Value *>> VisitedSubst;
3096 propagateElemType(Arg, IntegerType::getInt8Ty(
F->getContext()),
3108 bool IsNewFTy =
false;
3124bool SPIRVEmitIntrinsics::processFunctionPointers(
Module &M) {
3127 if (
F.isIntrinsic())
3129 if (
F.isDeclaration()) {
3130 for (User *U :
F.users()) {
3143 for (User *U :
F.users()) {
3145 if (!
II ||
II->arg_size() != 3 ||
II->getOperand(0) != &
F)
3147 if (
II->getIntrinsicID() == Intrinsic::spv_assign_ptr_type ||
3148 II->getIntrinsicID() == Intrinsic::spv_ptrcast) {
3155 if (Worklist.
empty())
3158 LLVMContext &Ctx =
M.getContext();
3163 for (Function *
F : Worklist) {
3165 for (
const auto &Arg :
F->args())
3167 IRB.CreateCall(
F, Args);
3169 IRB.CreateRetVoid();
3175void SPIRVEmitIntrinsics::applyDemangledPtrArgTypes(
IRBuilder<> &
B) {
3176 DenseMap<Function *, CallInst *> Ptrcasts;
3177 for (
auto It : FDeclPtrTys) {
3179 for (
auto *U :
F->users()) {
3184 for (
auto [Idx, ElemTy] : It.second) {
3192 B.SetInsertPointPastAllocas(Arg->
getParent());
3196 }
else if (isaGEP(Param)) {
3197 replaceUsesOfWithSpvPtrcast(Param,
normalizeType(ElemTy), CI,
3206 .getFirstNonPHIOrDbgOrAlloca());
3227SPIRVEmitIntrinsics::simplifyZeroLengthArrayGepInst(GetElementPtrInst *
GEP) {
3234 Type *SrcTy =
GEP->getSourceElementType();
3235 SmallVector<Value *, 8> Indices(
GEP->indices());
3237 if (ArrTy && ArrTy->getNumElements() == 0 &&
match(Indices[0],
m_Zero())) {
3238 Indices.erase(Indices.begin());
3239 SrcTy = ArrTy->getElementType();
3241 GEP->getNoWrapFlags(),
"",
3242 GEP->getIterator());
3247void SPIRVEmitIntrinsics::emitUnstructuredLoopControls(Function &
F,
3254 if (
ST->canUseExtension(
3255 SPIRV::Extension::SPV_INTEL_unstructured_loop_controls)) {
3256 for (BasicBlock &BB :
F) {
3258 MDNode *LoopMD =
Term->getMetadata(LLVMContext::MD_loop);
3264 unsigned LC =
Ops[0];
3265 if (LC == SPIRV::LoopControl::None)
3269 B.SetInsertPoint(Term);
3270 SmallVector<Value *, 4> IntrArgs;
3271 for (
unsigned Op :
Ops)
3273 B.CreateIntrinsic(Intrinsic::spv_loop_control_intel, IntrArgs);
3280 DominatorTree DT(
F);
3285 for (Loop *L : LI.getLoopsInPreorder()) {
3296 if (LoopControlOps[0] == SPIRV::LoopControl::None)
3300 B.SetInsertPoint(Header->getTerminator());
3303 SmallVector<Value *, 4>
Args = {MergeAddress, ContinueAddress};
3304 for (
unsigned Imm : LoopControlOps)
3305 Args.emplace_back(
B.getInt32(Imm));
3306 B.CreateIntrinsic(Intrinsic::spv_loop_merge, {
Args});
3310bool SPIRVEmitIntrinsics::runOnFunction(Function &Func) {
3311 if (
Func.isDeclaration())
3315 GR =
ST.getSPIRVGlobalRegistry();
3319 ST.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers);
3324 AggrConstTypes.
clear();
3326 DeletedInstrs.
clear();
3328 processParamTypesByFunHeader(CurrF,
B);
3332 SmallPtrSet<Instruction *, 4> DeadInsts;
3337 if ((!
GEP && !SGEP) || GR->findDeducedElementType(&
I))
3341 GR->addDeducedElementType(SGEP,
3346 GetElementPtrInst *NewGEP = simplifyZeroLengthArrayGepInst(
GEP);
3348 GEP->replaceAllUsesWith(NewGEP);
3352 if (
Type *GepTy = getGEPType(
GEP))
3356 for (
auto *
I : DeadInsts) {
3357 assert(
I->use_empty() &&
"Dead instruction should not have any uses left");
3358 I->eraseFromParent();
3368 Type *ElTy =
SI->getValueOperand()->getType();
3373 B.SetInsertPoint(&
Func.getEntryBlock(),
Func.getEntryBlock().begin());
3374 for (
auto &GV :
Func.getParent()->globals())
3375 processGlobalValue(GV,
B);
3377 preprocessUndefs(
B);
3378 simplifyNullAddrSpaceCasts();
3379 preprocessCompositeConstants(
B);
3381 for (BasicBlock &BB : Func)
3382 for (PHINode &Phi : BB.
phis())
3383 if (
Phi.getType()->isAggregateType()) {
3384 AggrConstTypes[&
Phi] =
Phi.getType();
3385 Phi.mutateType(
B.getInt32Ty());
3388 preprocessBoolVectorBitcasts(Func);
3392 applyDemangledPtrArgTypes(
B);
3395 for (
auto &
I : Worklist) {
3397 if (isConvergenceIntrinsic(
I))
3400 bool Postpone = insertAssignPtrTypeIntrs(
I,
B,
false);
3402 insertAssignTypeIntrs(
I,
B);
3403 insertPtrCastOrAssignTypeInstr(
I,
B);
3407 if (Postpone && !GR->findAssignPtrTypeInstr(
I))
3408 insertAssignPtrTypeIntrs(
I,
B,
true);
3411 useRoundingMode(FPI,
B);
3416 SmallPtrSet<Instruction *, 4> IncompleteRets;
3418 deduceOperandElementType(&
I, &IncompleteRets);
3422 for (BasicBlock &BB : Func)
3423 for (PHINode &Phi : BB.
phis())
3425 deduceOperandElementType(&Phi,
nullptr);
3427 for (
auto *
I : Worklist) {
3428 if (DeletedInstrs.
count(
I))
3430 TrackConstants =
true;
3440 if (isConvergenceIntrinsic(
I))
3444 processInstrAfterVisit(
I,
B);
3447 emitUnstructuredLoopControls(Func,
B);
3453 bool SPIRVEmitIntrinsics::postprocessTypes(Module &M) {
3454   if (!GR || TodoTypeSz == 0)
3457   unsigned SzTodo = TodoTypeSz;
3458   DenseMap<Value *, SmallPtrSet<Value *, 4>> ToProcess;
3463   CallInst *AssignCI = GR->findAssignPtrTypeInstr(Op);
3464   Type *KnownTy = GR->findDeducedElementType(Op);
3465   if (!KnownTy || !AssignCI)
3471   std::unordered_set<Value *> Visited;
3472   if (Type *ElemTy = deduceElementTypeHelper(Op, Visited, false, true)) {
3473     if (ElemTy != KnownTy) {
3474       DenseSet<std::pair<Value *, Value *>> VisitedSubst;
3475       propagateElemType(CI, ElemTy, VisitedSubst);
3482   if (Op->hasUseList()) {
3483     for (User *U : Op->users()) {
3490   if (TodoTypeSz == 0)
3495   SmallPtrSet<Instruction *, 4> IncompleteRets;
3497   auto It = ToProcess.find(&I);
3498   if (It == ToProcess.end())
3500   It->second.remove_if([this](Value *V) { return !isTodoType(V); });
3501   if (It->second.size() == 0)
3503   deduceOperandElementType(&I, &IncompleteRets, &It->second, true);
3504   if (TodoTypeSz == 0)
3509   return SzTodo > TodoTypeSz;
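// parseFunDeclarations(): walks non-intrinsic function declarations, demangles
// OpenCL/SPIR-V builtin names, and for OpGroupAsyncCopy builtins records in
// FDeclPtrTys the element type parsed from the demangled type string for each
// argument index in Idxs (the code that populates Idxs is elided in this
// listing).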
3513 void SPIRVEmitIntrinsics::parseFunDeclarations(Module &M) {
3515   if (!F.isDeclaration() || F.isIntrinsic())
3519   if (DemangledName.empty())
3523   auto [Grp, Opcode, ExtNo] = SPIRV::mapBuiltinToOpcode(
3524       DemangledName, ST.getPreferredInstructionSet());
3525   if (Opcode != SPIRV::OpGroupAsyncCopy)
3528   SmallVector<unsigned> Idxs;
3537   LLVMContext &Ctx = F.getContext();
3539   SPIRV::parseBuiltinTypeStr(TypeStrs, DemangledName, Ctx);
3540   if (!TypeStrs.size())
3543   for (unsigned Idx : Idxs) {
3544     if (Idx >= TypeStrs.size())
3547     SPIRV::parseBuiltinCallArgumentType(TypeStrs[Idx].trim(), Ctx))
3550     FDeclPtrTys[&F].push_back(std::make_pair(Idx, ElemTy));
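// processMaskedMemIntrinsic(): rewrites llvm.masked.gather / llvm.masked.scatter
// calls into the backend intrinsics spv_masked_gather / spv_masked_scatter,
// forwarding the pointer operands, the alignment of the pointer argument, and
// the mask. If the SPV_INTEL_masked_gather_scatter extension is not enabled,
// an error is emitted and the call is simply erased.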
3555 bool SPIRVEmitIntrinsics::processMaskedMemIntrinsic(IntrinsicInst &I) {
3556   const SPIRVSubtarget &ST = TM.getSubtarget<SPIRVSubtarget>(*I.getFunction());
3558   if (I.getIntrinsicID() == Intrinsic::masked_gather) {
3559     if (!ST.canUseExtension(
3560             SPIRV::Extension::SPV_INTEL_masked_gather_scatter)) {
3561       I.getContext().emitError(
3562           &I, "llvm.masked.gather requires SPV_INTEL_masked_gather_scatter "
3566       I.eraseFromParent();
3572     Value *Ptrs = I.getArgOperand(0);
3574     Value *Passthru = I.getArgOperand(2);
3577     uint32_t Alignment = I.getParamAlign(0).valueOrOne().value();
3579     SmallVector<Value *, 4> Args = {Ptrs, B.getInt32(Alignment), Mask,
3584     auto *NewI = B.CreateIntrinsic(Intrinsic::spv_masked_gather, Types, Args);
3586     I.eraseFromParent();
3590   if (I.getIntrinsicID() == Intrinsic::masked_scatter) {
3591     if (!ST.canUseExtension(
3592             SPIRV::Extension::SPV_INTEL_masked_gather_scatter)) {
3593       I.getContext().emitError(
3594           &I, "llvm.masked.scatter requires SPV_INTEL_masked_gather_scatter "
3597       I.eraseFromParent();
3603     Value *Values = I.getArgOperand(0);
3604     Value *Ptrs = I.getArgOperand(1);
3609     uint32_t Alignment = I.getParamAlign(1).valueOrOne().value();
3611     SmallVector<Value *, 4> Args = {Values, Ptrs, B.getInt32(Alignment), Mask};
3615     B.CreateIntrinsic(Intrinsic::spv_masked_scatter, Types, Args);
3616     I.eraseFromParent();
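// preprocessBoolVectorBitcasts(): collects bitcasts to or from fixed vectors
// of i1 and expands them element-wise. A bool-vector source is packed into an
// iN integer with extractelement/zext/shl/or; an integer source destined for a
// bool vector is unpacked bit by bit with icmp ne and insertelement (the
// computation of the And value is elided in this listing). The original
// bitcast is then replaced and erased.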
3627 void SPIRVEmitIntrinsics::preprocessBoolVectorBitcasts(Function &F) {
3628   struct BoolVecBitcast {
3630     FixedVectorType *BoolVecTy;
3634   auto getAsBoolVec = [](Type *Ty) -> FixedVectorType * {
3636     return (VTy && VTy->getElementType()->isIntegerTy(1)) ? VTy : nullptr;
3644   if (auto *BVTy = getAsBoolVec(BC->getSrcTy()))
3646   else if (auto *BVTy = getAsBoolVec(BC->getDestTy()))
3650   for (auto &[BC, BoolVecTy, SrcIsBoolVec] : ToReplace) {
3652     Value *Src = BC->getOperand(0);
3653     unsigned BoolVecN = BoolVecTy->getNumElements();
3655     Type *IntTy = B.getIntNTy(BoolVecN);
3661     IntVal = ConstantInt::get(IntTy, 0);
3662     for (unsigned I = 0; I < BoolVecN; ++I) {
3663       Value *Elem = B.CreateExtractElement(Src, B.getInt32(I));
3664       Value *Ext = B.CreateZExt(Elem, IntTy);
3666       Ext = B.CreateShl(Ext, ConstantInt::get(IntTy, I));
3667       IntVal = B.CreateOr(IntVal, Ext);
3673     if (!Src->getType()->isIntegerTy())
3674       IntVal = B.CreateBitCast(Src, IntTy);
3679     if (!SrcIsBoolVec) {
3682       for (unsigned I = 0; I < BoolVecN; ++I) {
3685         Value *Cmp = B.CreateICmpNE(And, ConstantInt::get(IntTy, 0));
3686         Result = B.CreateInsertElement(Result, Cmp, B.getInt32(I));
3692     if (!BC->getDestTy()->isIntegerTy())
3693       Result = B.CreateBitCast(IntVal, BC->getDestTy());
3696     BC->replaceAllUsesWith(Result);
3697     BC->eraseFromParent();
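// convertMaskedMemIntrinsics(): module-level sweep over the masked_gather /
// masked_scatter intrinsic declarations; each call site is lowered via
// processMaskedMemIntrinsic() and the now-unused declaration is erased.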
3701 bool SPIRVEmitIntrinsics::convertMaskedMemIntrinsics(Module &M) {
3705   if (!F.isIntrinsic())
3708   if (IID != Intrinsic::masked_gather && IID != Intrinsic::masked_scatter)
3713   Changed |= processMaskedMemIntrinsic(*II);
3717   F.eraseFromParent();
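// runOnModule(): module entry point. The visible fragments convert masked
// memory intrinsics, parse builtin function declarations, insert the FP
// fast-math default constants, process parameter types of every defined
// function, then freeze TodoType collection (CanTodoType = false) before
// postprocessTypes() and processFunctionPointers(). The trailing fragments
// (source lines 3757-3764) appear to belong to the new-pass-manager run()
// wrapper and to createSPIRVEmitIntrinsicsPass().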
3723 bool SPIRVEmitIntrinsics::runOnModule(Module &M) {
3726   Changed |= convertMaskedMemIntrinsics(M);
3728   parseFunDeclarations(M);
3729   insertConstantsForFPFastMathDefault(M);
3740   if (!F.isDeclaration() && !F.isIntrinsic()) {
3742     processParamTypes(&F, B);
3746   CanTodoType = false;
3747   Changed |= postprocessTypes(M);
3750   Changed |= processFunctionPointers(M);
3757   SPIRVEmitIntrinsics Legacy(TM);
3758   if (Legacy.runOnModule(M))
3764   return new SPIRVEmitIntrinsics(TM);