LLVM 23.0.0git
SPIRVEmitIntrinsics.cpp
Go to the documentation of this file.
1//===-- SPIRVEmitIntrinsics.cpp - emit SPIRV intrinsics ---------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// The pass emits SPIRV intrinsics keeping essential high-level information for
10// the translation of LLVM IR to SPIR-V.
11//
12//===----------------------------------------------------------------------===//
13
14#include "SPIRV.h"
15#include "SPIRVBuiltins.h"
16#include "SPIRVSubtarget.h"
17#include "SPIRVTargetMachine.h"
18#include "SPIRVUtils.h"
19#include "llvm/ADT/DenseSet.h"
20#include "llvm/ADT/StringSet.h"
21#include "llvm/IR/IRBuilder.h"
23#include "llvm/IR/InstVisitor.h"
24#include "llvm/IR/IntrinsicsSPIRV.h"
29
30#include <cassert>
31#include <queue>
32#include <unordered_set>
33
34// This pass performs the following transformation on LLVM IR level required
35// for the following translation to SPIR-V:
36// - replaces direct usages of aggregate constants with target-specific
37// intrinsics;
38// - replaces aggregates-related instructions (extract/insert, ld/st, etc)
39// with a target-specific intrinsics;
40// - emits intrinsics for the global variable initializers since IRTranslator
41// doesn't handle them and it's not very convenient to translate them
42// ourselves;
43// - emits intrinsics to keep track of the string names assigned to the values;
44// - emits intrinsics to keep track of constants (this is necessary to have an
45// LLVM IR constant after the IRTranslation is completed) for their further
46// deduplication;
47// - emits intrinsics to keep track of original LLVM types of the values
48// to be able to emit proper SPIR-V types eventually.
49//
50// TODO: consider removing spv.track.constant in favor of spv.assign.type.
51
52using namespace llvm;
53
// When set, an OpName is emitted for every named instruction, instead of only
// the conservative subset (allocas and spv.alloca calls) kept by default.
static cl::opt<bool>
    SpirvEmitOpNames("spirv-emit-op-names",
                     cl::desc("Emit OpName for all instructions"),
                     cl::init(false));

// Pull in TableGen-generated builtin-group declarations.
namespace llvm::SPIRV {
#define GET_BuiltinGroup_DECL
#include "SPIRVGenTables.inc"
} // namespace llvm::SPIRV
63
64namespace {
65// This class keeps track of which functions reference which global variables.
66class GlobalVariableUsers {
67 template <typename T1, typename T2>
68 using OneToManyMapTy = DenseMap<T1, SmallPtrSet<T2, 4>>;
69
70 OneToManyMapTy<const GlobalVariable *, const Function *> GlobalIsUsedByFun;
71
72 void collectGlobalUsers(
73 const GlobalVariable *GV,
74 OneToManyMapTy<const GlobalVariable *, const GlobalVariable *>
75 &GlobalIsUsedByGlobal) {
77 while (!Stack.empty()) {
78 const Value *V = Stack.pop_back_val();
79
80 if (const Instruction *I = dyn_cast<Instruction>(V)) {
81 GlobalIsUsedByFun[GV].insert(I->getFunction());
82 continue;
83 }
84
85 if (const GlobalVariable *UserGV = dyn_cast<GlobalVariable>(V)) {
86 GlobalIsUsedByGlobal[GV].insert(UserGV);
87 continue;
88 }
89
90 if (const Constant *C = dyn_cast<Constant>(V))
91 Stack.append(C->user_begin(), C->user_end());
92 }
93 }
94
95 bool propagateGlobalToGlobalUsers(
96 OneToManyMapTy<const GlobalVariable *, const GlobalVariable *>
97 &GlobalIsUsedByGlobal) {
99 bool Changed = false;
100 for (auto &[GV, UserGlobals] : GlobalIsUsedByGlobal) {
101 OldUsersGlobals.assign(UserGlobals.begin(), UserGlobals.end());
102 for (const GlobalVariable *UserGV : OldUsersGlobals) {
103 auto It = GlobalIsUsedByGlobal.find(UserGV);
104 if (It == GlobalIsUsedByGlobal.end())
105 continue;
106 Changed |= set_union(UserGlobals, It->second);
107 }
108 }
109 return Changed;
110 }
111
112 void propagateGlobalToFunctionReferences(
113 OneToManyMapTy<const GlobalVariable *, const GlobalVariable *>
114 &GlobalIsUsedByGlobal) {
115 for (auto &[GV, UserGlobals] : GlobalIsUsedByGlobal) {
116 auto &UserFunctions = GlobalIsUsedByFun[GV];
117 for (const GlobalVariable *UserGV : UserGlobals) {
118 auto It = GlobalIsUsedByFun.find(UserGV);
119 if (It == GlobalIsUsedByFun.end())
120 continue;
121 set_union(UserFunctions, It->second);
122 }
123 }
124 }
125
126public:
127 void init(Module &M) {
128 // Collect which global variables are referenced by which global variables
129 // and which functions reference each global variables.
130 OneToManyMapTy<const GlobalVariable *, const GlobalVariable *>
131 GlobalIsUsedByGlobal;
132 GlobalIsUsedByFun.clear();
133 for (GlobalVariable &GV : M.globals())
134 collectGlobalUsers(&GV, GlobalIsUsedByGlobal);
135
136 // Compute indirect references by iterating until a fixed point is reached.
137 while (propagateGlobalToGlobalUsers(GlobalIsUsedByGlobal))
138 (void)0;
139
140 propagateGlobalToFunctionReferences(GlobalIsUsedByGlobal);
141 }
142
143 using FunctionSetType = typename decltype(GlobalIsUsedByFun)::mapped_type;
144 const FunctionSetType &
145 getTransitiveUserFunctions(const GlobalVariable &GV) const {
146 auto It = GlobalIsUsedByFun.find(&GV);
147 if (It != GlobalIsUsedByFun.end())
148 return It->second;
149
150 static const FunctionSetType Empty{};
151 return Empty;
152 }
153};
154
static bool isaGEP(const Value *V) {
  // NOTE(review): the body (original line 156) is not visible in this
  // excerpt — presumably a check that V is a GEP instruction/operator;
  // confirm against upstream.
}
158
// Module pass that rewrites LLVM IR into a form the SPIR-V IRTranslator can
// consume: aggregate-related operations become spv_* intrinsics, and
// auxiliary intrinsics record names, constants and pointee types (see the
// file header comment for the full list of transformations).
class SPIRVEmitIntrinsics
    : public ModulePass,
      public InstVisitor<SPIRVEmitIntrinsics, Instruction *> {
  SPIRVTargetMachine *TM = nullptr;
  SPIRVGlobalRegistry *GR = nullptr;
  // Function currently being processed.
  Function *CurrF = nullptr;
  bool TrackConstants = true;
  bool HaveFunPtrs = false;
  DenseMap<Instruction *, Constant *> AggrConsts;
  DenseMap<Instruction *, Type *> AggrConstTypes;
  DenseSet<Instruction *> AggrStores;
  GlobalVariableUsers GVUsers;
  // Values for which a spv_assign_name has already been emitted.
  std::unordered_set<Value *> Named;

  // map of function declarations to <pointer arg index => element type>
  DenseMap<Function *, SmallVector<std::pair<unsigned, Type *>>> FDeclPtrTys;

  // a register of Instructions that don't have a complete type definition
  bool CanTodoType = true;
  unsigned TodoTypeSz = 0;
  DenseMap<Value *, bool> TodoType;
  void insertTodoType(Value *Op) {
    // TODO: add isa<CallInst>(Op) to no-insert
    if (CanTodoType && !isaGEP(Op)) {
      auto It = TodoType.try_emplace(Op, true);
      if (It.second)
        ++TodoTypeSz;
    }
  }
  // Entries are deactivated (flag flipped to false) rather than erased.
  void eraseTodoType(Value *Op) {
    auto It = TodoType.find(Op);
    if (It != TodoType.end() && It->second) {
      It->second = false;
      --TodoTypeSz;
    }
  }
  bool isTodoType(Value *Op) {
    if (isaGEP(Op))
      return false;
    auto It = TodoType.find(Op);
    return It != TodoType.end() && It->second;
  }
  // a register of Instructions that were visited by deduceOperandElementType()
  // to validate operand types with an instruction
  std::unordered_set<Instruction *> TypeValidated;

  // well known result types of builtins
  enum WellKnownTypes { Event };

  // deduce element type of untyped pointers
  Type *deduceElementType(Value *I, bool UnknownElemTypeI8);
  Type *deduceElementTypeHelper(Value *I, bool UnknownElemTypeI8);
  Type *deduceElementTypeHelper(Value *I, std::unordered_set<Value *> &Visited,
                                bool UnknownElemTypeI8,
                                bool IgnoreKnownType = false);
  Type *deduceElementTypeByValueDeep(Type *ValueTy, Value *Operand,
                                     bool UnknownElemTypeI8);
  Type *deduceElementTypeByValueDeep(Type *ValueTy, Value *Operand,
                                     std::unordered_set<Value *> &Visited,
                                     bool UnknownElemTypeI8);
  Type *deduceElementTypeByUsersDeep(Value *Op,
                                     std::unordered_set<Value *> &Visited,
                                     bool UnknownElemTypeI8);
  void maybeAssignPtrType(Type *&Ty, Value *I, Type *RefTy,
                          bool UnknownElemTypeI8);

  // deduce nested types of composites
  Type *deduceNestedTypeHelper(User *U, bool UnknownElemTypeI8);
  Type *deduceNestedTypeHelper(User *U, Type *Ty,
                               std::unordered_set<Value *> &Visited,
                               bool UnknownElemTypeI8);

  // deduce Types of operands of the Instruction if possible
  void deduceOperandElementType(Instruction *I,
                                SmallPtrSet<Instruction *, 4> *IncompleteRets,
                                const SmallPtrSet<Value *, 4> *AskOps = nullptr,
                                bool IsPostprocessing = false);

  void preprocessCompositeConstants(IRBuilder<> &B);
  void preprocessUndefs(IRBuilder<> &B);

  Type *reconstructType(Value *Op, bool UnknownElemTypeI8,
                        bool IsPostprocessing);

  void replaceMemInstrUses(Instruction *Old, Instruction *New, IRBuilder<> &B);
  void processInstrAfterVisit(Instruction *I, IRBuilder<> &B);
  bool insertAssignPtrTypeIntrs(Instruction *I, IRBuilder<> &B,
                                bool UnknownElemTypeI8);
  void insertAssignTypeIntrs(Instruction *I, IRBuilder<> &B);
  void insertAssignPtrTypeTargetExt(TargetExtType *AssignedType, Value *V,
                                    IRBuilder<> &B);
  void replacePointerOperandWithPtrCast(Instruction *I, Value *Pointer,
                                        Type *ExpectedElementType,
                                        unsigned OperandToReplace,
                                        IRBuilder<> &B);
  void insertPtrCastOrAssignTypeInstr(Instruction *I, IRBuilder<> &B);
  bool shouldTryToAddMemAliasingDecoration(Instruction *Inst);
  void insertSpirvDecorations(Instruction *I, IRBuilder<> &B);
  void insertConstantsForFPFastMathDefault(Module &M);
  void processGlobalValue(GlobalVariable &GV, IRBuilder<> &B);
  void processParamTypes(Function *F, IRBuilder<> &B);
  void processParamTypesByFunHeader(Function *F, IRBuilder<> &B);
  Type *deduceFunParamElementType(Function *F, unsigned OpIdx);
  Type *deduceFunParamElementType(Function *F, unsigned OpIdx,
                                  std::unordered_set<Function *> &FVisited);

  bool deduceOperandElementTypeCalledFunction(
      CallInst *CI, SmallVector<std::pair<Value *, unsigned>> &Ops,
      Type *&KnownElemTy, bool &Incomplete);
  void deduceOperandElementTypeFunctionPointer(
      CallInst *CI, SmallVector<std::pair<Value *, unsigned>> &Ops,
      Type *&KnownElemTy, bool IsPostprocessing);
  bool deduceOperandElementTypeFunctionRet(
      Instruction *I, SmallPtrSet<Instruction *, 4> *IncompleteRets,
      const SmallPtrSet<Value *, 4> *AskOps, bool IsPostprocessing,
      Type *&KnownElemTy, Value *Op, Function *F);

  CallInst *buildSpvPtrcast(Function *F, Value *Op, Type *ElemTy);
  // NOTE(review): Ptrcasts is taken by value here and in the definition, so
  // cache entries added inside do not propagate back to the caller's map.
  void replaceUsesOfWithSpvPtrcast(Value *Op, Type *ElemTy, Instruction *I,
                                   DenseMap<Function *, CallInst *> Ptrcasts);
  void propagateElemType(Value *Op, Type *ElemTy,
                         DenseSet<std::pair<Value *, Value *>> &VisitedSubst);
  void
  propagateElemTypeRec(Value *Op, Type *PtrElemTy, Type *CastElemTy,
                       DenseSet<std::pair<Value *, Value *>> &VisitedSubst);
  void propagateElemTypeRec(Value *Op, Type *PtrElemTy, Type *CastElemTy,
                            DenseSet<std::pair<Value *, Value *>> &VisitedSubst,
                            std::unordered_set<Value *> &Visited,
                            DenseMap<Function *, CallInst *> Ptrcasts);

  void replaceAllUsesWith(Value *Src, Value *Dest, bool DeleteOld = true);
  void replaceAllUsesWithAndErase(IRBuilder<> &B, Instruction *Src,
                                  Instruction *Dest, bool DeleteOld = true);

  void applyDemangledPtrArgTypes(IRBuilder<> &B);

  GetElementPtrInst *simplifyZeroLengthArrayGepInst(GetElementPtrInst *GEP);

  bool runOnFunction(Function &F);
  bool postprocessTypes(Module &M);
  bool processFunctionPointers(Module &M);
  void parseFunDeclarations(Module &M);
  void useRoundingMode(ConstrainedFPIntrinsic *FPI, IRBuilder<> &B);
  bool processMaskedMemIntrinsic(IntrinsicInst &I);
  bool convertMaskedMemIntrinsics(Module &M);

  void emitUnstructuredLoopControls(Function &F, IRBuilder<> &B);

  // Tries to walk the type accessed by the given GEP instruction.
  // For each nested type access, one of the 2 callbacks is called:
  // - OnLiteralIndexing when the index is a known constant value.
  // Parameters:
  // PointedType: the pointed type resulting of this indexing.
  // If the parent type is an array, this is the index in the array.
  // If the parent type is a struct, this is the field index.
  // Index: index of the element in the parent type.
  // - OnDynamnicIndexing when the index is a non-constant value.
  // This callback is only called when indexing into an array.
  // Parameters:
  // ElementType: the type of the elements stored in the parent array.
  // Offset: the Value* containing the byte offset into the array.
  // Return true if an error occurred during the walk, false otherwise.
  bool walkLogicalAccessChain(
      GetElementPtrInst &GEP,
      const std::function<void(Type *PointedType, uint64_t Index)>
          &OnLiteralIndexing,
      const std::function<void(Type *ElementType, Value *Offset)>
          &OnDynamicIndexing);

  // Returns the type accessed using the given GEP instruction by relying
  // on the GEP type.
  // FIXME: GEP types are not supposed to be used to retrieve the pointed
  // type. This must be fixed.
  Type *getGEPType(GetElementPtrInst *GEP);

  // Returns the type accessed using the given GEP instruction by walking
  // the source type using the GEP indices.
  // FIXME: without help from the frontend, this method cannot reliably retrieve
  // the stored type, nor can robustly determine the depth of the type
  // we are accessing.
  Type *getGEPTypeLogical(GetElementPtrInst *GEP);

  Instruction *buildLogicalAccessChainFromGEP(GetElementPtrInst &GEP);

public:
  static char ID;
  SPIRVEmitIntrinsics(SPIRVTargetMachine *TM = nullptr)
      : ModulePass(ID), TM(TM) {}
  // InstVisitor hooks; each returns the (possibly replacement) instruction.
  Instruction *visitInstruction(Instruction &I) { return &I; }
  Instruction *visitSwitchInst(SwitchInst &I);
  Instruction *visitGetElementPtrInst(GetElementPtrInst &I);
  Instruction *visitIntrinsicInst(IntrinsicInst &I);
  Instruction *visitBitCastInst(BitCastInst &I);
  Instruction *visitInsertElementInst(InsertElementInst &I);
  Instruction *visitExtractElementInst(ExtractElementInst &I);
  Instruction *visitInsertValueInst(InsertValueInst &I);
  Instruction *visitExtractValueInst(ExtractValueInst &I);
  Instruction *visitLoadInst(LoadInst &I);
  Instruction *visitStoreInst(StoreInst &I);
  Instruction *visitAllocaInst(AllocaInst &I);
  Instruction *visitAtomicCmpXchgInst(AtomicCmpXchgInst &I);
  Instruction *visitUnreachableInst(UnreachableInst &I);
  Instruction *visitCallInst(CallInst &I);

  StringRef getPassName() const override { return "SPIRV emit intrinsics"; }

  bool runOnModule(Module &M) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    ModulePass::getAnalysisUsage(AU);
  }
};
371
372bool isConvergenceIntrinsic(const Instruction *I) {
373 const auto *II = dyn_cast<IntrinsicInst>(I);
374 if (!II)
375 return false;
376
377 return II->getIntrinsicID() == Intrinsic::experimental_convergence_entry ||
378 II->getIntrinsicID() == Intrinsic::experimental_convergence_loop ||
379 II->getIntrinsicID() == Intrinsic::experimental_convergence_anchor;
380}
381
382bool expectIgnoredInIRTranslation(const Instruction *I) {
383 const auto *II = dyn_cast<IntrinsicInst>(I);
384 if (!II)
385 return false;
386 switch (II->getIntrinsicID()) {
387 case Intrinsic::invariant_start:
388 case Intrinsic::spv_resource_handlefrombinding:
389 case Intrinsic::spv_resource_getpointer:
390 return true;
391 default:
392 return false;
393 }
394}
395
396// Returns the source pointer from `I` ignoring intermediate ptrcast.
397Value *getPointerRoot(Value *I) {
398 if (auto *II = dyn_cast<IntrinsicInst>(I)) {
399 if (II->getIntrinsicID() == Intrinsic::spv_ptrcast) {
400 Value *V = II->getArgOperand(0);
401 return getPointerRoot(V);
402 }
403 }
404 return I;
405}
406
407} // namespace
408
// Pass identification token used by the LLVM pass registry.
char SPIRVEmitIntrinsics::ID = 0;

INITIALIZE_PASS(SPIRVEmitIntrinsics, "emit-intrinsics", "SPIRV emit intrinsics",
                false, false)
413
414static inline bool isAssignTypeInstr(const Instruction *I) {
415 return isa<IntrinsicInst>(I) &&
416 cast<IntrinsicInst>(I)->getIntrinsicID() == Intrinsic::spv_assign_type;
417}
418
423
// Aggregate constants whose elements must be emitted as forced i32 values.
// NOTE(review): one disjunct of this condition (original line 426) is not
// visible in this excerpt; confirm the full predicate against upstream.
static bool isAggrConstForceInt32(const Value *V) {
  return isa<ConstantArray>(V) || isa<ConstantStruct>(V) ||
         (isa<ConstantAggregateZero>(V) && !V->getType()->isVectorTy());
}
429
  // NOTE(review): the enclosing signature (original line 430) is missing from
  // this excerpt — presumably a helper taking (IRBuilder<> &B, Instruction *I)
  // that positions B at I, skipping leading PHIs; confirm upstream.
  if (isa<PHINode>(I))
    // PHIs must stay grouped at the block start; insert after them instead.
    B.SetInsertPoint(I->getParent()->getFirstNonPHIOrDbgOrAlloca());
  else
    B.SetInsertPoint(I);
}
436
  // NOTE(review): the enclosing signature (original line 437) is missing from
  // this excerpt; the body positions B immediately after I's definition point
  // and adopts I's debug location.
  B.SetCurrentDebugLocation(I->getDebugLoc());
  if (I->getType()->isVoidTy())
    // Void-producing instructions have no def point; insert after them.
    B.SetInsertPoint(I->getNextNode());
  else
    B.SetInsertPoint(*I->getInsertionPointAfterDef());
}
444
  // NOTE(review): the enclosing signature (original line 445) is missing from
  // this excerpt; returns false only for invariant.start/invariant.end, i.e.
  // intrinsics that need no spv_assign_type.
  if (const auto *Intr = dyn_cast<IntrinsicInst>(I)) {
    switch (Intr->getIntrinsicID()) {
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
      return false;
    }
  }
  return true;
}
455
456static inline void reportFatalOnTokenType(const Instruction *I) {
457 if (I->getType()->isTokenTy())
458 report_fatal_error("A token is encountered but SPIR-V without extensions "
459 "does not support token type",
460 false);
461}
462
  // NOTE(review): the enclosing signature (original line 463) is missing from
  // this excerpt; emits a spv_assign_name intrinsic carrying I's IR name so
  // it survives into SPIR-V as an OpName.
  if (!I->hasName() || I->getType()->isAggregateType() ||
      expectIgnoredInIRTranslation(I))
    return;

  // We want to be conservative when adding the names because they can interfere
  // with later optimizations.
  bool KeepName = SpirvEmitOpNames;
  if (!KeepName) {
    // Default conservative set: allocas and llvm.spv.alloca calls only.
    if (isa<AllocaInst>(I)) {
      KeepName = true;
    } else if (auto *CI = dyn_cast<CallBase>(I)) {
      Function *F = CI->getCalledFunction();
      if (F && F->getName().starts_with("llvm.spv.alloca"))
        KeepName = true;
    }
  }

  if (!KeepName)
    return;

  // NOTE(review): two lines (original 484-485) are missing here — presumably
  // builder positioning before emitting the intrinsic; confirm upstream.
  LLVMContext &Ctx = I->getContext();
  std::vector<Value *> Args = {
      // NOTE(review): the first line of this initializer (original 488) is
      // missing — presumably MetadataAsValue::get(...).
      Ctx, MDNode::get(Ctx, MDString::get(Ctx, I->getName())))};
  B.CreateIntrinsic(Intrinsic::spv_assign_name, {I->getType()}, Args);
}
492
493void SPIRVEmitIntrinsics::replaceAllUsesWith(Value *Src, Value *Dest,
494 bool DeleteOld) {
495 GR->replaceAllUsesWith(Src, Dest, DeleteOld);
496 // Update uncomplete type records if any
497 if (isTodoType(Src)) {
498 if (DeleteOld)
499 eraseTodoType(Src);
500 insertTodoType(Dest);
501 }
502}
503
504void SPIRVEmitIntrinsics::replaceAllUsesWithAndErase(IRBuilder<> &B,
505 Instruction *Src,
506 Instruction *Dest,
507 bool DeleteOld) {
508 replaceAllUsesWith(Src, Dest, DeleteOld);
509 std::string Name = Src->hasName() ? Src->getName().str() : "";
510 Src->eraseFromParent();
511 if (!Name.empty()) {
512 Dest->setName(Name);
513 if (Named.insert(Dest).second)
514 emitAssignName(Dest, B);
515 }
516}
517
  // NOTE(review): the enclosing signature (original line 518) is missing from
  // this excerpt; true iff a SPIR_KERNEL function F stores one of its
  // pointer-typed formal arguments through SI.
  return SI && F->getCallingConv() == CallingConv::SPIR_KERNEL &&
         isPointerTy(SI->getValueOperand()->getType()) &&
         isa<Argument>(SI->getValueOperand());
}
523
// Maybe restore original function return type.
// NOTE(review): the first signature line (original 525) and part of the guard
// condition (original 529) are missing from this excerpt; confirm upstream.
                                  Type *Ty) {
  if (!CI || CI->isIndirectCall() || CI->isInlineAsm() ||
    return Ty;
  // Prefer the pre-mutation type if the registry recorded one for the callee.
  if (Type *OriginalTy = GR->findMutated(CI->getCalledFunction()))
    return OriginalTy;
  return Ty;
}
535
// Reconstruct type with nested element types according to deduced type info.
// Return nullptr if no detailed type info is available.
Type *SPIRVEmitIntrinsics::reconstructType(Value *Op, bool UnknownElemTypeI8,
                                           bool IsPostprocessing) {
  Type *Ty = Op->getType();
  if (auto *OpI = dyn_cast<Instruction>(Op))
    Ty = restoreMutatedType(GR, OpI, Ty);
  if (!isUntypedPointerTy(Ty))
    return Ty;
  // try to find the pointee type
  if (Type *NestedTy = GR->findDeducedElementType(Op))
  // NOTE(review): the return statement using NestedTy (original line 547) is
  // missing from this excerpt.
    // not a pointer according to the type info (e.g., Event object)
  CallInst *CI = GR->findAssignPtrTypeInstr(Op);
  if (CI) {
    // Recover the pointee type from the spv_assign_ptr_type call's metadata.
    MetadataAsValue *MD = cast<MetadataAsValue>(CI->getArgOperand(1));
    return cast<ConstantAsMetadata>(MD->getMetadata())->getType();
  }
  if (UnknownElemTypeI8) {
    // Fall back to i8* and, unless finalizing, remember to revisit Op later.
    if (!IsPostprocessing)
      insertTodoType(Op);
    // NOTE(review): the second argument line (original 558) is missing here.
    return getTypedPointerWrapper(IntegerType::getInt8Ty(Op->getContext()),
  }
  return nullptr;
}
562
// Builds a spv_ptrcast of Op to pointee type ElemTy inside function F, and
// attaches the matching assign-ptr record to the new call.
CallInst *SPIRVEmitIntrinsics::buildSpvPtrcast(Function *F, Value *Op,
                                               Type *ElemTy) {
  IRBuilder<> B(Op->getContext());
  if (auto *OpI = dyn_cast<Instruction>(Op)) {
    // spv_ptrcast's argument Op denotes an instruction that generates
    // a value, and we may use getInsertionPointAfterDef()
    // NOTE(review): the insert-point call (original line 569) is missing from
    // this excerpt.
  } else if (auto *OpA = dyn_cast<Argument>(Op)) {
    // Arguments: insert just past the entry allocas, with no debug location.
    B.SetInsertPointPastAllocas(OpA->getParent());
    B.SetCurrentDebugLocation(DebugLoc());
  } else {
    // Anything else (e.g. constants): insert at the start of F's entry block.
    B.SetInsertPoint(F->getEntryBlock().getFirstNonPHIOrDbgOrAlloca());
  }
  Type *OpTy = Op->getType();
  // The cast keeps the LLVM-level pointer type; the target pointee type and
  // address space travel as intrinsic arguments.
  SmallVector<Type *, 2> Types = {OpTy, OpTy};
  SmallVector<Value *, 2> Args = {Op, buildMD(getNormalizedPoisonValue(ElemTy)),
                                  B.getInt32(getPointerAddressSpace(OpTy))};
  CallInst *PtrCasted =
      B.CreateIntrinsic(Intrinsic::spv_ptrcast, {Types}, Args);
  GR->buildAssignPtr(B, ElemTy, PtrCasted);
  return PtrCasted;
}
585
586void SPIRVEmitIntrinsics::replaceUsesOfWithSpvPtrcast(
587 Value *Op, Type *ElemTy, Instruction *I,
588 DenseMap<Function *, CallInst *> Ptrcasts) {
589 Function *F = I->getParent()->getParent();
590 CallInst *PtrCastedI = nullptr;
591 auto It = Ptrcasts.find(F);
592 if (It == Ptrcasts.end()) {
593 PtrCastedI = buildSpvPtrcast(F, Op, ElemTy);
594 Ptrcasts[F] = PtrCastedI;
595 } else {
596 PtrCastedI = It->second;
597 }
598 I->replaceUsesOfWith(Op, PtrCastedI);
599}
600
// Push a newly deduced pointee type of Op to its (non-intrinsic) instruction
// users, inserting spv_ptrcast where a user was already type-validated.
void SPIRVEmitIntrinsics::propagateElemType(
    Value *Op, Type *ElemTy,
    DenseSet<std::pair<Value *, Value *>> &VisitedSubst) {
  DenseMap<Function *, CallInst *> Ptrcasts;
  // Snapshot the users first: replaceUsesOfWithSpvPtrcast mutates use lists.
  SmallVector<User *> Users(Op->users());
  for (auto *U : Users) {
    if (!isa<Instruction>(U) || isSpvIntrinsic(U))
      continue;
    // Each (user, operand) substitution is processed at most once.
    if (!VisitedSubst.insert(std::make_pair(U, Op)).second)
      continue;
    // NOTE(review): the definition of UI from U (original line 611) is
    // missing from this excerpt.
    // If the instruction was validated already, we need to keep it valid by
    // keeping current Op type.
    if (isaGEP(UI) || TypeValidated.find(UI) != TypeValidated.end())
      replaceUsesOfWithSpvPtrcast(Op, ElemTy, UI, Ptrcasts);
  }
}
618
619void SPIRVEmitIntrinsics::propagateElemTypeRec(
620 Value *Op, Type *PtrElemTy, Type *CastElemTy,
621 DenseSet<std::pair<Value *, Value *>> &VisitedSubst) {
622 std::unordered_set<Value *> Visited;
623 DenseMap<Function *, CallInst *> Ptrcasts;
624 propagateElemTypeRec(Op, PtrElemTy, CastElemTy, VisitedSubst, Visited,
625 std::move(Ptrcasts));
626}
627
// Recursive worker: walks Op's users, skipping spv intrinsics and already
// visited values, casting validated users from PtrElemTy to CastElemTy.
void SPIRVEmitIntrinsics::propagateElemTypeRec(
    Value *Op, Type *PtrElemTy, Type *CastElemTy,
    DenseSet<std::pair<Value *, Value *>> &VisitedSubst,
    std::unordered_set<Value *> &Visited,
    DenseMap<Function *, CallInst *> Ptrcasts) {
  if (!Visited.insert(Op).second)
    return;
  // Snapshot users first: replaceUsesOfWithSpvPtrcast mutates use lists.
  SmallVector<User *> Users(Op->users());
  for (auto *U : Users) {
    if (!isa<Instruction>(U) || isSpvIntrinsic(U))
      continue;
    if (!VisitedSubst.insert(std::make_pair(U, Op)).second)
      continue;
    // NOTE(review): the definition of UI from U (original line 641) is
    // missing from this excerpt.
    // If the instruction was validated already, we need to keep it valid by
    // keeping current Op type.
    if (isaGEP(UI) || TypeValidated.find(UI) != TypeValidated.end())
      replaceUsesOfWithSpvPtrcast(Op, CastElemTy, UI, Ptrcasts);
  }
}
648
649// Set element pointer type to the given value of ValueTy and tries to
650// specify this type further (recursively) by Operand value, if needed.
651
652Type *
653SPIRVEmitIntrinsics::deduceElementTypeByValueDeep(Type *ValueTy, Value *Operand,
654 bool UnknownElemTypeI8) {
655 std::unordered_set<Value *> Visited;
656 return deduceElementTypeByValueDeep(ValueTy, Operand, Visited,
657 UnknownElemTypeI8);
658}
659
660Type *SPIRVEmitIntrinsics::deduceElementTypeByValueDeep(
661 Type *ValueTy, Value *Operand, std::unordered_set<Value *> &Visited,
662 bool UnknownElemTypeI8) {
663 Type *Ty = ValueTy;
664 if (Operand) {
665 if (auto *PtrTy = dyn_cast<PointerType>(Ty)) {
666 if (Type *NestedTy =
667 deduceElementTypeHelper(Operand, Visited, UnknownElemTypeI8))
668 Ty = getTypedPointerWrapper(NestedTy, PtrTy->getAddressSpace());
669 } else {
670 Ty = deduceNestedTypeHelper(dyn_cast<User>(Operand), Ty, Visited,
671 UnknownElemTypeI8);
672 }
673 }
674 return Ty;
675}
676
// Traverse User instructions to deduce an element pointer type of the operand.
Type *SPIRVEmitIntrinsics::deduceElementTypeByUsersDeep(
    Value *Op, std::unordered_set<Value *> &Visited, bool UnknownElemTypeI8) {
  // NOTE(review): one disjunct of this guard (original line 681) is missing
  // from this excerpt; confirm the full condition against upstream.
  if (!Op || !isPointerTy(Op->getType()) || isa<ConstantPointerNull>(Op) ||
    return nullptr;

  // Typed pointers carry their pointee directly.
  if (auto ElemTy = getPointeeType(Op->getType()))
    return ElemTy;

  // maybe we already know operand's element type
  if (Type *KnownTy = GR->findDeducedElementType(Op))
    return KnownTy;

  // Otherwise, the first user that yields a deduction wins.
  for (User *OpU : Op->users()) {
    if (Instruction *Inst = dyn_cast<Instruction>(OpU)) {
      if (Type *Ty = deduceElementTypeHelper(Inst, Visited, UnknownElemTypeI8))
        return Ty;
    }
  }
  return nullptr;
}
699
// Implements what we know in advance about intrinsics and builtin calls
// TODO: consider feasibility of this particular case to be generalized by
// encoding knowledge about intrinsics and builtin calls by corresponding
// specification rules
// NOTE(review): the first signature line (original 704) is missing from this
// excerpt — presumably `static Type *getPointeeTypeByCallInst(StringRef
// DemangledName, ...)`; confirm upstream.
                                  Function *CalledF, unsigned OpIdx) {
  // printf-family: the first operand is the i8* format string.
  if ((DemangledName.starts_with("__spirv_ocl_printf(") ||
       DemangledName.starts_with("printf(")) &&
      OpIdx == 0)
    return IntegerType::getInt8Ty(CalledF->getContext());
  return nullptr;
}
712
713// Deduce and return a successfully deduced Type of the Instruction,
714// or nullptr otherwise.
715Type *SPIRVEmitIntrinsics::deduceElementTypeHelper(Value *I,
716 bool UnknownElemTypeI8) {
717 std::unordered_set<Value *> Visited;
718 return deduceElementTypeHelper(I, Visited, UnknownElemTypeI8);
719}
720
721void SPIRVEmitIntrinsics::maybeAssignPtrType(Type *&Ty, Value *Op, Type *RefTy,
722 bool UnknownElemTypeI8) {
723 if (isUntypedPointerTy(RefTy)) {
724 if (!UnknownElemTypeI8)
725 return;
726 insertTodoType(Op);
727 }
728 Ty = RefTy;
729}
730
bool SPIRVEmitIntrinsics::walkLogicalAccessChain(
    GetElementPtrInst &GEP,
    const std::function<void(Type *, uint64_t)> &OnLiteralIndexing,
    const std::function<void(Type *, Value *)> &OnDynamicIndexing) {
  // We only rewrite i8* GEP. Other should be left as-is.
  // Valid i8* GEP must always have a single index.
  assert(GEP.getSourceElementType() ==
         IntegerType::getInt8Ty(CurrF->getContext()));
  assert(GEP.getNumIndices() == 1);

  auto &DL = CurrF->getDataLayout();
  // Walk through intermediate ptrcasts to the real base pointer; the byte
  // offset below is interpreted against its deduced pointee type.
  Value *Src = getPointerRoot(GEP.getPointerOperand());
  Type *CurType = deduceElementType(Src, true);

  Value *Operand = *GEP.idx_begin();
  ConstantInt *CI = dyn_cast<ConstantInt>(Operand);
  if (!CI) {
    ArrayType *AT = dyn_cast<ArrayType>(CurType);
    // Operand is not constant. Either we have an array and accept it, or we
    // give up.
    if (AT)
      OnDynamicIndexing(AT->getElementType(), Operand);
    return AT == nullptr; // true (= error) unless we indexed into an array
  }

  assert(CI);
  uint64_t Offset = CI->getZExtValue();

  // Peel one composite level per iteration, converting the remaining byte
  // offset into a literal index at each level until it reaches zero.
  do {
    if (ArrayType *AT = dyn_cast<ArrayType>(CurType)) {
      uint32_t EltTypeSize = DL.getTypeSizeInBits(AT->getElementType()) / 8;
      assert(Offset < AT->getNumElements() * EltTypeSize);
      uint64_t Index = Offset / EltTypeSize;
      Offset = Offset - (Index * EltTypeSize);
      CurType = AT->getElementType();
      OnLiteralIndexing(CurType, Index);
    } else if (StructType *ST = dyn_cast<StructType>(CurType)) {
      uint32_t StructSize = DL.getTypeSizeInBits(ST) / 8;
      assert(Offset < StructSize);
      (void)StructSize;
      // Use the target struct layout to locate the field holding Offset.
      const auto &STL = DL.getStructLayout(ST);
      unsigned Element = STL->getElementContainingOffset(Offset);
      Offset -= STL->getElementOffset(Element);
      CurType = ST->getElementType(Element);
      OnLiteralIndexing(CurType, Element);
    } else if (auto *VT = dyn_cast<FixedVectorType>(CurType)) {
      Type *EltTy = VT->getElementType();
      TypeSize EltSizeBits = DL.getTypeSizeInBits(EltTy);
      assert(EltSizeBits % 8 == 0 &&
             "Element type size in bits must be a multiple of 8.");
      uint32_t EltTypeSize = EltSizeBits / 8;
      assert(Offset < VT->getNumElements() * EltTypeSize);
      uint64_t Index = Offset / EltTypeSize;
      Offset -= Index * EltTypeSize;
      CurType = EltTy;
      OnLiteralIndexing(CurType, Index);

    } else {
      // Unknown composite kind; give up.
      return true;
    }
  } while (Offset > 0);

  return false;
}
796
// NOTE(review): the return-type line of this definition (original line 797,
// presumably `Instruction *`) is missing from this excerpt.
SPIRVEmitIntrinsics::buildLogicalAccessChainFromGEP(GetElementPtrInst &GEP) {
  auto &DL = CurrF->getDataLayout();
  IRBuilder<> B(GEP.getParent());
  B.SetInsertPoint(&GEP);

  // Leading 0: the access chain starts at the pointed-to object itself.
  std::vector<Value *> Indices;
  Indices.push_back(ConstantInt::get(
      IntegerType::getInt32Ty(CurrF->getContext()), 0, /* Signed= */ false));
  walkLogicalAccessChain(
      GEP,
      // Constant byte offsets become literal i64 indices.
      [&Indices, &B](Type *EltType, uint64_t Index) {
        Indices.push_back(
            ConstantInt::get(B.getInt64Ty(), Index, /* Signed= */ false));
      },
      // Dynamic array index: convert the byte offset into an element index.
      [&Indices, &B, &DL](Type *EltType, Value *Offset) {
        uint32_t EltTypeSize = DL.getTypeSizeInBits(EltType) / 8;
        Value *Index = B.CreateUDiv(
            Offset, ConstantInt::get(Offset->getType(), EltTypeSize,
                                     /* Signed= */ false));
        Indices.push_back(Index);
      });

  // Emit the spv_gep intrinsic and swap it in for the original GEP.
  SmallVector<Type *, 2> Types = {GEP.getType(), GEP.getOperand(0)->getType()};
  SmallVector<Value *, 4> Args;
  Args.push_back(B.getInt1(GEP.isInBounds()));
  Args.push_back(GEP.getOperand(0));
  llvm::append_range(Args, Indices);
  auto *NewI = B.CreateIntrinsic(Intrinsic::spv_gep, {Types}, {Args});
  replaceAllUsesWithAndErase(B, &GEP, NewI);
  return NewI;
}
829
830Type *SPIRVEmitIntrinsics::getGEPTypeLogical(GetElementPtrInst *GEP) {
831
832 Type *CurType = GEP->getResultElementType();
833
834 bool Interrupted = walkLogicalAccessChain(
835 *GEP, [&CurType](Type *EltType, uint64_t Index) { CurType = EltType; },
836 [&CurType](Type *EltType, Value *Index) { CurType = EltType; });
837
838 return Interrupted ? GEP->getResultElementType() : CurType;
839}
840
Type *SPIRVEmitIntrinsics::getGEPType(GetElementPtrInst *Ref) {
  // i8-based GEP: recover the accessed type by replaying the byte offset
  // over the deduced source type.
  // NOTE(review): part of this condition (original line 844) is missing from
  // this excerpt; confirm against upstream.
  if (Ref->getSourceElementType() ==
          IntegerType::getInt8Ty(CurrF->getContext()) &&
    return getGEPTypeLogical(Ref);
  }

  Type *Ty = nullptr;
  // TODO: not sure if GetElementPtrInst::getTypeAtIndex() does anything
  // useful here
  if (isNestedPointer(Ref->getSourceElementType())) {
    // Nested-pointer source: step through all but the first index.
    Ty = Ref->getSourceElementType();
    for (Use &U : drop_begin(Ref->indices()))
      Ty = GetElementPtrInst::getTypeAtIndex(Ty, U.get());
  } else {
    Ty = Ref->getResultElementType();
  }
  return Ty;
}
860
// Deduce the pointee ("element") type of pointer value I by pattern-matching
// its producer: alloca, GEP, load, global, address-space/bit/int casts,
// atomics, phi, select, and known builtin calls. Returns nullptr when nothing
// can be deduced. Visited breaks cycles; UnknownElemTypeI8 controls whether
// callees may fall back to i8 pointees; IgnoreKnownType bypasses the cache.
861Type *SPIRVEmitIntrinsics::deduceElementTypeHelper(
 862 Value *I, std::unordered_set<Value *> &Visited, bool UnknownElemTypeI8,
 863 bool IgnoreKnownType) {
 864 // allow to pass nullptr as an argument
 865 if (!I)
 866 return nullptr;
 867
 868 // maybe already known
 869 if (!IgnoreKnownType)
 870 if (Type *KnownTy = GR->findDeducedElementType(I))
 871 return KnownTy;
 872
 873 // maybe a cycle
 874 if (!Visited.insert(I).second)
 875 return nullptr;
 876
 877 // fallback value in case when we fail to deduce a type
 878 Type *Ty = nullptr;
 879 // look for known basic patterns of type inference
 880 if (auto *Ref = dyn_cast<AllocaInst>(I)) {
 881 maybeAssignPtrType(Ty, I, Ref->getAllocatedType(), UnknownElemTypeI8);
 882 } else if (auto *Ref = dyn_cast<GetElementPtrInst>(I)) {
 883 Ty = getGEPType(Ref);
 884 } else if (auto *SGEP = dyn_cast<StructuredGEPInst>(I)) {
 885 Ty = SGEP->getResultElementType();
 886 } else if (auto *Ref = dyn_cast<LoadInst>(I)) {
 887 Value *Op = Ref->getPointerOperand();
 888 Type *KnownTy = GR->findDeducedElementType(Op);
 889 if (!KnownTy)
 890 KnownTy = Op->getType();
 891 if (Type *ElemTy = getPointeeType(KnownTy))
 892 maybeAssignPtrType(Ty, I, ElemTy, UnknownElemTypeI8);
 893 } else if (auto *Ref = dyn_cast<GlobalValue>(I)) {
 894 if (auto *Fn = dyn_cast<Function>(Ref)) {
 895 Ty = SPIRV::getOriginalFunctionType(*Fn);
 896 GR->addDeducedElementType(I, Ty);
 897 } else {
 898 Ty = deduceElementTypeByValueDeep(
 899 Ref->getValueType(),
 900 Ref->getNumOperands() > 0 ? Ref->getOperand(0) : nullptr, Visited,
 901 UnknownElemTypeI8);
 902 }
 903 } else if (auto *Ref = dyn_cast<AddrSpaceCastInst>(I)) {
 904 Type *RefTy = deduceElementTypeHelper(Ref->getPointerOperand(), Visited,
 905 UnknownElemTypeI8);
 906 maybeAssignPtrType(Ty, I, RefTy, UnknownElemTypeI8);
 907 } else if (auto *Ref = dyn_cast<IntToPtrInst>(I)) {
 908 maybeAssignPtrType(Ty, I, Ref->getDestTy(), UnknownElemTypeI8);
 909 } else if (auto *Ref = dyn_cast<BitCastInst>(I)) {
 910 if (Type *Src = Ref->getSrcTy(), *Dest = Ref->getDestTy();
 911 isPointerTy(Src) && isPointerTy(Dest))
 912 Ty = deduceElementTypeHelper(Ref->getOperand(0), Visited,
 913 UnknownElemTypeI8);
 914 } else if (auto *Ref = dyn_cast<AtomicCmpXchgInst>(I)) {
 915 Value *Op = Ref->getNewValOperand();
 916 if (isPointerTy(Op->getType()))
 917 Ty = deduceElementTypeHelper(Op, Visited, UnknownElemTypeI8);
 918 } else if (auto *Ref = dyn_cast<AtomicRMWInst>(I)) {
 919 Value *Op = Ref->getValOperand();
 920 if (isPointerTy(Op->getType()))
 921 Ty = deduceElementTypeHelper(Op, Visited, UnknownElemTypeI8);
 922 } else if (auto *Ref = dyn_cast<PHINode>(I)) {
 // For phis: poll each incoming value and keep the most frequently
 // deduced type (majority vote over the incoming edges).
 923 Type *BestTy = nullptr;
 924 unsigned MaxN = 1;
 925 DenseMap<Type *, unsigned> PhiTys;
 926 for (int i = Ref->getNumIncomingValues() - 1; i >= 0; --i) {
 927 Ty = deduceElementTypeByUsersDeep(Ref->getIncomingValue(i), Visited,
 928 UnknownElemTypeI8);
 929 if (!Ty)
 930 continue;
 931 auto It = PhiTys.try_emplace(Ty, 1);
 932 if (!It.second) {
 933 ++It.first->second;
 934 if (It.first->second > MaxN) {
 935 MaxN = It.first->second;
 936 BestTy = Ty;
 937 }
 938 }
 939 }
 940 if (BestTy)
 941 Ty = BestTy;
 942 } else if (auto *Ref = dyn_cast<SelectInst>(I)) {
 // For selects: the first arm that yields a deduction wins.
 943 for (Value *Op : {Ref->getTrueValue(), Ref->getFalseValue()}) {
 944 Ty = deduceElementTypeByUsersDeep(Op, Visited, UnknownElemTypeI8);
 945 if (Ty)
 946 break;
 947 }
 948 } else if (auto *CI = dyn_cast<CallInst>(I)) {
 // Known address-space cast builtins propagate the element type of the
 // argument at the recorded index (always 0 in this table).
 949 static StringMap<unsigned> ResTypeByArg = {
 950 {"to_global", 0},
 951 {"to_local", 0},
 952 {"to_private", 0},
 953 {"__spirv_GenericCastToPtr_ToGlobal", 0},
 954 {"__spirv_GenericCastToPtr_ToLocal", 0},
 955 {"__spirv_GenericCastToPtr_ToPrivate", 0},
 956 {"__spirv_GenericCastToPtrExplicit_ToGlobal", 0},
 957 {"__spirv_GenericCastToPtrExplicit_ToLocal", 0},
 958 {"__spirv_GenericCastToPtrExplicit_ToPrivate", 0}};
 959 // TODO: maybe improve performance by caching demangled names
 960
 // NOTE(review): original line 961 is elided in this listing; presumably
 // it declares `II` as dyn_cast<IntrinsicInst>(CI) — confirm in full source.
 962 if (II && II->getIntrinsicID() == Intrinsic::spv_resource_getpointer) {
 963 auto *HandleType = cast<TargetExtType>(II->getOperand(0)->getType());
 964 if (HandleType->getTargetExtName() == "spirv.Image" ||
 965 HandleType->getTargetExtName() == "spirv.SignedImage") {
 966 for (User *U : II->users()) {
 967 Ty = cast<Instruction>(U)->getAccessType();
 968 if (Ty)
 969 break;
 970 }
 971 } else if (HandleType->getTargetExtName() == "spirv.VulkanBuffer") {
 972 // This call is supposed to index into an array
 973 Ty = HandleType->getTypeParameter(0);
 974 if (Ty->isArrayTy())
 975 Ty = Ty->getArrayElementType();
 976 else {
 977 assert(Ty && Ty->isStructTy());
 978 uint32_t Index = cast<ConstantInt>(II->getOperand(1))->getZExtValue();
 979 Ty = cast<StructType>(Ty)->getElementType(Index);
 980 }
 982 } else {
 983 llvm_unreachable("Unknown handle type for spv_resource_getpointer.");
 984 }
 985 } else if (II && II->getIntrinsicID() ==
 986 Intrinsic::spv_generic_cast_to_ptr_explicit) {
 987 Ty = deduceElementTypeHelper(CI->getArgOperand(0), Visited,
 988 UnknownElemTypeI8);
 989 } else if (Function *CalledF = CI->getCalledFunction()) {
 990 std::string DemangledName =
 991 getOclOrSpirvBuiltinDemangledName(CalledF->getName());
 992 if (DemangledName.length() > 0)
 993 DemangledName = SPIRV::lookupBuiltinNameHelper(DemangledName);
 994 auto AsArgIt = ResTypeByArg.find(DemangledName);
 995 if (AsArgIt != ResTypeByArg.end())
 996 Ty = deduceElementTypeHelper(CI->getArgOperand(AsArgIt->second),
 997 Visited, UnknownElemTypeI8);
 998 else if (Type *KnownRetTy = GR->findDeducedElementType(CalledF))
 999 Ty = KnownRetTy;
 1000 }
 1001 }
 1002
 1003 // remember the found relationship
 1004 if (Ty && !IgnoreKnownType) {
 1005 // specify nested types if needed, otherwise return unchanged
 // NOTE(review): original line 1006 (the body of this branch, presumably a
 // GR->addDeducedElementType call) is elided in this listing.
 1007 }
 1008
 1009 return Ty;
 1010}
1011
1012// Re-create a type of the value if it has untyped pointer fields, also nested.
1013// Return the original value type if no corrections of untyped pointer
1014// information is found or needed.
// Convenience overload: seeds the recursion with U's own type and a fresh
// visited set. UnknownElemTypeI8 is forwarded to the element-type deduction.
1015Type *SPIRVEmitIntrinsics::deduceNestedTypeHelper(User *U,
 1016 bool UnknownElemTypeI8) {
 1017 std::unordered_set<Value *> Visited;
 1018 return deduceNestedTypeHelper(U, U->getType(), Visited, UnknownElemTypeI8);
 1019}
1020
// Recursive worker: walks struct/array/vector aggregates, replacing untyped
// pointer members with typed-pointer wrappers when an element type can be
// deduced from the corresponding operand. Returns OrigTy when no change is
// needed; newly built composite types are cached in GR.
1021Type *SPIRVEmitIntrinsics::deduceNestedTypeHelper(
 1022 User *U, Type *OrigTy, std::unordered_set<Value *> &Visited,
 1023 bool UnknownElemTypeI8) {
 1024 if (!U)
 1025 return OrigTy;
 1026
 1027 // maybe already known
 1028 if (Type *KnownTy = GR->findDeducedCompositeType(U))
 1029 return KnownTy;
 1030
 1031 // maybe a cycle
 1032 if (!Visited.insert(U).second)
 1033 return OrigTy;
 1034
 1035 if (isa<StructType>(OrigTy)) {
 // NOTE(review): original line 1036 is elided in this listing; presumably
 // it declares `Tys` (a SmallVector<Type *>) used below — confirm.
 1037 bool Change = false;
 1038 for (unsigned i = 0; i < U->getNumOperands(); ++i) {
 1039 Value *Op = U->getOperand(i);
 1040 assert(Op && "Operands should not be null.");
 1041 Type *OpTy = Op->getType();
 1042 Type *Ty = OpTy;
 1043 if (auto *PtrTy = dyn_cast<PointerType>(OpTy)) {
 1044 if (Type *NestedTy =
 1045 deduceElementTypeHelper(Op, Visited, UnknownElemTypeI8))
 1046 Ty = getTypedPointerWrapper(NestedTy, PtrTy->getAddressSpace());
 1047 } else {
 1048 Ty = deduceNestedTypeHelper(dyn_cast<User>(Op), OpTy, Visited,
 1049 UnknownElemTypeI8);
 1050 }
 1051 Tys.push_back(Ty);
 1052 Change |= Ty != OpTy;
 1053 }
 1054 if (Change) {
 1055 Type *NewTy = StructType::create(Tys);
 1056 GR->addDeducedCompositeType(U, NewTy);
 1057 return NewTy;
 1058 }
 1059 } else if (auto *ArrTy = dyn_cast<ArrayType>(OrigTy)) {
 // Arrays are homogeneous, so inspecting the first operand suffices.
 1060 if (Value *Op = U->getNumOperands() > 0 ? U->getOperand(0) : nullptr) {
 1061 Type *OpTy = ArrTy->getElementType();
 1062 Type *Ty = OpTy;
 1063 if (auto *PtrTy = dyn_cast<PointerType>(OpTy)) {
 1064 if (Type *NestedTy =
 1065 deduceElementTypeHelper(Op, Visited, UnknownElemTypeI8))
 1066 Ty = getTypedPointerWrapper(NestedTy, PtrTy->getAddressSpace());
 1067 } else {
 1068 Ty = deduceNestedTypeHelper(dyn_cast<User>(Op), OpTy, Visited,
 1069 UnknownElemTypeI8);
 1070 }
 1071 if (Ty != OpTy) {
 1072 Type *NewTy = ArrayType::get(Ty, ArrTy->getNumElements());
 1073 GR->addDeducedCompositeType(U, NewTy);
 1074 return NewTy;
 1075 }
 1076 }
 1077 } else if (auto *VecTy = dyn_cast<VectorType>(OrigTy)) {
 // Same first-element strategy for vectors.
 1078 if (Value *Op = U->getNumOperands() > 0 ? U->getOperand(0) : nullptr) {
 1079 Type *OpTy = VecTy->getElementType();
 1080 Type *Ty = OpTy;
 1081 if (auto *PtrTy = dyn_cast<PointerType>(OpTy)) {
 1082 if (Type *NestedTy =
 1083 deduceElementTypeHelper(Op, Visited, UnknownElemTypeI8))
 1084 Ty = getTypedPointerWrapper(NestedTy, PtrTy->getAddressSpace());
 1085 } else {
 1086 Ty = deduceNestedTypeHelper(dyn_cast<User>(Op), OpTy, Visited,
 1087 UnknownElemTypeI8);
 1088 }
 1089 if (Ty != OpTy) {
 1090 Type *NewTy = VectorType::get(Ty, VecTy->getElementCount());
 // NOTE(review): original line 1091 is elided here; presumably the
 // matching GR->addDeducedCompositeType(U, NewTy) call — confirm.
 1092 return NewTy;
 1093 }
 1094 }
 1095 }
 1096
 1097 return OrigTy;
 1098}
1099
// Public entry: deduce the element type for I, or — when UnknownElemTypeI8
// allows it — fall back to i8 and record I as a "todo" to be revisited later.
// Returns nullptr only when deduction fails and the i8 fallback is disabled.
1100Type *SPIRVEmitIntrinsics::deduceElementType(Value *I, bool UnknownElemTypeI8) {
 1101 if (Type *Ty = deduceElementTypeHelper(I, UnknownElemTypeI8))
 1102 return Ty;
 1103 if (!UnknownElemTypeI8)
 1104 return nullptr;
 1105 insertTodoType(I);
 1106 return IntegerType::getInt8Ty(I->getContext());
 1107}
1108
// NOTE(review): original line 1109 (this function's signature) is elided in
// this listing; from the call sites it is presumably
// getAtomicElemTy(SPIRVGlobalRegistry *GR, Instruction *I, Value
// *PointerOperand) — confirm in the full source. It returns a type for an
// atomic result: nullptr when the pointee is already a concrete (typed)
// deduction, otherwise a best-effort type derived from I itself.
 1110 Value *PointerOperand) {
 1111 Type *PointeeTy = GR->findDeducedElementType(PointerOperand);
 1112 if (PointeeTy && !isUntypedPointerTy(PointeeTy))
 1113 return nullptr;
 1114 auto *PtrTy = dyn_cast<PointerType>(I->getType());
 1115 if (!PtrTy)
 1116 return I->getType();
 1117 if (Type *NestedTy = GR->findDeducedElementType(I))
 1118 return getTypedPointerWrapper(NestedTy, PtrTy->getAddressSpace());
 1119 return nullptr;
 1120}
1121
1122// Try to deduce element type for a call base. Returns false if this is an
1123// indirect function invocation, and true otherwise.
// On success, pointer operands needing a type are appended to Ops together
// with their argument indices, KnownElemTy receives the deduced element type,
// and Incomplete is set when the deduction is still tentative ("todo").
1124bool SPIRVEmitIntrinsics::deduceOperandElementTypeCalledFunction(
 1125 CallInst *CI, SmallVector<std::pair<Value *, unsigned>> &Ops,
 1126 Type *&KnownElemTy, bool &Incomplete) {
 1127 Function *CalledF = CI->getCalledFunction();
 1128 if (!CalledF)
 1129 return false;
 1130 std::string DemangledName =
 // NOTE(review): original line 1131 is elided in this listing; presumably
 // the initializer getOclOrSpirvBuiltinDemangledName(CalledF->getName()).
 1132 if (DemangledName.length() > 0 &&
 1133 !StringRef(DemangledName).starts_with("llvm.")) {
 1134 const SPIRVSubtarget &ST = TM->getSubtarget<SPIRVSubtarget>(*CalledF);
 1135 auto [Grp, Opcode, ExtNo] = SPIRV::mapBuiltinToOpcode(
 1136 DemangledName, ST.getPreferredInstructionSet());
 1137 if (Opcode == SPIRV::OpGroupAsyncCopy) {
 // Async copy takes two pointer args (dst, src); collect both.
 1138 for (unsigned i = 0, PtrCnt = 0; i < CI->arg_size() && PtrCnt < 2; ++i) {
 1139 Value *Op = CI->getArgOperand(i);
 1140 if (!isPointerTy(Op->getType()))
 1141 continue;
 1142 ++PtrCnt;
 1143 if (Type *ElemTy = GR->findDeducedElementType(Op))
 1144 KnownElemTy = ElemTy; // src will rewrite dest if both are defined
 1145 Ops.push_back(std::make_pair(Op, i));
 1146 }
 1147 } else if (Grp == SPIRV::Atomic || Grp == SPIRV::AtomicFloating) {
 1148 if (CI->arg_size() == 0)
 1149 return true;
 1150 Value *Op = CI->getArgOperand(0);
 1151 if (!isPointerTy(Op->getType()))
 1152 return true;
 1153 switch (Opcode) {
 1154 case SPIRV::OpAtomicFAddEXT:
 1155 case SPIRV::OpAtomicFMinEXT:
 1156 case SPIRV::OpAtomicFMaxEXT:
 1157 case SPIRV::OpAtomicLoad:
 1158 case SPIRV::OpAtomicCompareExchangeWeak:
 1159 case SPIRV::OpAtomicCompareExchange:
 1160 case SPIRV::OpAtomicExchange:
 1161 case SPIRV::OpAtomicIAdd:
 1162 case SPIRV::OpAtomicISub:
 1163 case SPIRV::OpAtomicOr:
 1164 case SPIRV::OpAtomicXor:
 1165 case SPIRV::OpAtomicAnd:
 1166 case SPIRV::OpAtomicUMin:
 1167 case SPIRV::OpAtomicUMax:
 1168 case SPIRV::OpAtomicSMin:
 1169 case SPIRV::OpAtomicSMax: {
 // Result-typed atomics: the element type is the call's own result
 // type (or, for pointer results, derived via getAtomicElemTy).
 1170 KnownElemTy = isPointerTy(CI->getType()) ? getAtomicElemTy(GR, CI, Op)
 1171 : CI->getType();
 1172 if (!KnownElemTy)
 1173 return true;
 1174 Incomplete = isTodoType(Op);
 1175 Ops.push_back(std::make_pair(Op, 0));
 1176 } break;
 1177 case SPIRV::OpAtomicStore: {
 // Stores carry the type on the stored value (4th argument).
 1178 if (CI->arg_size() < 4)
 1179 return true;
 1180 Value *ValOp = CI->getArgOperand(3);
 1181 KnownElemTy = isPointerTy(ValOp->getType())
 1182 ? getAtomicElemTy(GR, CI, Op)
 1183 : ValOp->getType();
 1184 if (!KnownElemTy)
 1185 return true;
 1186 Incomplete = isTodoType(Op);
 1187 Ops.push_back(std::make_pair(Op, 0));
 1188 } break;
 1189 }
 1190 }
 1191 }
 1192 return true;
 1193}
1194
1195// Try to deduce element type for a function pointer.
// Rebuilds the callee's function type, substituting typed-pointer wrappers
// for pointer params/return where element types are already deduced; the
// called operand is pushed into Ops with index UINT_MAX as a sentinel, and
// the operand is marked "todo" when any part of the type is still unknown.
1196void SPIRVEmitIntrinsics::deduceOperandElementTypeFunctionPointer(
 1197 CallInst *CI, SmallVector<std::pair<Value *, unsigned>> &Ops,
 1198 Type *&KnownElemTy, bool IsPostprocessing) {
 1199 Value *Op = CI->getCalledOperand();
 1200 if (!Op || !isPointerTy(Op->getType()))
 1201 return;
 1202 Ops.push_back(std::make_pair(Op, std::numeric_limits<unsigned>::max()));
 1203 FunctionType *FTy = SPIRV::getOriginalFunctionType(*CI);
 1204 bool IsNewFTy = false, IsIncomplete = false;
 // NOTE(review): original line 1205 is elided in this listing; presumably
 // the declaration of `ArgTys` (SmallVector<Type *>) used below — confirm.
 1206 for (auto &&[ParmIdx, Arg] : llvm::enumerate(CI->args())) {
 1207 Type *ArgTy = Arg->getType();
 1208 if (ArgTy->isPointerTy()) {
 1209 if (Type *ElemTy = GR->findDeducedElementType(Arg)) {
 1210 IsNewFTy = true;
 1211 ArgTy = getTypedPointerWrapper(ElemTy, getPointerAddressSpace(ArgTy));
 1212 if (isTodoType(Arg))
 1213 IsIncomplete = true;
 1214 } else {
 1215 IsIncomplete = true;
 1216 }
 1217 } else {
 1218 ArgTy = FTy->getFunctionParamType(ParmIdx);
 1219 }
 1220 ArgTys.push_back(ArgTy);
 1221 }
 1222 Type *RetTy = FTy->getReturnType();
 1223 if (CI->getType()->isPointerTy()) {
 1224 if (Type *ElemTy = GR->findDeducedElementType(CI)) {
 1225 IsNewFTy = true;
 1226 RetTy =
 // NOTE(review): original line 1227 is elided; presumably the
 // getTypedPointerWrapper(...) initializer for RetTy — confirm.
 1228 if (isTodoType(CI))
 1229 IsIncomplete = true;
 1230 } else {
 1231 IsIncomplete = true;
 1232 }
 1233 }
 1234 if (!IsPostprocessing && IsIncomplete)
 1235 insertTodoType(Op);
 1236 KnownElemTy =
 1237 IsNewFTy ? FunctionType::get(RetTy, ArgTys, FTy->isVarArg()) : FTy;
 1238}
1239
// Deduce the element type for a function's pointer return from the value
// being returned (Op). Returns true when the caller should stop processing
// this return (type already recorded now, or still unknown); returns false
// only when F already had a deduced element type. Also propagates the new
// type to existing call sites and retries previously incomplete returns.
1240bool SPIRVEmitIntrinsics::deduceOperandElementTypeFunctionRet(
 1241 Instruction *I, SmallPtrSet<Instruction *, 4> *IncompleteRets,
 1242 const SmallPtrSet<Value *, 4> *AskOps, bool IsPostprocessing,
 1243 Type *&KnownElemTy, Value *Op, Function *F) {
 1244 KnownElemTy = GR->findDeducedElementType(F);
 1245 if (KnownElemTy)
 1246 return false;
 1247 if (Type *OpElemTy = GR->findDeducedElementType(Op)) {
 1248 OpElemTy = normalizeType(OpElemTy);
 1249 GR->addDeducedElementType(F, OpElemTy);
 1250 GR->addReturnType(
 1251 F, TypedPointerType::get(OpElemTy,
 1252 getPointerAddressSpace(F->getReturnType())));
 1253 // non-recursive update of types in function uses
 1254 DenseSet<std::pair<Value *, Value *>> VisitedSubst{std::make_pair(I, Op)};
 1255 for (User *U : F->users()) {
 1256 CallInst *CI = dyn_cast<CallInst>(U);
 1257 if (!CI || CI->getCalledFunction() != F)
 1258 continue;
 1259 if (CallInst *AssignCI = GR->findAssignPtrTypeInstr(CI)) {
 1260 if (Type *PrevElemTy = GR->findDeducedElementType(CI)) {
 1261 GR->updateAssignType(AssignCI, CI,
 1262 getNormalizedPoisonValue(OpElemTy));
 1263 propagateElemType(CI, PrevElemTy, VisitedSubst);
 1264 }
 1265 }
 1266 }
 1267 // Non-recursive update of types in the function incomplete returns.
 1268 // This may happen just once per a function, the latch is a pair of
 1269 // findDeducedElementType(F) / addDeducedElementType(F, ...).
 1270 // With or without the latch it is a non-recursive call due to
 1271 // IncompleteRets set to nullptr in this call.
 1272 if (IncompleteRets)
 1273 for (Instruction *IncompleteRetI : *IncompleteRets)
 1274 deduceOperandElementType(IncompleteRetI, nullptr, AskOps,
 1275 IsPostprocessing);
 1276 } else if (IncompleteRets) {
 // Remember this return so it can be retried once more info is known.
 1277 IncompleteRets->insert(I);
 1278 }
 1279 TypeValidated.insert(I);
 1280 return true;
 1281}
1282
1283// If the Instruction has Pointer operands with unresolved types, this function
1284// tries to deduce them. If the Instruction has Pointer operands with known
1285// types which differ from expected, this function tries to insert a bitcast to
1286// resolve the issue.
// Per-opcode dispatch fills Ops (operand, operand-index pairs) and
// KnownElemTy; the tail then either records the deduced type (emitting or
// updating spv_assign_ptr_type) or inserts an spv_ptrcast for mismatches.
1287void SPIRVEmitIntrinsics::deduceOperandElementType(
 1288 Instruction *I, SmallPtrSet<Instruction *, 4> *IncompleteRets,
 1289 const SmallPtrSet<Value *, 4> *AskOps, bool IsPostprocessing) {
 // NOTE(review): original line 1290 is elided in this listing; presumably
 // the declaration of `Ops` (SmallVector<std::pair<Value *, unsigned>>).
 1291 Type *KnownElemTy = nullptr;
 1292 bool Incomplete = false;
 1293 // look for known basic patterns of type inference
 1294 if (auto *Ref = dyn_cast<PHINode>(I)) {
 1295 if (!isPointerTy(I->getType()) ||
 1296 !(KnownElemTy = GR->findDeducedElementType(I)))
 1297 return;
 1298 Incomplete = isTodoType(I);
 1299 for (unsigned i = 0; i < Ref->getNumIncomingValues(); i++) {
 1300 Value *Op = Ref->getIncomingValue(i);
 1301 if (isPointerTy(Op->getType()))
 1302 Ops.push_back(std::make_pair(Op, i));
 1303 }
 1304 } else if (auto *Ref = dyn_cast<AddrSpaceCastInst>(I)) {
 1305 KnownElemTy = GR->findDeducedElementType(I);
 1306 if (!KnownElemTy)
 1307 return;
 1308 Incomplete = isTodoType(I);
 1309 Ops.push_back(std::make_pair(Ref->getPointerOperand(), 0));
 1310 } else if (auto *Ref = dyn_cast<BitCastInst>(I)) {
 1311 if (!isPointerTy(I->getType()))
 1312 return;
 1313 KnownElemTy = GR->findDeducedElementType(I);
 1314 if (!KnownElemTy)
 1315 return;
 1316 Incomplete = isTodoType(I);
 1317 Ops.push_back(std::make_pair(Ref->getOperand(0), 0));
 1318 } else if (auto *Ref = dyn_cast<GetElementPtrInst>(I)) {
 1319 if (GR->findDeducedElementType(Ref->getPointerOperand()))
 1320 return;
 1321 KnownElemTy = Ref->getSourceElementType();
 1322 Ops.push_back(std::make_pair(Ref->getPointerOperand(),
 // NOTE(review): the second make_pair argument (original line 1323, the
 // pointer-operand index constant) is elided in this listing.
 1324 } else if (auto *Ref = dyn_cast<StructuredGEPInst>(I)) {
 1325 if (GR->findDeducedElementType(Ref->getPointerOperand()))
 1326 return;
 1327 KnownElemTy = Ref->getBaseType();
 1328 Ops.push_back(std::make_pair(Ref->getPointerOperand(),
 1330 } else if (auto *Ref = dyn_cast<LoadInst>(I)) {
 1331 KnownElemTy = I->getType();
 1332 if (isUntypedPointerTy(KnownElemTy))
 1333 return;
 1334 Type *PointeeTy = GR->findDeducedElementType(Ref->getPointerOperand());
 1335 if (PointeeTy && !isUntypedPointerTy(PointeeTy))
 1336 return;
 1337 Ops.push_back(std::make_pair(Ref->getPointerOperand(),
 1339 } else if (auto *Ref = dyn_cast<StoreInst>(I)) {
 1340 if (!(KnownElemTy =
 1341 reconstructType(Ref->getValueOperand(), false, IsPostprocessing)))
 1342 return;
 1343 Type *PointeeTy = GR->findDeducedElementType(Ref->getPointerOperand());
 1344 if (PointeeTy && !isUntypedPointerTy(PointeeTy))
 1345 return;
 1346 Ops.push_back(std::make_pair(Ref->getPointerOperand(),
 1348 } else if (auto *Ref = dyn_cast<AtomicCmpXchgInst>(I)) {
 1349 KnownElemTy = isPointerTy(I->getType())
 1350 ? getAtomicElemTy(GR, I, Ref->getPointerOperand())
 1351 : I->getType();
 1352 if (!KnownElemTy)
 1353 return;
 1354 Incomplete = isTodoType(Ref->getPointerOperand());
 1355 Ops.push_back(std::make_pair(Ref->getPointerOperand(),
 1357 } else if (auto *Ref = dyn_cast<AtomicRMWInst>(I)) {
 1358 KnownElemTy = isPointerTy(I->getType())
 1359 ? getAtomicElemTy(GR, I, Ref->getPointerOperand())
 1360 : I->getType();
 1361 if (!KnownElemTy)
 1362 return;
 1363 Incomplete = isTodoType(Ref->getPointerOperand());
 1364 Ops.push_back(std::make_pair(Ref->getPointerOperand(),
 1366 } else if (auto *Ref = dyn_cast<SelectInst>(I)) {
 1367 if (!isPointerTy(I->getType()) ||
 1368 !(KnownElemTy = GR->findDeducedElementType(I)))
 1369 return;
 1370 Incomplete = isTodoType(I);
 1371 for (unsigned i = 0; i < Ref->getNumOperands(); i++) {
 1372 Value *Op = Ref->getOperand(i);
 1373 if (isPointerTy(Op->getType()))
 1374 Ops.push_back(std::make_pair(Op, i));
 1375 }
 1376 } else if (auto *Ref = dyn_cast<ReturnInst>(I)) {
 1377 if (!isPointerTy(CurrF->getReturnType()))
 1378 return;
 1379 Value *Op = Ref->getReturnValue();
 1380 if (!Op)
 1381 return;
 1382 if (deduceOperandElementTypeFunctionRet(I, IncompleteRets, AskOps,
 1383 IsPostprocessing, KnownElemTy, Op,
 1384 CurrF))
 1385 return;
 1386 Incomplete = isTodoType(CurrF);
 1387 Ops.push_back(std::make_pair(Op, 0));
 1388 } else if (auto *Ref = dyn_cast<ICmpInst>(I)) {
 // Pointer comparison: deduce one side's type from the other, favoring
 // the side whose deduction is not marked incomplete.
 1389 if (!isPointerTy(Ref->getOperand(0)->getType()))
 1390 return;
 1391 Value *Op0 = Ref->getOperand(0);
 1392 Value *Op1 = Ref->getOperand(1);
 1393 bool Incomplete0 = isTodoType(Op0);
 1394 bool Incomplete1 = isTodoType(Op1);
 1395 Type *ElemTy1 = GR->findDeducedElementType(Op1);
 1396 Type *ElemTy0 = (Incomplete0 && !Incomplete1 && ElemTy1)
 1397 ? nullptr
 1398 : GR->findDeducedElementType(Op0);
 1399 if (ElemTy0) {
 1400 KnownElemTy = ElemTy0;
 1401 Incomplete = Incomplete0;
 1402 Ops.push_back(std::make_pair(Op1, 1));
 1403 } else if (ElemTy1) {
 1404 KnownElemTy = ElemTy1;
 1405 Incomplete = Incomplete1;
 1406 Ops.push_back(std::make_pair(Op0, 0));
 1407 }
 1408 } else if (CallInst *CI = dyn_cast<CallInst>(I)) {
 1409 if (!CI->isIndirectCall())
 1410 deduceOperandElementTypeCalledFunction(CI, Ops, KnownElemTy, Incomplete);
 1411 else if (HaveFunPtrs)
 1412 deduceOperandElementTypeFunctionPointer(CI, Ops, KnownElemTy,
 1413 IsPostprocessing);
 1414 }
 1415
 1416 // Not enough info to deduce types, or all operands are already valid.
 1417 if (!KnownElemTy || Ops.size() == 0)
 1418 return;
 1419
 1420 LLVMContext &Ctx = CurrF->getContext();
 1421 IRBuilder<> B(Ctx);
 1422 for (auto &OpIt : Ops) {
 1423 Value *Op = OpIt.first;
 1424 if (AskOps && !AskOps->contains(Op))
 1425 continue;
 1426 Type *AskTy = nullptr;
 1427 CallInst *AskCI = nullptr;
 1428 if (IsPostprocessing && AskOps) {
 1429 AskTy = GR->findDeducedElementType(Op);
 1430 AskCI = GR->findAssignPtrTypeInstr(Op);
 1431 assert(AskTy && AskCI);
 1432 }
 1433 Type *Ty = AskTy ? AskTy : GR->findDeducedElementType(Op);
 1434 if (Ty == KnownElemTy)
 1435 continue;
 1436 Value *OpTyVal = getNormalizedPoisonValue(KnownElemTy);
 1437 Type *OpTy = Op->getType();
 1438 if (Op->hasUseList() &&
 1439 (!Ty || AskTy || isUntypedPointerTy(Ty) || isTodoType(Op))) {
 // No (reliable) prior type: record the deduction and either create a
 // fresh spv_assign_ptr_type or update the existing one, propagating
 // the change to dependent values.
 1440 Type *PrevElemTy = GR->findDeducedElementType(Op);
 1441 GR->addDeducedElementType(Op, normalizeType(KnownElemTy));
 1442 // check if KnownElemTy is complete
 1443 if (!Incomplete)
 1444 eraseTodoType(Op);
 1445 else if (!IsPostprocessing)
 1446 insertTodoType(Op);
 1447 // check if there is existing Intrinsic::spv_assign_ptr_type instruction
 1448 CallInst *AssignCI = AskCI ? AskCI : GR->findAssignPtrTypeInstr(Op);
 1449 if (AssignCI == nullptr) {
 1450 Instruction *User = dyn_cast<Instruction>(Op->use_begin()->get());
 1451 setInsertPointSkippingPhis(B, User ? User->getNextNode() : I);
 1452 CallInst *CI =
 1453 buildIntrWithMD(Intrinsic::spv_assign_ptr_type, {OpTy}, OpTyVal, Op,
 1454 {B.getInt32(getPointerAddressSpace(OpTy))}, B);
 1455 GR->addAssignPtrTypeInstr(Op, CI);
 1456 } else {
 1457 GR->updateAssignType(AssignCI, Op, OpTyVal);
 1458 DenseSet<std::pair<Value *, Value *>> VisitedSubst{
 1459 std::make_pair(I, Op)};
 1460 propagateElemTypeRec(Op, KnownElemTy, PrevElemTy, VisitedSubst);
 1461 }
 1462 } else {
 // Conflicting known type: bridge with an explicit spv_ptrcast.
 1463 eraseTodoType(Op);
 1464 CallInst *PtrCastI =
 1465 buildSpvPtrcast(I->getParent()->getParent(), Op, KnownElemTy);
 1466 if (OpIt.second == std::numeric_limits<unsigned>::max())
 // NOTE(review): dyn_cast result is used unchecked here; the UINT_MAX
 // sentinel is only pushed for call instructions, so I is presumably a
 // CallInst — cast<CallInst> would make that invariant explicit.
 1467 dyn_cast<CallInst>(I)->setCalledOperand(PtrCastI);
 1468 else
 1469 I->setOperand(OpIt.second, PtrCastI);
 1470 }
 1471 }
 1472 TypeValidated.insert(I);
 1473}
1474
// Redirect every use of Old to New: assign-type intrinsic users are rebuilt
// against New (so the type annotation follows the replacement), ordinary
// memory/return/call users are rewritten in place, and anything else is a
// pass invariant violation. Old's metadata is copied to New before erasure.
1475void SPIRVEmitIntrinsics::replaceMemInstrUses(Instruction *Old,
 1476 Instruction *New,
 1477 IRBuilder<> &B) {
 1478 while (!Old->user_empty()) {
 1479 auto *U = Old->user_back();
 1480 if (isAssignTypeInstr(U)) {
 1481 B.SetInsertPoint(U);
 1482 SmallVector<Value *, 2> Args = {New, U->getOperand(1)};
 1483 CallInst *AssignCI =
 1484 B.CreateIntrinsic(Intrinsic::spv_assign_type, {New->getType()}, Args);
 1485 GR->addAssignPtrTypeInstr(New, AssignCI);
 1486 U->eraseFromParent();
 1487 } else if (isMemInstrToReplace(U) || isa<ReturnInst>(U) ||
 1488 isa<CallInst>(U)) {
 1489 U->replaceUsesOfWith(Old, New);
 1490 } else {
 1491 llvm_unreachable("illegal aggregate intrinsic user");
 1492 }
 1493 }
 1494 New->copyMetadata(*Old);
 1495 Old->eraseFromParent();
 1496}
1497
// Replace aggregate-typed undef operands with spv_undef intrinsic calls so
// the IRTranslator never sees raw aggregate undefs; the original constants
// and their types are remembered in AggrConsts/AggrConstTypes. Newly created
// intrinsics are re-queued so nested aggregates are handled too.
1498void SPIRVEmitIntrinsics::preprocessUndefs(IRBuilder<> &B) {
 1499 std::queue<Instruction *> Worklist;
 1500 for (auto &I : instructions(CurrF))
 1501 Worklist.push(&I);
 1502
 1503 while (!Worklist.empty()) {
 1504 Instruction *I = Worklist.front();
 1505 bool BPrepared = false;
 1506 Worklist.pop();
 1507
 1508 for (auto &Op : I->operands()) {
 1509 auto *AggrUndef = dyn_cast<UndefValue>(Op);
 1510 if (!AggrUndef || !Op->getType()->isAggregateType())
 1511 continue;
 1512
 1513 if (!BPrepared) {
 // NOTE(review): original line 1514 is elided in this listing;
 // presumably it positions the builder (e.g. setInsertPointSkippingPhis
 // or B.SetInsertPoint(I)) — confirm in full source.
 1515 BPrepared = true;
 1516 }
 1517 auto *IntrUndef = B.CreateIntrinsic(Intrinsic::spv_undef, {});
 1518 Worklist.push(IntrUndef);
 1519 I->replaceUsesOfWith(Op, IntrUndef);
 1520 AggrConsts[IntrUndef] = AggrUndef;
 1521 AggrConstTypes[IntrUndef] = AggrUndef->getType();
 1522 }
 1523 }
 1524}
1525
// Replace direct uses of aggregate constants (vectors, arrays, structs, data
// arrays, aggregate zeros) with spv_const_composite intrinsic calls; the
// original constants and their deduced nested types are recorded for later
// deduplication. Instructions that gained a new composite stay on the
// worklist so nested constants inside the new call are processed as well.
1526void SPIRVEmitIntrinsics::preprocessCompositeConstants(IRBuilder<> &B) {
 1527 std::queue<Instruction *> Worklist;
 1528 for (auto &I : instructions(CurrF))
 1529 Worklist.push(&I);
 1530
 1531 while (!Worklist.empty()) {
 1532 auto *I = Worklist.front();
 1533 bool IsPhi = isa<PHINode>(I), BPrepared = false;
 1534 assert(I);
 1535 bool KeepInst = false;
 1536 for (const auto &Op : I->operands()) {
 1537 Constant *AggrConst = nullptr;
 1538 Type *ResTy = nullptr;
 1539 if (auto *COp = dyn_cast<ConstantVector>(Op)) {
 1540 AggrConst = COp;
 1541 ResTy = COp->getType();
 1542 } else if (auto *COp = dyn_cast<ConstantArray>(Op)) {
 1543 AggrConst = COp;
 1544 ResTy = B.getInt32Ty();
 1545 } else if (auto *COp = dyn_cast<ConstantStruct>(Op)) {
 1546 AggrConst = COp;
 1547 ResTy = B.getInt32Ty();
 1548 } else if (auto *COp = dyn_cast<ConstantDataArray>(Op)) {
 1549 AggrConst = COp;
 1550 ResTy = B.getInt32Ty();
 1551 } else if (auto *COp = dyn_cast<ConstantAggregateZero>(Op)) {
 1552 AggrConst = COp;
 1553 ResTy = Op->getType()->isVectorTy() ? COp->getType() : B.getInt32Ty();
 1554 }
 1555 if (AggrConst) {
 // NOTE(review): original line 1556 is elided in this listing;
 // presumably the declaration of `Args` (SmallVector<Value *>).
 1557 if (auto *COp = dyn_cast<ConstantDataSequential>(Op))
 1558 for (unsigned i = 0; i < COp->getNumElements(); ++i)
 1559 Args.push_back(COp->getElementAsConstant(i));
 1560 else
 1561 llvm::append_range(Args, AggrConst->operands());
 1562 if (!BPrepared) {
 // For phi operands the builder must not insert inside the phi block;
 // place new code after the function's allocas instead.
 1563 IsPhi ? B.SetInsertPointPastAllocas(I->getParent()->getParent())
 1564 : B.SetInsertPoint(I);
 1565 BPrepared = true;
 1566 }
 1567 auto *CI =
 1568 B.CreateIntrinsic(Intrinsic::spv_const_composite, {ResTy}, {Args});
 1569 Worklist.push(CI);
 1570 I->replaceUsesOfWith(Op, CI);
 1571 KeepInst = true;
 1572 AggrConsts[CI] = AggrConst;
 1573 AggrConstTypes[CI] = deduceNestedTypeHelper(AggrConst, false);
 1574 }
 1575 }
 1576 if (!KeepInst)
 1577 Worklist.pop();
 1578 }
 1579}
1580
// NOTE(review): original line 1581 (this helper's signature) is elided in the
// listing; from the callers below it is presumably
// createDecorationIntrinsic(Instruction *I, MDNode *Node, IRBuilder<> &B).
// It attaches Node to I via an spv_assign_decoration intrinsic call.
 1582 IRBuilder<> &B) {
 1583 LLVMContext &Ctx = I->getContext();
 // NOTE(review): original line 1584 is elided; presumably it positions the
 // builder relative to I before emitting the intrinsic — confirm.
 1585 B.CreateIntrinsic(Intrinsic::spv_assign_decoration, {I->getType()},
 1586 {I, MetadataAsValue::get(Ctx, MDNode::get(Ctx, {Node}))});
 1587}
1588
// NOTE(review): original line 1589 (this helper's signature) is elided; from
// the call in useRoundingMode it is presumably
// createRoundingModeDecoration(Instruction *I, unsigned RoundingModeDeco,
// IRBuilder<> &B). Builds an {FPRoundingMode, mode} metadata node and
// forwards it to createDecorationIntrinsic.
 1590 unsigned RoundingModeDeco,
 1591 IRBuilder<> &B) {
 1592 LLVMContext &Ctx = I->getContext();
 // NOTE(review): original line 1593 is elided; presumably `Int32Ty` is
 // declared here (Type::getInt32Ty(Ctx)) — confirm in full source.
 1594 MDNode *RoundingModeNode = MDNode::get(
 1595 Ctx,
 1597 ConstantInt::get(Int32Ty, SPIRV::Decoration::FPRoundingMode)),
 1598 ConstantAsMetadata::get(ConstantInt::get(Int32Ty, RoundingModeDeco))});
 1599 createDecorationIntrinsic(I, RoundingModeNode, B);
 1600}
1601
// NOTE(review): original line 1602 (this helper's signature) is elided;
// presumably createSaturatedConversionDecoration(Instruction *I,
// IRBuilder<> &B). Emits the SPIR-V SaturatedConversion decoration for I.
 1603 IRBuilder<> &B) {
 1604 LLVMContext &Ctx = I->getContext();
 // NOTE(review): original line 1605 is elided; presumably `Int32Ty` is
 // declared here (Type::getInt32Ty(Ctx)) — confirm in full source.
 1606 MDNode *SaturatedConversionNode =
 1607 MDNode::get(Ctx, {ConstantAsMetadata::get(ConstantInt::get(
 1608 Int32Ty, SPIRV::Decoration::SaturatedConversion))});
 1609 createDecorationIntrinsic(I, SaturatedConversionNode, B);
 1610}
1611
// NOTE(review): original line 1612 (this function's signature) is elided in
// the listing. Body: for calls to the fptosi.sat / fptoui.sat intrinsics it
// presumably emits a SaturatedConversion decoration (the action at elided
// line 1620) — confirm against the full source.
 1613 if (auto *CI = dyn_cast<CallInst>(I)) {
 1614 if (Function *Fu = CI->getCalledFunction()) {
 1615 if (Fu->isIntrinsic()) {
 1616 unsigned const int IntrinsicId = Fu->getIntrinsicID();
 1617 switch (IntrinsicId) {
 1618 case Intrinsic::fptosi_sat:
 1619 case Intrinsic::fptoui_sat:
 1621 break;
 1622 default:
 1623 break;
 1624 }
 1625 }
 1626 }
 1627 }
 1628}
1629
// Inline-asm calls are wrapped into an spv_inline_asm intrinsic that carries
// the asm's function type (as an undef constant) and constraint string as
// metadata, plus the original call arguments. Non-asm calls pass through.
1630Instruction *SPIRVEmitIntrinsics::visitCallInst(CallInst &Call) {
 1631 if (!Call.isInlineAsm())
 1632 return &Call;
 1633
 1634 const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
 1635 LLVMContext &Ctx = CurrF->getContext();
 1636
 1637 Constant *TyC = UndefValue::get(IA->getFunctionType());
 1638 MDString *ConstraintString = MDString::get(Ctx, IA->getConstraintString());
 // NOTE(review): original line 1639 is elided in this listing; presumably
 // the declaration/initialization of `Args` with the two metadata values.
 1640 buildMD(TyC),
 1641 MetadataAsValue::get(Ctx, MDNode::get(Ctx, ConstraintString))};
 1642 for (unsigned OpIdx = 0; OpIdx < Call.arg_size(); OpIdx++)
 1643 Args.push_back(Call.getArgOperand(OpIdx));
 1644
 // NOTE(review): original line 1645 is elided; presumably the IRBuilder
 // `B` is constructed here — confirm in full source.
 1646 B.SetInsertPoint(&Call);
 1647 B.CreateIntrinsic(Intrinsic::spv_inline_asm, {Args});
 1648 return &Call;
 1649}
1650
1651// Use a tip about rounding mode to create a decoration.
// Maps the constrained-FP intrinsic's rounding mode onto the corresponding
// SPIR-V FPRoundingMode decoration; unknown/unsupported modes (Dynamic,
// NearestTiesToAway, or anything unlisted) produce no decoration at all.
1652void SPIRVEmitIntrinsics::useRoundingMode(ConstrainedFPIntrinsic *FPI,
 1653 IRBuilder<> &B) {
 1654 std::optional<RoundingMode> RM = FPI->getRoundingMode();
 1655 if (!RM.has_value())
 1656 return;
 // UINT_MAX acts as a "no decoration" sentinel below.
 1657 unsigned RoundingModeDeco = std::numeric_limits<unsigned>::max();
 1658 switch (RM.value()) {
 1659 default:
 1660 // ignore unknown rounding modes
 1661 break;
 1662 case RoundingMode::NearestTiesToEven:
 1663 RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTE;
 1664 break;
 1665 case RoundingMode::TowardNegative:
 1666 RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTN;
 1667 break;
 1668 case RoundingMode::TowardPositive:
 1669 RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTP;
 1670 break;
 1671 case RoundingMode::TowardZero:
 1672 RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTZ;
 1673 break;
 1674 case RoundingMode::Dynamic:
 1675 case RoundingMode::NearestTiesToAway:
 1676 // TODO: check if supported
 1677 break;
 1678 }
 1679 if (RoundingModeDeco == std::numeric_limits<unsigned>::max())
 1680 return;
 1681 // Convert the tip about rounding mode into a decoration record.
 1682 createRoundingModeDecoration(FPI, RoundingModeDeco, B);
 1683}
1684
// Lower a switch into an spv_switch intrinsic carrying (condition, then
// alternating case-value / block-address pairs, default first). The switch
// itself is erased; a dummy indirectbr over all successors is inserted to
// keep the CFG valid until IR translation removes it.
1685Instruction *SPIRVEmitIntrinsics::visitSwitchInst(SwitchInst &I) {
 1686 BasicBlock *ParentBB = I.getParent();
 1687 Function *F = ParentBB->getParent();
 1688 IRBuilder<> B(ParentBB);
 1689 B.SetInsertPoint(&I);
 1690 SmallVector<Value *, 4> Args;
 // NOTE(review): original line 1691 is elided in this listing; presumably
 // the declaration of `BBCases` (SmallVector<BasicBlock *>) — confirm.
 1692 Args.push_back(I.getCondition());
 1693 BBCases.push_back(I.getDefaultDest());
 1694 Args.push_back(BlockAddress::get(F, I.getDefaultDest()));
 1695 for (auto &Case : I.cases()) {
 1696 Args.push_back(Case.getCaseValue());
 1697 BBCases.push_back(Case.getCaseSuccessor());
 1698 Args.push_back(BlockAddress::get(F, Case.getCaseSuccessor()));
 1699 }
 1700 CallInst *NewI = B.CreateIntrinsic(Intrinsic::spv_switch,
 1701 {I.getOperand(0)->getType()}, {Args});
 1702 // remove switch to avoid its unneeded and undesirable unwrap into branches
 1703 // and conditions
 1704 replaceAllUsesWith(&I, NewI);
 1705 I.eraseFromParent();
 1706 // insert artificial and temporary instruction to preserve valid CFG,
 1707 // it will be removed after IR translation pass
 1708 B.SetInsertPoint(ParentBB);
 1709 IndirectBrInst *BrI = B.CreateIndirectBr(
 1710 Constant::getNullValue(PointerType::getUnqual(ParentBB->getContext())),
 1711 BBCases.size());
 1712 for (BasicBlock *BBCase : BBCases)
 1713 BrI->addDestination(BBCase);
 1714 return BrI;
 1715}
1716
// NOTE(review): original line 1717 (this helper's signature) is elided;
// presumably a static predicate taking a GetElementPtrInst *GEP. It returns
// true iff the GEP has indices and its first index is the constant 0.
 1718 if (GEP->getNumIndices() == 0)
 1719 return false;
 1720 if (const auto *CI = dyn_cast<ConstantInt>(GEP->getOperand(1))) {
 1721 return CI->getZExtValue() == 0;
 1722 }
 // Non-constant first index: cannot prove it is zero.
 1723 return false;
 1724}
1725
// Lower a structured-GEP intrinsic into the generic spv_gep intrinsic:
// inBounds flag, base pointer, a prepended zero index, then the structured
// indices. Non-structured intrinsics pass through unchanged.
1726Instruction *SPIRVEmitIntrinsics::visitIntrinsicInst(IntrinsicInst &I) {
 1727 auto *SGEP = dyn_cast<StructuredGEPInst>(&I);
 1728 if (!SGEP)
 1729 return &I;
 1730
 1731 IRBuilder<> B(I.getParent());
 1732 B.SetInsertPoint(&I);
 1733 SmallVector<Type *, 2> Types = {I.getType(), I.getOperand(0)->getType()};
 1734 SmallVector<Value *, 4> Args;
 1735 Args.push_back(/* inBounds= */ B.getInt1(true));
 1736 Args.push_back(I.getOperand(0));
 1737 Args.push_back(/* zero index */ B.getInt32(0));
 1738 for (unsigned J = 0; J < SGEP->getNumIndices(); ++J)
 1739 Args.push_back(SGEP->getIndexOperand(J));
 1740
 1741 auto *NewI = B.CreateIntrinsic(Intrinsic::spv_gep, Types, Args);
 1742 replaceAllUsesWithAndErase(B, &I, NewI);
 1743 return NewI;
 1744}
1745
// Lower a GEP into the spv_gep intrinsic. A guarded path (condition at the
// elided line 1750, presumably a logical-SPIR-V check) handles i8 byte
// addressing via buildLogicalAccessChainFromGEP and repairs array-to-pointer
// decay by prepending a 0 index; everything else is lowered directly with
// the original index list.
1746Instruction *SPIRVEmitIntrinsics::visitGetElementPtrInst(GetElementPtrInst &I) {
 1747 IRBuilder<> B(I.getParent());
 1748 B.SetInsertPoint(&I);
 1749
 // NOTE(review): original line 1750 is elided in this listing; presumably
 // `if (<logical SPIR-V target check>) {` opening the block that ends at
 // listing line 1786 — confirm in full source.
 1751 // Logical SPIR-V cannot use the OpPtrAccessChain instruction. If the first
 1752 // index of the GEP is not 0, then we need to try to adjust it.
 1753 //
 1754 // If the GEP is doing byte addressing, try to rebuild the full access chain
 1755 // from the type of the pointer.
 1756 if (I.getSourceElementType() ==
 1757 IntegerType::getInt8Ty(CurrF->getContext())) {
 1758 return buildLogicalAccessChainFromGEP(I);
 1759 }
 1760
 1761 // Look for the array-to-pointer decay. If this is the pattern
 1762 // we can adjust the types, and prepend a 0 to the indices.
 1763 Value *PtrOp = I.getPointerOperand();
 1764 Type *SrcElemTy = I.getSourceElementType();
 1765 Type *DeducedPointeeTy = deduceElementType(PtrOp, true);
 1766
 1767 if (auto *ArrTy = dyn_cast<ArrayType>(DeducedPointeeTy)) {
 1768 if (ArrTy->getElementType() == SrcElemTy) {
 1769 SmallVector<Value *> NewIndices;
 1770 Type *FirstIdxType = I.getOperand(1)->getType();
 1771 NewIndices.push_back(ConstantInt::get(FirstIdxType, 0));
 1772 for (Value *Idx : I.indices())
 1773 NewIndices.push_back(Idx);
 1774
 1775 SmallVector<Type *, 2> Types = {I.getType(), I.getPointerOperandType()};
 1776 SmallVector<Value *, 4> Args;
 1777 Args.push_back(B.getInt1(I.isInBounds()));
 1778 Args.push_back(I.getPointerOperand());
 1779 Args.append(NewIndices.begin(), NewIndices.end());
 1780
 1781 auto *NewI = B.CreateIntrinsic(Intrinsic::spv_gep, {Types}, {Args});
 1782 replaceAllUsesWithAndErase(B, &I, NewI);
 1783 return NewI;
 1784 }
 1785 }
 1786 }
 1787
 // Default path: forward the GEP operands (pointer + indices) verbatim.
 1788 SmallVector<Type *, 2> Types = {I.getType(), I.getOperand(0)->getType()};
 1789 SmallVector<Value *, 4> Args;
 1790 Args.push_back(B.getInt1(I.isInBounds()));
 1791 llvm::append_range(Args, I.operands());
 1792 auto *NewI = B.CreateIntrinsic(Intrinsic::spv_gep, {Types}, {Args});
 1793 replaceAllUsesWithAndErase(B, &I, NewI);
 1794 return NewI;
 1795}
1796
// Lower a bitcast to the spv_bitcast intrinsic. Pointer-to-pointer bitcasts
// are simply forwarded to the source value (returning nullptr): such casts
// carry no type information in opaque-pointer IR and are reconstructed later
// by insertPtrCastOrAssignTypeInstr.
1797Instruction *SPIRVEmitIntrinsics::visitBitCastInst(BitCastInst &I) {
 1798 IRBuilder<> B(I.getParent());
 1799 B.SetInsertPoint(&I);
 1800 Value *Source = I.getOperand(0);
 1801
 1802 // SPIR-V, contrary to LLVM 17+ IR, supports bitcasts between pointers of
 1803 // varying element types. In case of IR coming from older versions of LLVM
 1804 // such bitcasts do not provide sufficient information, should be just skipped
 1805 // here, and handled in insertPtrCastOrAssignTypeInstr.
 1806 if (isPointerTy(I.getType())) {
 1807 replaceAllUsesWith(&I, Source);
 1808 I.eraseFromParent();
 1809 return nullptr;
 1810 }
 1811
 1812 SmallVector<Type *, 2> Types = {I.getType(), Source->getType()};
 1813 SmallVector<Value *> Args(I.op_begin(), I.op_end());
 1814 auto *NewI = B.CreateIntrinsic(Intrinsic::spv_bitcast, {Types}, {Args});
 1815 replaceAllUsesWithAndErase(B, &I, NewI);
 1816 return NewI;
 1817}
1818
// Record that pointer value V must point to the builtin target-extension type
// AssignedType. Either emits a fresh type assignment through the global
// registry, or validates/updates an existing spv_assign_ptr_type record.
// Fatal-errors on a conflicting builtin pointee type, since builtin
// (target extension) types cannot be redeclared or casted.
1819void SPIRVEmitIntrinsics::insertAssignPtrTypeTargetExt(
1820    TargetExtType *AssignedType, Value *V, IRBuilder<> &B) {
1821  Type *VTy = V->getType();
1822
1823  // A couple of sanity checks.
1824  assert((isPointerTy(VTy)) && "Expect a pointer type!");
1825  if (Type *ElemTy = getPointeeType(VTy))
1826    if (ElemTy != AssignedType)
1827      report_fatal_error("Unexpected pointer element type!");
1828
    // No prior assignment recorded for V: create one and we are done.
1829  CallInst *AssignCI = GR->findAssignPtrTypeInstr(V);
1830  if (!AssignCI) {
1831    GR->buildAssignType(B, AssignedType, V);
1832    return;
1833  }
1834
    // Extract the type previously recorded on the assignment intrinsic.
    // NOTE(review): original source line 1836 (the opening half of this cast
    // expression) was dropped by the doxygen extraction — verify upstream.
1835  Type *CurrentType =
1837          cast<MetadataAsValue>(AssignCI->getOperand(1))->getMetadata())
1838          ->getType();
1839  if (CurrentType == AssignedType)
1840    return;
1841
1842  // Builtin types cannot be redeclared or casted.
1843  if (CurrentType->isTargetExtTy())
1844    report_fatal_error("Type mismatch " + CurrentType->getTargetExtName() +
1845                       "/" + AssignedType->getTargetExtName() +
1846                       " for value " + V->getName(),
1847                       false);
1848
1849  // Our previous guess about the type seems to be wrong, let's update
1850  // inferred type according to a new, more precise type information.
1851  GR->updateAssignType(AssignCI, V, getNormalizedPoisonValue(AssignedType));
1852}
1853
// Ensure operand OperandToReplace of instruction I is seen as a pointer to
// ExpectedElementType. Depending on what is already known about Pointer this
// either: does nothing (types already match), reuses/updates an existing
// spv_ptrcast / spv_assign_ptr_type, emits a first-time spv_assign_ptr_type,
// or emits a new spv_ptrcast and rewires the operand to it.
1854void SPIRVEmitIntrinsics::replacePointerOperandWithPtrCast(
1855    Instruction *I, Value *Pointer, Type *ExpectedElementType,
1856    unsigned OperandToReplace, IRBuilder<> &B) {
1857  TypeValidated.insert(I);
1858
1859  // Do not emit spv_ptrcast if Pointer's element type is ExpectedElementType
1860  Type *PointerElemTy = deduceElementTypeHelper(Pointer, false);
1861  if (PointerElemTy == ExpectedElementType ||
1862      isEquivalentTypes(PointerElemTy, ExpectedElementType))
1863    return;
1864
  // NOTE(review): original source line 1865 is missing from this extraction
  // (likely an insert-point adjustment) — verify against upstream.
1866  Value *ExpectedElementVal = getNormalizedPoisonValue(ExpectedElementType);
1867  MetadataAsValue *VMD = buildMD(ExpectedElementVal);
1868  unsigned AddressSpace = getPointerAddressSpace(Pointer->getType());
1869  bool FirstPtrCastOrAssignPtrType = true;
1870
1871  // Do not emit new spv_ptrcast if equivalent one already exists or when
1872  // spv_assign_ptr_type already targets this pointer with the same element
1873  // type.
1874  if (Pointer->hasUseList()) {
1875    for (auto User : Pointer->users()) {
1876      auto *II = dyn_cast<IntrinsicInst>(User);
1877      if (!II ||
1878          (II->getIntrinsicID() != Intrinsic::spv_assign_ptr_type &&
1879           II->getIntrinsicID() != Intrinsic::spv_ptrcast) ||
1880          II->getOperand(0) != Pointer)
1881        continue;
1882
1883      // There is some spv_ptrcast/spv_assign_ptr_type already targeting this
1884      // pointer.
1885      FirstPtrCastOrAssignPtrType = false;
      // NOTE(review): original source line 1888 (the right-hand side of this
      // comparison, presumably the address space) is missing — verify upstream.
1886      if (II->getOperand(1) != VMD ||
1887          dyn_cast<ConstantInt>(II->getOperand(2))->getSExtValue() !=
1889        continue;
1890
1891      // The spv_ptrcast/spv_assign_ptr_type targeting this pointer is of the
1892      // same element type and address space.
1893      if (II->getIntrinsicID() != Intrinsic::spv_ptrcast)
1894        return;
1895
1896      // This must be a spv_ptrcast, do not emit new if this one has the same BB
1897      // as I. Otherwise, search for other spv_ptrcast/spv_assign_ptr_type.
1898      if (II->getParent() != I->getParent())
1899        continue;
1900
1901      I->setOperand(OperandToReplace, II);
1902      return;
1903    }
1904  }
1905
1906  if (isa<Instruction>(Pointer) || isa<Argument>(Pointer)) {
1907    if (FirstPtrCastOrAssignPtrType) {
1908      // If this would be the first spv_ptrcast, do not emit spv_ptrcast and
1909      // emit spv_assign_ptr_type instead.
1910      GR->buildAssignPtr(B, ExpectedElementType, Pointer);
1911      return;
1912    } else if (isTodoType(Pointer)) {
1913      eraseTodoType(Pointer);
1914      if (!isa<CallInst>(Pointer) && !isaGEP(Pointer) &&
1915          !isa<AllocaInst>(Pointer)) {
1916        // If this wouldn't be the first spv_ptrcast but existing type info is
1917        // uncomplete, update spv_assign_ptr_type arguments.
1918        if (CallInst *AssignCI = GR->findAssignPtrTypeInstr(Pointer)) {
1919          Type *PrevElemTy = GR->findDeducedElementType(Pointer);
1920          assert(PrevElemTy);
1921          DenseSet<std::pair<Value *, Value *>> VisitedSubst{
1922              std::make_pair(I, Pointer)};
1923          GR->updateAssignType(AssignCI, Pointer, ExpectedElementVal);
1924          propagateElemType(Pointer, PrevElemTy, VisitedSubst);
1925        } else {
1926          GR->buildAssignPtr(B, ExpectedElementType, Pointer);
1927        }
1928        return;
1929      }
1930    }
1931  }
1932
1933  // Emit spv_ptrcast
1934  SmallVector<Type *, 2> Types = {Pointer->getType(), Pointer->getType()};
1935  SmallVector<Value *, 2> Args = {Pointer, VMD, B.getInt32(AddressSpace)};
1936  auto *PtrCastI = B.CreateIntrinsic(Intrinsic::spv_ptrcast, {Types}, Args);
1937  I->setOperand(OperandToReplace, PtrCastI);
1938  // We need to set up a pointee type for the newly created spv_ptrcast.
1939  GR->buildAssignPtr(B, ExpectedElementType, PtrCastI);
1940}
1941
// Walk instruction I and, for each pointer operand whose expected pointee type
// can be determined (store value/pointer, load result, GEP source, call
// arguments of demangled builtins), route it through
// replacePointerOperandWithPtrCast so the SPIR-V type system sees a
// consistently-typed pointer.
1942void SPIRVEmitIntrinsics::insertPtrCastOrAssignTypeInstr(Instruction *I,
1943                                                         IRBuilder<> &B) {
1944  // Handle basic instructions:
1945  StoreInst *SI = dyn_cast<StoreInst>(I);
1946  if (IsKernelArgInt8(CurrF, SI)) {
1947    replacePointerOperandWithPtrCast(
1948        I, SI->getValueOperand(), IntegerType::getInt8Ty(CurrF->getContext()),
1949        0, B);
1950  }
1951  if (SI) {
1952    Value *Op = SI->getValueOperand();
1953    Value *Pointer = SI->getPointerOperand();
1954    Type *OpTy = Op->getType();
      // Prefer the pre-mutation type when the stored value was rewritten.
1955    if (auto *OpI = dyn_cast<Instruction>(Op))
1956      OpTy = restoreMutatedType(GR, OpI, OpTy);
1957    if (OpTy == Op->getType())
1958      OpTy = deduceElementTypeByValueDeep(OpTy, Op, false);
1959    replacePointerOperandWithPtrCast(I, Pointer, OpTy, 1, B);
1960    return;
1961  }
1962  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
1963    Value *Pointer = LI->getPointerOperand();
1964    Type *OpTy = LI->getType();
1965    if (auto *PtrTy = dyn_cast<PointerType>(OpTy)) {
1966      if (Type *ElemTy = GR->findDeducedElementType(LI)) {
1967        OpTy = getTypedPointerWrapper(ElemTy, PtrTy->getAddressSpace());
1968      } else {
        // Deduction failed: remember the pointer so its type can be
        // revisited later.
1969        Type *NewOpTy = OpTy;
1970        OpTy = deduceElementTypeByValueDeep(OpTy, LI, false);
1971        if (OpTy == NewOpTy)
1972          insertTodoType(Pointer);
1973      }
1974    }
1975    replacePointerOperandWithPtrCast(I, Pointer, OpTy, 0, B);
1976    return;
1977  }
1978  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
1979    Value *Pointer = GEPI->getPointerOperand();
1980    Type *OpTy = nullptr;
1981
1982    // Logical SPIR-V is not allowed to use Op*PtrAccessChain instructions. If
1983    // the first index is 0, then we can trivially lower to OpAccessChain. If
1984    // not we need to try to rewrite the GEP. We avoid adding a pointer cast at
1985    // this time, and will rewrite the GEP when visiting it.
1986    if (TM->getSubtargetImpl()->isLogicalSPIRV() && !isFirstIndexZero(GEPI)) {
1987      return;
1988    }
1989
1990    // In all cases, fall back to the GEP type if type scavenging failed.
1991    if (!OpTy)
1992      OpTy = GEPI->getSourceElementType();
1993
1994    replacePointerOperandWithPtrCast(I, Pointer, OpTy, 0, B);
1995    if (isNestedPointer(OpTy))
1996      insertTodoType(Pointer);
1997    return;
1998  }
1999
2000  // TODO: review and merge with existing logics:
2001  // Handle calls to builtins (non-intrinsics):
2002  CallInst *CI = dyn_cast<CallInst>(I);
  // NOTE(review): original source line 2004 (the rest of this condition) is
  // missing from this extraction — verify against upstream.
2003  if (!CI || CI->isIndirectCall() || CI->isInlineAsm() ||
2005    return;
2006
2007  // collect information about formal parameter types
  // NOTE(review): original source line 2009 (the demangling call producing
  // DemangledName) is missing — verify against upstream.
2008  std::string DemangledName =
2010  Function *CalledF = CI->getCalledFunction();
2011  SmallVector<Type *, 4> CalledArgTys;
2012  bool HaveTypes = false;
2013  for (unsigned OpIdx = 0; OpIdx < CalledF->arg_size(); ++OpIdx) {
2014    Argument *CalledArg = CalledF->getArg(OpIdx);
2015    Type *ArgType = CalledArg->getType();
2016    if (!isPointerTy(ArgType)) {
2017      CalledArgTys.push_back(nullptr);
2018    } else if (Type *ArgTypeElem = getPointeeType(ArgType)) {
2019      CalledArgTys.push_back(ArgTypeElem);
2020      HaveTypes = true;
2021    } else {
      // Opaque pointer argument: try registry, attributes, builtin tables,
      // and finally the argument's users, in that order.
2022      Type *ElemTy = GR->findDeducedElementType(CalledArg);
2023      if (!ElemTy && hasPointeeTypeAttr(CalledArg))
2024        ElemTy = getPointeeTypeByAttr(CalledArg);
2025      if (!ElemTy) {
2026        ElemTy = getPointeeTypeByCallInst(DemangledName, CalledF, OpIdx);
2027        if (ElemTy) {
2028          GR->addDeducedElementType(CalledArg, normalizeType(ElemTy));
2029        } else {
2030          for (User *U : CalledArg->users()) {
2031            if (Instruction *Inst = dyn_cast<Instruction>(U)) {
2032              if ((ElemTy = deduceElementTypeHelper(Inst, false)) != nullptr)
2033                break;
2034            }
2035          }
2036        }
2037      }
2038      HaveTypes |= ElemTy != nullptr;
2039      CalledArgTys.push_back(ElemTy);
2040    }
2041  }
2042
2043  if (DemangledName.empty() && !HaveTypes)
2044    return;
2045
2046  for (unsigned OpIdx = 0; OpIdx < CI->arg_size(); OpIdx++) {
2047    Value *ArgOperand = CI->getArgOperand(OpIdx);
2048    if (!isPointerTy(ArgOperand->getType()))
2049      continue;
2050
2051    // Constants (nulls/undefs) are handled in insertAssignPtrTypeIntrs()
2052    if (!isa<Instruction>(ArgOperand) && !isa<Argument>(ArgOperand)) {
2053      // However, we may have assumptions about the formal argument's type and
2054      // may have a need to insert a ptr cast for the actual parameter of this
2055      // call.
2056      Argument *CalledArg = CalledF->getArg(OpIdx);
2057      if (!GR->findDeducedElementType(CalledArg))
2058        continue;
2059    }
2060
2061    Type *ExpectedType =
2062        OpIdx < CalledArgTys.size() ? CalledArgTys[OpIdx] : nullptr;
2063    if (!ExpectedType && !DemangledName.empty())
2064      ExpectedType = SPIRV::parseBuiltinCallArgumentBaseType(
2065          DemangledName, OpIdx, I->getContext());
2066    if (!ExpectedType || ExpectedType->isVoidTy())
2067      continue;
2068
    // NOTE(review): original source line 2070 (the second half of this
    // condition) is missing — verify against upstream.
2069    if (ExpectedType->isTargetExtTy() &&
2071      insertAssignPtrTypeTargetExt(cast<TargetExtType>(ExpectedType),
2072                                   ArgOperand, B);
2073    else
2074      replacePointerOperandWithPtrCast(CI, ArgOperand, ExpectedType, OpIdx, B);
2075  }
2076}
2077
2078Instruction *SPIRVEmitIntrinsics::visitInsertElementInst(InsertElementInst &I) {
2079 // If it's a <1 x Type> vector type, don't modify it. It's not a legal vector
2080 // type in LLT and IRTranslator will replace it by the scalar.
2081 if (isVector1(I.getType()))
2082 return &I;
2083
2084 SmallVector<Type *, 4> Types = {I.getType(), I.getOperand(0)->getType(),
2085 I.getOperand(1)->getType(),
2086 I.getOperand(2)->getType()};
2087 IRBuilder<> B(I.getParent());
2088 B.SetInsertPoint(&I);
2089 SmallVector<Value *> Args(I.op_begin(), I.op_end());
2090 auto *NewI = B.CreateIntrinsic(Intrinsic::spv_insertelt, {Types}, {Args});
2091 replaceAllUsesWithAndErase(B, &I, NewI);
2092 return NewI;
2093}
2094
// Rewrite an extractelement into the spv_extractelt intrinsic; <1 x T>
// vectors are left untouched because IRTranslator scalarizes them.
// NOTE(review): the definition's return-type line (original source line 2095,
// presumably "Instruction *") was dropped by the doxygen extraction — verify
// against upstream.
2096SPIRVEmitIntrinsics::visitExtractElementInst(ExtractElementInst &I) {
2097  // If it's a <1 x Type> vector type, don't modify it. It's not a legal vector
2098  // type in LLT and IRTranslator will replace it by the scalar.
2099  if (isVector1(I.getVectorOperandType()))
2100    return &I;
2101
2102  IRBuilder<> B(I.getParent());
2103  B.SetInsertPoint(&I);
2104  SmallVector<Type *, 3> Types = {I.getType(), I.getVectorOperandType(),
2105                                  I.getIndexOperand()->getType()};
2106  SmallVector<Value *, 2> Args = {I.getVectorOperand(), I.getIndexOperand()};
2107  auto *NewI = B.CreateIntrinsic(Intrinsic::spv_extractelt, {Types}, {Args});
2108  replaceAllUsesWithAndErase(B, &I, NewI);
2109  return NewI;
2110}
2111
// Rewrite an insertvalue into the spv_insertv intrinsic: aggregate operand
// (or an i32 undef placeholder when the aggregate is undef), inserted value,
// then the constant indices as i32s.
2112Instruction *SPIRVEmitIntrinsics::visitInsertValueInst(InsertValueInst &I) {
2113  IRBuilder<> B(I.getParent());
2114  B.SetInsertPoint(&I);
2115  SmallVector<Type *, 1> Types = {I.getInsertedValueOperand()->getType()};
  // NOTE(review): original source line 2116 (presumably the declaration of
  // Args) is missing from this extraction — verify against upstream.
2117  Value *AggregateOp = I.getAggregateOperand();
2118  if (isa<UndefValue>(AggregateOp))
2119    Args.push_back(UndefValue::get(B.getInt32Ty()));
2120  else
2121    Args.push_back(AggregateOp);
2122  Args.push_back(I.getInsertedValueOperand());
2123  for (auto &Op : I.indices())
2124    Args.push_back(B.getInt32(Op));
2125  Instruction *NewI =
2126      B.CreateIntrinsic(Intrinsic::spv_insertv, {Types}, {Args});
2127  replaceMemInstrUses(&I, NewI, B);
2128  return NewI;
2129}
2130
2131Instruction *SPIRVEmitIntrinsics::visitExtractValueInst(ExtractValueInst &I) {
2132 if (I.getAggregateOperand()->getType()->isAggregateType())
2133 return &I;
2134 IRBuilder<> B(I.getParent());
2135 B.SetInsertPoint(&I);
2136 SmallVector<Value *> Args(I.operands());
2137 for (auto &Op : I.indices())
2138 Args.push_back(B.getInt32(Op));
2139 auto *NewI =
2140 B.CreateIntrinsic(Intrinsic::spv_extractv, {I.getType()}, {Args});
2141 replaceAllUsesWithAndErase(B, &I, NewI);
2142 return NewI;
2143}
2144
// Rewrite aggregate-typed loads into the spv_load intrinsic, carrying the
// memory-operand flags and alignment; scalar loads are left untouched.
2145Instruction *SPIRVEmitIntrinsics::visitLoadInst(LoadInst &I) {
2146  if (!I.getType()->isAggregateType())
2147    return &I;
2148  IRBuilder<> B(I.getParent());
2149  B.SetInsertPoint(&I);
  // The replacement is not an LLVM constant, so stop constant tracking here.
2150  TrackConstants = false;
2151  const auto *TLI = TM->getSubtargetImpl()->getTargetLowering();
  // NOTE(review): original source line 2152 (presumably the declaration of
  // Flags) is missing from this extraction — verify against upstream.
2153      TLI->getLoadMemOperandFlags(I, CurrF->getDataLayout());
2154  auto *NewI =
2155      B.CreateIntrinsic(Intrinsic::spv_load, {I.getOperand(0)->getType()},
2156                        {I.getPointerOperand(), B.getInt16(Flags),
2157                         B.getInt32(I.getAlign().value())});
2158  replaceMemInstrUses(&I, NewI, B);
2159  return NewI;
2160}
2161
// Rewrite stores of aggregates (those recorded in AggrStores) into the
// spv_store intrinsic with memory-operand flags and alignment. Other stores
// are left untouched.
2162Instruction *SPIRVEmitIntrinsics::visitStoreInst(StoreInst &I) {
2163  if (!AggrStores.contains(&I))
2164    return &I;
2165  IRBuilder<> B(I.getParent());
2166  B.SetInsertPoint(&I);
2167  TrackConstants = false;
2168  const auto *TLI = TM->getSubtargetImpl()->getTargetLowering();
  // NOTE(review): original source line 2169 (presumably the declaration of
  // Flags) is missing from this extraction — verify against upstream.
2170      TLI->getStoreMemOperandFlags(I, CurrF->getDataLayout());
2171  auto *PtrOp = I.getPointerOperand();
2172
2173  if (I.getValueOperand()->getType()->isAggregateType()) {
2174    // It is possible that what used to be an ExtractValueInst has been replaced
2175    // with a call to the spv_extractv intrinsic, and that said call hasn't
2176    // had its return type replaced with i32 during the dedicated pass (because
2177    // it was emitted later); we have to handle this here, because IRTranslator
2178    // cannot deal with multi-register types at the moment.
2179    CallBase *CB = dyn_cast<CallBase>(I.getValueOperand());
2180    assert(CB && CB->getIntrinsicID() == Intrinsic::spv_extractv &&
2181           "Unexpected argument of aggregate type, should be spv_extractv!");
2182    CB->mutateType(B.getInt32Ty());
2183  }
2184
2185  auto *NewI = B.CreateIntrinsic(
2186      Intrinsic::spv_store, {I.getValueOperand()->getType(), PtrOp->getType()},
2187      {I.getValueOperand(), PtrOp, B.getInt16(Flags),
2188       B.getInt32(I.getAlign().value())});
  // Preserve any metadata (e.g. aliasing info) attached to the store.
2189  NewI->copyMetadata(I);
2190  I.eraseFromParent();
2191  return NewI;
2192}
2193
// Rewrite an alloca into spv_alloca, or spv_alloca_array for VLA-style array
// allocations (which require the SPV_INTEL_variable_length_array extension).
2194Instruction *SPIRVEmitIntrinsics::visitAllocaInst(AllocaInst &I) {
2195  Value *ArraySize = nullptr;
2196  if (I.isArrayAllocation()) {
2197    const SPIRVSubtarget *STI = TM->getSubtargetImpl(*I.getFunction());
2198    if (!STI->canUseExtension(
2199            SPIRV::Extension::SPV_INTEL_variable_length_array))
    // NOTE(review): original source line 2200 (presumably the
    // report_fatal_error call opening) is missing from this extraction —
    // verify against upstream.
2201          "array allocation: this instruction requires the following "
2202          "SPIR-V extension: SPV_INTEL_variable_length_array",
2203          false);
2204    ArraySize = I.getArraySize();
2205  }
2206  IRBuilder<> B(I.getParent());
2207  B.SetInsertPoint(&I);
2208  TrackConstants = false;
2209  Type *PtrTy = I.getType();
2210  auto *NewI =
2211      ArraySize
2212          ? B.CreateIntrinsic(Intrinsic::spv_alloca_array,
2213                              {PtrTy, ArraySize->getType()},
2214                              {ArraySize, B.getInt32(I.getAlign().value())})
2215          : B.CreateIntrinsic(Intrinsic::spv_alloca, {PtrTy},
2216                              {B.getInt32(I.getAlign().value())});
2217  replaceAllUsesWithAndErase(B, &I, NewI);
2218  return NewI;
2219}
2220
2221Instruction *SPIRVEmitIntrinsics::visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
2222 assert(I.getType()->isAggregateType() && "Aggregate result is expected");
2223 IRBuilder<> B(I.getParent());
2224 B.SetInsertPoint(&I);
2225 SmallVector<Value *> Args(I.operands());
2226 Args.push_back(B.getInt32(
2227 static_cast<uint32_t>(getMemScope(I.getContext(), I.getSyncScopeID()))));
2228 Args.push_back(B.getInt32(
2229 static_cast<uint32_t>(getMemSemantics(I.getSuccessOrdering()))));
2230 Args.push_back(B.getInt32(
2231 static_cast<uint32_t>(getMemSemantics(I.getFailureOrdering()))));
2232 auto *NewI = B.CreateIntrinsic(Intrinsic::spv_cmpxchg,
2233 {I.getPointerOperand()->getType()}, {Args});
2234 replaceMemInstrUses(&I, NewI, B);
2235 return NewI;
2236}
2237
2238Instruction *SPIRVEmitIntrinsics::visitUnreachableInst(UnreachableInst &I) {
2239 IRBuilder<> B(I.getParent());
2240 B.SetInsertPoint(&I);
2241 B.CreateIntrinsic(Intrinsic::spv_unreachable, {});
2242 return &I;
2243}
2244
2245static bool
2246shouldEmitIntrinsicsForGlobalValue(const GlobalVariableUsers &GVUsers,
2247 const GlobalVariable &GV,
2248 const Function *F) {
2249 // Skip special artificial variables.
2250 static const StringSet<> ArtificialGlobals{"llvm.global.annotations",
2251 "llvm.compiler.used", "llvm.used"};
2252
2253 if (ArtificialGlobals.contains(GV.getName()))
2254 return false;
2255
2256 auto &UserFunctions = GVUsers.getTransitiveUserFunctions(GV);
2257 if (UserFunctions.contains(F))
2258 return true;
2259
2260 // Do not emit the intrinsics in this function, it's going to be emitted on
2261 // the functions that reference it.
2262 if (!UserFunctions.empty())
2263 return false;
2264
2265 // Emit definitions for globals that are not referenced by any function on the
2266 // first function definition.
2267 const Module &M = *F->getParent();
2268 const Function &FirstDefinition = *M.getFunctionDefs().begin();
2269 return F == &FirstDefinition;
2270}
2271
2272void SPIRVEmitIntrinsics::processGlobalValue(GlobalVariable &GV,
2273 IRBuilder<> &B) {
2274
2275 if (!shouldEmitIntrinsicsForGlobalValue(GVUsers, GV, CurrF))
2276 return;
2277
2278 Constant *Init = nullptr;
2279 if (hasInitializer(&GV)) {
2280 // Deduce element type and store results in Global Registry.
2281 // Result is ignored, because TypedPointerType is not supported
2282 // by llvm IR general logic.
2283 deduceElementTypeHelper(&GV, false);
2284 Init = GV.getInitializer();
2285 Type *Ty = isAggrConstForceInt32(Init) ? B.getInt32Ty() : Init->getType();
2286 Constant *Const = isAggrConstForceInt32(Init) ? B.getInt32(1) : Init;
2287 auto *InitInst = B.CreateIntrinsic(Intrinsic::spv_init_global,
2288 {GV.getType(), Ty}, {&GV, Const});
2289 InitInst->setArgOperand(1, Init);
2290 }
2291 if (!Init && GV.use_empty())
2292 B.CreateIntrinsic(Intrinsic::spv_unref_global, GV.getType(), &GV);
2293}
2294
2295// Return true, if we can't decide what is the pointee type now and will get
2296// back to the question later. Return false is spv_assign_ptr_type is not needed
2297// or can be inserted immediately.
2298bool SPIRVEmitIntrinsics::insertAssignPtrTypeIntrs(Instruction *I,
2299                                                   IRBuilder<> &B,
2300                                                   bool UnknownElemTypeI8) {
  // NOTE(review): original source line 2301 is missing from this extraction
  // (likely a token-type guard) — verify against upstream.
2302  if (!isPointerTy(I->getType()) || !requireAssignType(I))
2303    return false;
2304
  // NOTE(review): original source line 2305 is also missing (likely an
  // insert-point adjustment) — verify against upstream.
2306  if (Type *ElemTy = deduceElementType(I, UnknownElemTypeI8)) {
2307    GR->buildAssignPtr(B, ElemTy, I);
2308    return false;
2309  }
    // Pointee type still unknown: defer the decision.
2310  return true;
2311}
2312
// Emit spv_assign_type/spv_assign_ptr_type bookkeeping for instruction I and
// its non-instruction operands. Also recognizes builtin calls with well-known
// result types (async copies -> spirv.Event) and records FP rounding-mode /
// saturation decorations found in the demangled builtin name.
2313void SPIRVEmitIntrinsics::insertAssignTypeIntrs(Instruction *I,
2314                                                IRBuilder<> &B) {
2315  // TODO: extend the list of functions with known result types
2316  static StringMap<unsigned> ResTypeWellKnown = {
2317      {"async_work_group_copy", WellKnownTypes::Event},
2318      {"async_work_group_strided_copy", WellKnownTypes::Event},
2319      {"__spirv_GroupAsyncCopy", WellKnownTypes::Event}};
2320
  // NOTE(review): several original source lines are missing from this
  // extraction (2321, 2329, 2337, 2350, 2353, 2357, 2361, 2365, 2374, 2389,
  // 2391, 2394) — verify the elided statements against upstream.
2322
2323  bool IsKnown = false;
2324  if (auto *CI = dyn_cast<CallInst>(I)) {
2325    if (!CI->isIndirectCall() && !CI->isInlineAsm() &&
2326        CI->getCalledFunction() && !CI->getCalledFunction()->isIntrinsic()) {
2327      Function *CalledF = CI->getCalledFunction();
2328      std::string DemangledName =
2330      FPDecorationId DecorationId = FPDecorationId::NONE;
2331      if (DemangledName.length() > 0)
2332        DemangledName =
2333            SPIRV::lookupBuiltinNameHelper(DemangledName, &DecorationId);
2334      auto ResIt = ResTypeWellKnown.find(DemangledName);
2335      if (ResIt != ResTypeWellKnown.end()) {
2336        IsKnown = true;
2338        switch (ResIt->second) {
2339        case WellKnownTypes::Event:
2340          GR->buildAssignType(
2341              B, TargetExtType::get(I->getContext(), "spirv.Event"), I);
2342          break;
2343        }
2344      }
2345      // check if a floating rounding mode or saturation info is present
2346      switch (DecorationId) {
2347      default:
2348        break;
2349      case FPDecorationId::SAT:
2351        break;
2352      case FPDecorationId::RTE:
2354            CI, SPIRV::FPRoundingMode::FPRoundingMode::RTE, B);
2355        break;
2356      case FPDecorationId::RTZ:
2358            CI, SPIRV::FPRoundingMode::FPRoundingMode::RTZ, B);
2359        break;
2360      case FPDecorationId::RTP:
2362            CI, SPIRV::FPRoundingMode::FPRoundingMode::RTP, B);
2363        break;
2364      case FPDecorationId::RTN:
2366            CI, SPIRV::FPRoundingMode::FPRoundingMode::RTN, B);
2367        break;
2368      }
2369    }
2370  }
2371
  // Assign a type to I itself unless it already got one above, produces no
  // value, or is a pointer (pointers are handled by insertAssignPtrTypeIntrs).
2372  Type *Ty = I->getType();
2373  if (!IsKnown && !Ty->isVoidTy() && !isPointerTy(Ty) && requireAssignType(I)) {
2375    Type *TypeToAssign = Ty;
2376    if (auto *II = dyn_cast<IntrinsicInst>(I)) {
2377      if (II->getIntrinsicID() == Intrinsic::spv_const_composite ||
2378          II->getIntrinsicID() == Intrinsic::spv_undef) {
        // Composite constants keep their original aggregate type in
        // AggrConstTypes; use it instead of the mutated i32.
2379        auto It = AggrConstTypes.find(II);
2380        if (It == AggrConstTypes.end())
2381          report_fatal_error("Unknown composite intrinsic type");
2382        TypeToAssign = It->second;
2383      }
2384    }
2385    TypeToAssign = restoreMutatedType(GR, I, TypeToAssign);
2386    GR->buildAssignType(B, TypeToAssign, I);
2387  }
  // Assign types to constant (non-instruction) operands of I.
2388  for (const auto &Op : I->operands()) {
2390        // Check GetElementPtrConstantExpr case.
2392        (isa<GEPOperator>(Op) ||
2393         (cast<ConstantExpr>(Op)->getOpcode() == CastInst::IntToPtr)))) {
2395      Type *OpTy = Op->getType();
2396      if (isa<UndefValue>(Op) && OpTy->isAggregateType()) {
2397        CallInst *AssignCI =
2398            buildIntrWithMD(Intrinsic::spv_assign_type, {B.getInt32Ty()}, Op,
2399                            UndefValue::get(B.getInt32Ty()), {}, B);
2400        GR->addAssignPtrTypeInstr(Op, AssignCI);
2401      } else if (!isa<Instruction>(Op)) {
2402        Type *OpTy = Op->getType();
2403        Type *OpTyElem = getPointeeType(OpTy);
2404        if (OpTyElem) {
2405          GR->buildAssignPtr(B, OpTyElem, Op);
2406        } else if (isPointerTy(OpTy)) {
2407          Type *ElemTy = GR->findDeducedElementType(Op);
2408          GR->buildAssignPtr(B, ElemTy ? ElemTy : deduceElementType(Op, true),
2409                             Op);
2410        } else {
2411          Value *OpTyVal = Op;
2412          if (OpTy->isTargetExtTy()) {
2413            // We need to do this in order to be consistent with how target ext
2414            // types are handled in `processInstrAfterVisit`
2415            OpTyVal = getNormalizedPoisonValue(OpTy);
2416          }
2417          CallInst *AssignCI =
2418              buildIntrWithMD(Intrinsic::spv_assign_type, {OpTy},
2419                              getNormalizedPoisonValue(OpTy), OpTyVal, {}, B);
2420          GR->addAssignPtrTypeInstr(OpTyVal, AssignCI);
2421        }
2422      }
2423    }
2424  }
2425}
2426
// Return true when Inst is eligible for INTEL memory-aliasing decorations:
// requires SPV_INTEL_memory_access_aliasing, and Inst must be either an
// internal spv_load/spv_store intrinsic or a value-returning __spirv_Atomic*
// call (atomic stores have no result ID to decorate).
2427bool SPIRVEmitIntrinsics::shouldTryToAddMemAliasingDecoration(
2428    Instruction *Inst) {
2429  const SPIRVSubtarget *STI = TM->getSubtargetImpl(*Inst->getFunction());
2430  if (!STI->canUseExtension(SPIRV::Extension::SPV_INTEL_memory_access_aliasing))
2431    return false;
2432  // Add aliasing decorations to internal load and store intrinsics
2433  // and atomic instructions, skipping atomic store as it won't have ID to
2434  // attach the decoration.
2435  CallInst *CI = dyn_cast<CallInst>(Inst);
2436  if (!CI)
2437    return false;
2438  if (Function *Fun = CI->getCalledFunction()) {
2439    if (Fun->isIntrinsic()) {
2440      switch (Fun->getIntrinsicID()) {
2441      case Intrinsic::spv_load:
2442      case Intrinsic::spv_store:
2443        return true;
2444      default:
2445        return false;
2446      }
2447    }
    // NOTE(review): original source line 2448 (presumably fetching the called
    // function's name into Name) is missing from this extraction — verify
    // against upstream.
2449    const std::string Prefix = "__spirv_Atomic";
2450    const bool IsAtomic = Name.find(Prefix) == 0;
2451
2452    if (!Fun->getReturnType()->isVoidTy() && IsAtomic)
2453      return true;
2454  }
2455  return false;
2456}
2457
// Translate instruction metadata into SPIR-V decoration intrinsics:
// "spirv.Decorations" -> spv_assign_decoration, alias.scope/noalias ->
// spv_assign_aliasing_decoration (INTEL extension), and fpmath ->
// spv_assign_fpmaxerror_decoration (requires SPV_INTEL_fp_max_error).
2458void SPIRVEmitIntrinsics::insertSpirvDecorations(Instruction *I,
2459                                                 IRBuilder<> &B) {
2460  if (MDNode *MD = I->getMetadata("spirv.Decorations")) {
    // NOTE(review): original source line 2461 (likely an insert-point
    // adjustment) is missing from this extraction — verify against upstream.
2462    B.CreateIntrinsic(Intrinsic::spv_assign_decoration, {I->getType()},
2463                      {I, MetadataAsValue::get(I->getContext(), MD)});
2464  }
2465  // Lower alias.scope/noalias metadata
2466  {
2467    auto processMemAliasingDecoration = [&](unsigned Kind) {
2468      if (MDNode *AliasListMD = I->getMetadata(Kind)) {
2469        if (shouldTryToAddMemAliasingDecoration(I)) {
2470          uint32_t Dec = Kind == LLVMContext::MD_alias_scope
2471                             ? SPIRV::Decoration::AliasScopeINTEL
2472                             : SPIRV::Decoration::NoAliasINTEL;
          // NOTE(review): original source lines 2473 and 2476 (the Args
          // declaration and, likely, an insert-point adjustment) are missing
          // from this extraction — verify against upstream.
2474              I, ConstantInt::get(B.getInt32Ty(), Dec),
2475              MetadataAsValue::get(I->getContext(), AliasListMD)};
2477          B.CreateIntrinsic(Intrinsic::spv_assign_aliasing_decoration,
2478                            {I->getType()}, {Args});
2479        }
2480      }
2481    };
2482    processMemAliasingDecoration(LLVMContext::MD_alias_scope);
2483    processMemAliasingDecoration(LLVMContext::MD_noalias);
2484  }
2485  // MD_fpmath
2486  if (MDNode *MD = I->getMetadata(LLVMContext::MD_fpmath)) {
2487    const SPIRVSubtarget *STI = TM->getSubtargetImpl(*I->getFunction());
2488    bool AllowFPMaxError =
2489        STI->canUseExtension(SPIRV::Extension::SPV_INTEL_fp_max_error);
2490    if (!AllowFPMaxError)
2491      return;
2492
    // NOTE(review): original source line 2493 (likely an insert-point
    // adjustment) is missing from this extraction — verify against upstream.
2494    B.CreateIntrinsic(Intrinsic::spv_assign_fpmaxerror_decoration,
2495                      {I->getType()},
2496                      {I, MetadataAsValue::get(I->getContext(), MD)});
2497  }
2498}
2499
// Look up (or lazily create) the per-function vector of FP fast-math default
// info. A fresh vector holds one entry each for half, float and double, in
// increasing bit-width order, all initialized to FPFastMathMode::None.
// NOTE(review): original source lines 2500 and 2502 (the function's
// return-type/name line and part of the map parameter type) are missing from
// this extraction — verify the signature against upstream.
2501    const Module &M,
2503        &FPFastMathDefaultInfoMap,
2504    Function *F) {
2505  auto it = FPFastMathDefaultInfoMap.find(F);
2506  if (it != FPFastMathDefaultInfoMap.end())
2507    return it->second;
2508
2509  // If the map does not contain the entry, create a new one. Initialize it to
2510  // contain all 3 elements sorted by bit width of target type: {half, float,
2511  // double}.
2512  SPIRV::FPFastMathDefaultInfoVector FPFastMathDefaultInfoVec;
2513  FPFastMathDefaultInfoVec.emplace_back(Type::getHalfTy(M.getContext()),
2514                                        SPIRV::FPFastMathMode::None);
2515  FPFastMathDefaultInfoVec.emplace_back(Type::getFloatTy(M.getContext()),
2516                                        SPIRV::FPFastMathMode::None);
2517  FPFastMathDefaultInfoVec.emplace_back(Type::getDoubleTy(M.getContext()),
2518                                        SPIRV::FPFastMathMode::None);
2519  return FPFastMathDefaultInfoMap[F] = std::move(FPFastMathDefaultInfoVec);
2520}
2521
// Select the FPFastMathDefaultInfo entry matching Ty's scalar bit width from
// the 3-element {half, float, double} vector.
// NOTE(review): original source lines 2522 (return-type/name line) and 2527
// (the index-computation call, presumably a bit-width-to-index helper) are
// missing from this extraction — verify against upstream.
2523    SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec,
2524    const Type *Ty) {
2525  size_t BitWidth = Ty->getScalarSizeInBits();
2526  int Index =
2528          BitWidth);
2529  assert(Index >= 0 && Index < 3 &&
2530         "Expected FPFastMathDefaultInfo for half, float, or double");
2531  assert(FPFastMathDefaultInfoVec.size() == 3 &&
2532         "Expected FPFastMathDefaultInfoVec to have exactly 3 elements");
2533  return FPFastMathDefaultInfoVec[Index];
2534}
2535
// For SPV_KHR_float_controls2: read the module's "spirv.ExecutionMode"
// metadata (FPFastMathDefault, and the deprecated ContractionOff /
// SignedZeroInfNanPreserve modes), validate flag compatibility, and create
// i32 global constants holding the FP fast-math bitmask for each entry point
// so they can be referenced by execution modes later.
2536void SPIRVEmitIntrinsics::insertConstantsForFPFastMathDefault(Module &M) {
2537  const SPIRVSubtarget *ST = TM->getSubtargetImpl();
2538  if (!ST->canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2))
2539    return;
2540
2541  // Store the FPFastMathDefaultInfo in the FPFastMathDefaultInfoMap.
2542  // We need the entry point (function) as the key, and the target
2543  // type and flags as the value.
2544  // We also need to check ContractionOff and SignedZeroInfNanPreserve
2545  // execution modes, as they are now deprecated and must be replaced
2546  // with FPFastMathDefaultInfo.
2547  auto Node = M.getNamedMetadata("spirv.ExecutionMode");
2548  if (!Node) {
2549    if (!M.getNamedMetadata("opencl.enable.FP_CONTRACT")) {
2550      // This requires emitting ContractionOff. However, because
2551      // ContractionOff is now deprecated, we need to replace it with
2552      // FPFastMathDefaultInfo with FP Fast Math Mode bitmask set to all 0.
2553      // We need to create the constant for that.
2554
2555      // Create constant instruction with the bitmask flags.
2556      Constant *InitValue =
2557          ConstantInt::get(Type::getInt32Ty(M.getContext()), 0);
2558      // TODO: Reuse constant if there is one already with the required
2559      // value.
      // NOTE(review): original source line 2564 (presumably the linkage
      // argument of this GlobalVariable constructor) is missing from this
      // extraction — verify against upstream.
2560      [[maybe_unused]] GlobalVariable *GV =
2561          new GlobalVariable(M,                                // Module
2562                             Type::getInt32Ty(M.getContext()), // Type
2563                             true,                             // isConstant
2565                             InitValue                         // Initializer
2566          );
2567    }
2568    return;
2569  }
2570
2571  // The table maps function pointers to their default FP fast math info. It
2572  // can be assumed that the SmallVector is sorted by the bit width of the
2573  // type. The first element is the smallest bit width, and the last element
2574  // is the largest bit width, therefore, we will have {half, float, double}
2575  // in the order of their bit widths.
2576  DenseMap<Function *, SPIRV::FPFastMathDefaultInfoVector>
2577      FPFastMathDefaultInfoMap;
2578
2579  for (unsigned i = 0; i < Node->getNumOperands(); i++) {
2580    MDNode *MDN = cast<MDNode>(Node->getOperand(i));
2581    assert(MDN->getNumOperands() >= 2 && "Expected at least 2 operands");
    // NOTE(review): original source lines 2582, 2585, 2593, 2617 and
    // 2623-2624 are missing from this extraction (they carry the casts
    // producing F/EM/Flags/TargetWidth and the index computation) — verify
    // against upstream.
2583        cast<ConstantAsMetadata>(MDN->getOperand(0))->getValue());
2584    const auto EM =
2586            cast<ConstantAsMetadata>(MDN->getOperand(1))->getValue())
2587            ->getZExtValue();
2588    if (EM == SPIRV::ExecutionMode::FPFastMathDefault) {
2589      assert(MDN->getNumOperands() == 4 &&
2590             "Expected 4 operands for FPFastMathDefault");
2591      const Type *T = cast<ValueAsMetadata>(MDN->getOperand(2))->getType();
2592      unsigned Flags =
2594              cast<ConstantAsMetadata>(MDN->getOperand(3))->getValue())
2595              ->getZExtValue();
2596      SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
2597          getOrCreateFPFastMathDefaultInfoVec(M, FPFastMathDefaultInfoMap, F);
2598      SPIRV::FPFastMathDefaultInfo &Info =
2599          getFPFastMathDefaultInfo(FPFastMathDefaultInfoVec, T);
2600      Info.FastMathFlags = Flags;
2601      Info.FPFastMathDefault = true;
2602    } else if (EM == SPIRV::ExecutionMode::ContractionOff) {
2603      assert(MDN->getNumOperands() == 2 &&
2604             "Expected no operands for ContractionOff");
2605
2606      // We need to save this info for every possible FP type, i.e. {half,
2607      // float, double, fp128}.
2608      SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
2609          getOrCreateFPFastMathDefaultInfoVec(M, FPFastMathDefaultInfoMap, F);
2610      for (SPIRV::FPFastMathDefaultInfo &Info : FPFastMathDefaultInfoVec) {
2611        Info.ContractionOff = true;
2612      }
2613    } else if (EM == SPIRV::ExecutionMode::SignedZeroInfNanPreserve) {
2614      assert(MDN->getNumOperands() == 3 &&
2615             "Expected 1 operand for SignedZeroInfNanPreserve");
2616      unsigned TargetWidth =
2618              cast<ConstantAsMetadata>(MDN->getOperand(2))->getValue())
2619              ->getZExtValue();
2620      // We need to save this info only for the FP type with TargetWidth.
2621      SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
2622          getOrCreateFPFastMathDefaultInfoVec(M, FPFastMathDefaultInfoMap, F);
2625      assert(Index >= 0 && Index < 3 &&
2626             "Expected FPFastMathDefaultInfo for half, float, or double");
2627      assert(FPFastMathDefaultInfoVec.size() == 3 &&
2628             "Expected FPFastMathDefaultInfoVec to have exactly 3 elements");
2629      FPFastMathDefaultInfoVec[Index].SignedZeroInfNanPreserve = true;
2630    }
2631  }
2632
  // Materialize one i32 constant global per distinct flag bitmask, reusing
  // globals across entries via the GlobalVars cache.
2633  std::unordered_map<unsigned, GlobalVariable *> GlobalVars;
2634  for (auto &[Func, FPFastMathDefaultInfoVec] : FPFastMathDefaultInfoMap) {
2635    if (FPFastMathDefaultInfoVec.empty())
2636      continue;
2637
2638    for (const SPIRV::FPFastMathDefaultInfo &Info : FPFastMathDefaultInfoVec) {
2639      assert(Info.Ty && "Expected target type for FPFastMathDefaultInfo");
2640      // Skip if none of the execution modes was used.
2641      unsigned Flags = Info.FastMathFlags;
2642      if (Flags == SPIRV::FPFastMathMode::None && !Info.ContractionOff &&
2643          !Info.SignedZeroInfNanPreserve && !Info.FPFastMathDefault)
2644        continue;
2645
2646      // Check if flags are compatible.
2647      if (Info.ContractionOff && (Flags & SPIRV::FPFastMathMode::AllowContract))
2648        report_fatal_error("Conflicting FPFastMathFlags: ContractionOff "
2649                           "and AllowContract");
2650
2651      if (Info.SignedZeroInfNanPreserve &&
2652          !(Flags &
2653            (SPIRV::FPFastMathMode::NotNaN | SPIRV::FPFastMathMode::NotInf |
2654             SPIRV::FPFastMathMode::NSZ))) {
2655        if (Info.FPFastMathDefault)
2656          report_fatal_error("Conflicting FPFastMathFlags: "
2657                             "SignedZeroInfNanPreserve but at least one of "
2658                             "NotNaN/NotInf/NSZ is enabled.");
2659      }
2660
2661      if ((Flags & SPIRV::FPFastMathMode::AllowTransform) &&
2662          !((Flags & SPIRV::FPFastMathMode::AllowReassoc) &&
2663            (Flags & SPIRV::FPFastMathMode::AllowContract))) {
2664        report_fatal_error("Conflicting FPFastMathFlags: "
2665                           "AllowTransform requires AllowReassoc and "
2666                           "AllowContract to be set.");
2667      }
2668
2669      auto it = GlobalVars.find(Flags);
2670      GlobalVariable *GV = nullptr;
2671      if (it != GlobalVars.end()) {
2672        // Reuse existing global variable.
2673        GV = it->second;
2674      } else {
2675        // Create constant instruction with the bitmask flags.
2676        Constant *InitValue =
2677            ConstantInt::get(Type::getInt32Ty(M.getContext()), Flags);
2678        // TODO: Reuse constant if there is one already with the required
2679        // value.
        // NOTE(review): original source line 2683 (presumably the linkage
        // argument) is missing from this extraction — verify against upstream.
2680        GV = new GlobalVariable(M,                                // Module
2681                                Type::getInt32Ty(M.getContext()), // Type
2682                                true,                             // isConstant
2684                                InitValue                         // Initializer
2685        );
2686        GlobalVars[Flags] = GV;
2687      }
2688    }
2689  }
2690}
2691
// Post-visit fixups for a single instruction:
//  - wraps results of spv_const_composite in spv_track_constant so the
//    original aggregate constant is still available after IRTranslation
//    (only while TrackConstants is set);
//  - wraps operands of target-extension type in spv_track_constant against
//    a normalized poison value (mirrors insertAssignTypeIntrs);
//  - inserts spv_ptrcast for pointer operands whose deduced element type is
//    known and is not plain i8;
//  - emits the name-tracking intrinsic once per instruction (Named set).
2692void SPIRVEmitIntrinsics::processInstrAfterVisit(Instruction *I,
2693                                                 IRBuilder<> &B) {
2694  auto *II = dyn_cast<IntrinsicInst>(I);
2695  bool IsConstComposite =
2696      II && II->getIntrinsicID() == Intrinsic::spv_const_composite;
2697  if (IsConstComposite && TrackConstants) {
    // NOTE(review): original line 2698 appears to be missing here
    // (extraction artifact) -- verify against upstream LLVM sources.
2699    auto t = AggrConsts.find(I);
2700    assert(t != AggrConsts.end());
    // Re-emit the tracked aggregate constant and make all users refer to the
    // tracking intrinsic instead of the composite directly.
2701    auto *NewOp =
2702        buildIntrWithMD(Intrinsic::spv_track_constant,
2703                        {II->getType(), II->getType()}, t->second, I, {}, B);
2704    replaceAllUsesWith(I, NewOp, false);
2705    NewOp->setArgOperand(0, I);
2706  }
2707  bool IsPhi = isa<PHINode>(I), BPrepared = false;
2708  for (const auto &Op : I->operands()) {
2709    if (isa<PHINode>(I) || isa<SwitchInst>(I) ||
    // NOTE(review): the continuation of this condition (original line 2710)
    // appears to be missing here -- verify against upstream LLVM sources.
2711      continue;
2712    unsigned OpNo = Op.getOperandNo();
    // Skip the base pointer of spv_gep and any immediate-argument operands
    // of intrinsic calls; those must not be rewritten.
2713    if (II && ((II->getIntrinsicID() == Intrinsic::spv_gep && OpNo == 0) ||
2714               (!II->isBundleOperand(OpNo) &&
2715                II->paramHasAttr(OpNo, Attribute::ImmArg))))
2716      continue;
2717
    // Lazily position the builder: past allocas for PHIs (their operands
    // cannot be materialized inside the PHI block), otherwise right before I.
2718    if (!BPrepared) {
2719      IsPhi ? B.SetInsertPointPastAllocas(I->getParent()->getParent())
2720            : B.SetInsertPoint(I);
2721      BPrepared = true;
2722    }
2723    Type *OpTy = Op->getType();
2724    Type *OpElemTy = GR->findDeducedElementType(Op);
2725    Value *NewOp = Op;
2726    if (OpTy->isTargetExtTy()) {
2727      // Since this value is replaced by poison, we need to do the same in
2728      // `insertAssignTypeIntrs`.
2729      Value *OpTyVal = getNormalizedPoisonValue(OpTy);
2730      NewOp = buildIntrWithMD(Intrinsic::spv_track_constant,
2731                              {OpTy, OpTyVal->getType()}, Op, OpTyVal, {}, B);
2732    }
    // Pointer operand with a known, non-default (non-i8) element type:
    // materialize an explicit spv_ptrcast and record its pointee type.
2733    if (!IsConstComposite && isPointerTy(OpTy) && OpElemTy != nullptr &&
2734        OpElemTy != IntegerType::getInt8Ty(I->getContext())) {
2735      SmallVector<Type *, 2> Types = {OpTy, OpTy};
2736      SmallVector<Value *, 2> Args = {
2737          NewOp, buildMD(getNormalizedPoisonValue(OpElemTy)),
2738          B.getInt32(getPointerAddressSpace(OpTy))};
2739      CallInst *PtrCasted =
2740          B.CreateIntrinsic(Intrinsic::spv_ptrcast, {Types}, Args);
2741      GR->buildAssignPtr(B, OpElemTy, PtrCasted);
2742      NewOp = PtrCasted;
2743    }
2744    if (NewOp != Op)
2745      I->setOperand(OpNo, NewOp);
2746  }
  // Emit the OpName-tracking intrinsic only the first time we see I.
2747  if (Named.insert(I).second)
2748    emitAssignName(I, B);
2749}
2750
2751Type *SPIRVEmitIntrinsics::deduceFunParamElementType(Function *F,
2752 unsigned OpIdx) {
2753 std::unordered_set<Function *> FVisited;
2754 return deduceFunParamElementType(F, OpIdx, FVisited);
2755}
2756
// Recursive worker: deduce the element type of F's OpIdx-th pointer
// parameter by inspecting F's call sites. FVisited breaks recursion cycles
// across mutually-calling functions. Returns nullptr when nothing is found.
2757Type *SPIRVEmitIntrinsics::deduceFunParamElementType(
2758    Function *F, unsigned OpIdx, std::unordered_set<Function *> &FVisited) {
2759  // maybe a cycle
2760  if (!FVisited.insert(F).second)
2761    return nullptr;
2762
2763  std::unordered_set<Value *> Visited;
  // NOTE(review): original line 2764 (presumably the declaration of the
  // `Lookup` worklist used below) is missing from this listing -- verify
  // against upstream LLVM sources.
2765  // search in function's call sites
2766  for (User *U : F->users()) {
2767    CallInst *CI = dyn_cast<CallInst>(U);
2768    if (!CI || OpIdx >= CI->arg_size())
2769      continue;
2770    Value *OpArg = CI->getArgOperand(OpIdx);
2771    if (!isPointerTy(OpArg->getType()))
2772      continue;
2773    // maybe we already know operand's element type
2774    if (Type *KnownTy = GR->findDeducedElementType(OpArg))
2775      return KnownTy;
2776    // try to deduce from the operand itself
2777    Visited.clear();
2778    if (Type *Ty = deduceElementTypeHelper(OpArg, Visited, false))
2779      return Ty;
2780    // search in actual parameter's users
2781    for (User *OpU : OpArg->users()) {
      // NOTE(review): original line 2782 (presumably casting OpU to an
      // Instruction `Inst`) is missing from this listing -- verify upstream.
2783      if (!Inst || Inst == CI)
2784        continue;
2785      Visited.clear();
2786      if (Type *Ty = deduceElementTypeHelper(Inst, Visited, false))
2787        return Ty;
2788    }
2789    // check if it's a formal parameter of the outer function
2790    if (!CI->getParent() || !CI->getParent()->getParent())
2791      continue;
2792    Function *OuterF = CI->getParent()->getParent();
2793    if (FVisited.find(OuterF) != FVisited.end())
2794      continue;
    // Record (caller, formal-parameter-index) pairs to recurse into after
    // all direct call sites have been examined.
2795    for (unsigned i = 0; i < OuterF->arg_size(); ++i) {
2796      if (OuterF->getArg(i) == OpArg) {
2797        Lookup.push_back(std::make_pair(OuterF, i));
2798        break;
2799      }
2800    }
2801  }
2802
2803  // search in function parameters
2804  for (auto &Pair : Lookup) {
2805    if (Type *Ty = deduceFunParamElementType(Pair.first, Pair.second, FVisited))
2806      return Ty;
2807  }
2808
2809  return nullptr;
2810}
2811
// Assign pointee types to F's untyped-pointer arguments using only cheap,
// header-local information: an explicit pointee-type attribute, element
// types already deduced at call sites, or (when function pointers are
// enabled) indirect calls through the argument itself.
2812void SPIRVEmitIntrinsics::processParamTypesByFunHeader(Function *F,
2813                                                       IRBuilder<> &B) {
2814  B.SetInsertPointPastAllocas(F);
2815  for (unsigned OpIdx = 0; OpIdx < F->arg_size(); ++OpIdx) {
2816    Argument *Arg = F->getArg(OpIdx);
2817    if (!isUntypedPointerTy(Arg->getType()))
2818      continue;
2819    Type *ElemTy = GR->findDeducedElementType(Arg);
2820    if (ElemTy)
2821      continue;
    // Cheapest source of truth: an explicit pointee-type attribute.
2822    if (hasPointeeTypeAttr(Arg) &&
2823        (ElemTy = getPointeeTypeByAttr(Arg)) != nullptr) {
2824      GR->buildAssignPtr(B, ElemTy, Arg);
2825      continue;
2826    }
2827    // search in function's call sites
2828    for (User *U : F->users()) {
2829      CallInst *CI = dyn_cast<CallInst>(U);
2830      if (!CI || OpIdx >= CI->arg_size())
2831        continue;
2832      Value *OpArg = CI->getArgOperand(OpIdx);
2833      if (!isPointerTy(OpArg->getType()))
2834        continue;
2835      // maybe we already know operand's element type
2836      if ((ElemTy = GR->findDeducedElementType(OpArg)) != nullptr)
2837        break;
2838    }
2839    if (ElemTy) {
2840      GR->buildAssignPtr(B, ElemTy, Arg);
2841      continue;
2842    }
    // Last resort: if the argument itself is used as an indirect call
    // target in the current function, deduce a function-pointer type.
2843    if (HaveFunPtrs) {
2844      for (User *U : Arg->users()) {
2845        CallInst *CI = dyn_cast<CallInst>(U);
2846        if (CI && !isa<IntrinsicInst>(CI) && CI->isIndirectCall() &&
2847            CI->getCalledOperand() == Arg &&
2848            CI->getParent()->getParent() == CurrF) {
          // NOTE(review): original line 2849 (presumably the declaration of
          // `Ops` passed below) is missing from this listing -- verify
          // against upstream LLVM sources.
2850          deduceOperandElementTypeFunctionPointer(CI, Ops, ElemTy, false);
2851          if (ElemTy) {
2852            GR->buildAssignPtr(B, ElemTy, Arg);
2853            break;
2854          }
2855        }
2856      }
2857    }
2858  }
2859}
2860
2861void SPIRVEmitIntrinsics::processParamTypes(Function *F, IRBuilder<> &B) {
2862 B.SetInsertPointPastAllocas(F);
2863 for (unsigned OpIdx = 0; OpIdx < F->arg_size(); ++OpIdx) {
2864 Argument *Arg = F->getArg(OpIdx);
2865 if (!isUntypedPointerTy(Arg->getType()))
2866 continue;
2867 Type *ElemTy = GR->findDeducedElementType(Arg);
2868 if (!ElemTy && (ElemTy = deduceFunParamElementType(F, OpIdx)) != nullptr) {
2869 if (CallInst *AssignCI = GR->findAssignPtrTypeInstr(Arg)) {
2870 DenseSet<std::pair<Value *, Value *>> VisitedSubst;
2871 GR->updateAssignType(AssignCI, Arg, getNormalizedPoisonValue(ElemTy));
2872 propagateElemType(Arg, IntegerType::getInt8Ty(F->getContext()),
2873 VisitedSubst);
2874 } else {
2875 GR->buildAssignPtr(B, ElemTy, Arg);
2876 }
2877 }
2878 }
2879}
2880
// NOTE(review): the opening line of this definition (original line 2881,
// presumably `static FunctionType *getFunctionPointerElemType(Function *F,`)
// is missing from this listing -- verify against upstream LLVM sources.
2882                                                  SPIRVGlobalRegistry *GR) {
// Builds a FunctionType describing F where each pointer argument with a
// deduced element type is replaced by a typed-pointer wrapper; returns the
// original type unchanged when no argument was refined.
2883  FunctionType *FTy = F->getFunctionType();
2884  bool IsNewFTy = false;
  // NOTE(review): original line 2885 (presumably the `ArgTys` declaration)
  // is missing from this listing -- verify against upstream LLVM sources.
2886  for (Argument &Arg : F->args()) {
2887    Type *ArgTy = Arg.getType();
2888    if (ArgTy->isPointerTy())
2889      if (Type *ElemTy = GR->findDeducedElementType(&Arg)) {
2890        IsNewFTy = true;
2891        ArgTy = getTypedPointerWrapper(ElemTy, getPointerAddressSpace(ArgTy));
2892      }
2893    ArgTys.push_back(ArgTy);
2894  }
2895  return IsNewFTy
2896             ? FunctionType::get(FTy->getReturnType(), ArgTys, FTy->isVarArg())
2897             : FTy;
2898}
2899
// Collects functions whose address is taken (used other than as a direct
// call target, or referenced by assign-ptr-type/ptrcast intrinsics) and
// materializes a synthetic function that calls each of them with poison
// arguments, keeping them alive through IRTranslation. Returns true if the
// synthetic caller was created.
2900bool SPIRVEmitIntrinsics::processFunctionPointers(Module &M) {
2901  SmallVector<Function *> Worklist;
2902  for (auto &F : M) {
2903    if (F.isIntrinsic())
2904      continue;
2905    if (F.isDeclaration()) {
      // A declaration used by anything other than a direct call is taken
      // by address.
2906      for (User *U : F.users()) {
2907        CallInst *CI = dyn_cast<CallInst>(U);
2908        if (!CI || CI->getCalledFunction() != &F) {
2909          Worklist.push_back(&F);
2910          break;
2911        }
2912      }
2913    } else {
2914      if (F.user_empty())
2915        continue;
2916      Type *FPElemTy = GR->findDeducedElementType(&F);
2917      if (!FPElemTy)
2918        FPElemTy = getFunctionPointerElemType(&F, GR);
2919      for (User *U : F.users()) {
2920        IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
2921        if (!II || II->arg_size() != 3 || II->getOperand(0) != &F)
2922          continue;
2923        if (II->getIntrinsicID() == Intrinsic::spv_assign_ptr_type ||
2924            II->getIntrinsicID() == Intrinsic::spv_ptrcast) {
          // NOTE(review): original line 2925 (the body of this branch) is
          // missing from this listing -- verify against upstream sources.
2926          break;
2927        }
2928      }
2929    }
2930  }
2931  if (Worklist.empty())
2932    return false;
2933
2934  LLVMContext &Ctx = M.getContext();
  // NOTE(review): original line 2935 (presumably creating the synthetic
  // function `SF` used below) is missing from this listing -- verify.
2936  BasicBlock *BB = BasicBlock::Create(Ctx, "entry", SF);
2937  IRBuilder<> IRB(BB);
2938
2939  for (Function *F : Worklist) {
    // NOTE(review): original line 2940 (presumably the `Args` declaration)
    // is missing from this listing -- verify against upstream sources.
2941    for (const auto &Arg : F->args())
2942      Args.push_back(getNormalizedPoisonValue(Arg.getType()));
2943    IRB.CreateCall(F, Args);
2944  }
2945  IRB.CreateRetVoid();
2946
2947  return true;
2948}
2949
2950// Apply types parsed from demangled function declarations.
// For every declaration whose demangled name yielded pointee-type info
// (FDeclPtrTys, filled by parseFunDeclarations), push those element types
// onto the actual call-site arguments: arguments get assign-ptr-type,
// GEP-like values get an spv_ptrcast, other instructions just record the
// deduced type, and remaining values get an assignment at function entry.
// Pointer-returning producer calls also get a deduced return type.
2951void SPIRVEmitIntrinsics::applyDemangledPtrArgTypes(IRBuilder<> &B) {
2952  DenseMap<Function *, CallInst *> Ptrcasts;
2953  for (auto It : FDeclPtrTys) {
2954    Function *F = It.first;
2955    for (auto *U : F->users()) {
2956      CallInst *CI = dyn_cast<CallInst>(U);
2957      if (!CI || CI->getCalledFunction() != F)
2958        continue;
2959      unsigned Sz = CI->arg_size();
2960      for (auto [Idx, ElemTy] : It.second) {
2961        if (Idx >= Sz)
2962          continue;
2963        Value *Param = CI->getArgOperand(Idx);
        // Skip values that already have a deduced type or are globals.
2964        if (GR->findDeducedElementType(Param) || isa<GlobalValue>(Param))
2965          continue;
2966        if (Argument *Arg = dyn_cast<Argument>(Param)) {
2967          if (!hasPointeeTypeAttr(Arg)) {
2968            B.SetInsertPointPastAllocas(Arg->getParent());
2969            B.SetCurrentDebugLocation(DebugLoc());
2970            GR->buildAssignPtr(B, ElemTy, Arg);
2971          }
2972        } else if (isaGEP(Param)) {
2973          replaceUsesOfWithSpvPtrcast(Param, normalizeType(ElemTy), CI,
2974                                      Ptrcasts);
2975        } else if (isa<Instruction>(Param)) {
2976          GR->addDeducedElementType(Param, normalizeType(ElemTy));
2977          // insertAssignTypeIntrs() will complete buildAssignPtr()
2978        } else {
          // Non-instruction, non-argument value: assign at function entry.
2979          B.SetInsertPoint(CI->getParent()
2980                               ->getParent()
2981                               ->getEntryBlock()
2982                               .getFirstNonPHIOrDbgOrAlloca());
2983          GR->buildAssignPtr(B, ElemTy, Param);
2984        }
        // If the argument is itself produced by a call, also record the
        // element/return type for that producer function.
2985        CallInst *Ref = dyn_cast<CallInst>(Param);
2986        if (!Ref)
2987          continue;
2988        Function *RefF = Ref->getCalledFunction();
2989        if (!RefF || !isPointerTy(RefF->getReturnType()) ||
2990            GR->findDeducedElementType(RefF))
2991          continue;
2992        ElemTy = normalizeType(ElemTy);
2993        GR->addDeducedElementType(RefF, ElemTy);
2994        GR->addReturnType(
        // NOTE(review): original line 2995 (the first arguments of this
        // addReturnType call) is missing from this listing -- verify.
2996            ElemTy, getPointerAddressSpace(RefF->getReturnType())));
2997      }
2998    }
2999  }
3000}
3001
// Rewrites `getelementptr [0 x T], P, 0, I...` into `getelementptr T, P,
// I...`, preserving no-wrap flags and insertion point. Returns the new GEP,
// or nullptr when the pattern does not apply (caller keeps the original).
3002GetElementPtrInst *
3003SPIRVEmitIntrinsics::simplifyZeroLengthArrayGepInst(GetElementPtrInst *GEP) {
3004  // getelementptr [0 x T], P, 0 (zero), I -> getelementptr T, P, I.
3005  // If type is 0-length array and first index is 0 (zero), drop both the
3006  // 0-length array type and the first index. This is a common pattern in
3007  // the IR, e.g. when using a zero-length array as a placeholder for a
3008  // flexible array such as unbound arrays.
3009  assert(GEP && "GEP is null");
3010  Type *SrcTy = GEP->getSourceElementType();
3011  SmallVector<Value *, 8> Indices(GEP->indices());
3012  ArrayType *ArrTy = dyn_cast<ArrayType>(SrcTy);
3013  if (ArrTy && ArrTy->getNumElements() == 0 &&
  // NOTE(review): original line 3014 (the remaining condition, presumably a
  // zero-first-index check) is missing from this listing -- verify upstream.
3015    Indices.erase(Indices.begin());
3016    SrcTy = ArrTy->getElementType();
3017    return GetElementPtrInst::Create(SrcTy, GEP->getPointerOperand(), Indices,
3018                                     GEP->getNoWrapFlags(), "",
3019                                     GEP->getIterator());
3020  }
3021  return nullptr;
3022}
3023
// For non-shader targets with SPV_INTEL_unstructured_loop_controls, turn
// per-block `llvm.loop` metadata into spv_loop_control_intel intrinsic
// calls (loop-control mask followed by its optional literal parameters)
// placed right before each loop-latch terminator.
3024void SPIRVEmitIntrinsics::emitUnstructuredLoopControls(Function &F,
3025                                                       IRBuilder<> &B) {
3026  const SPIRVSubtarget *ST = TM->getSubtargetImpl(F);
3027  // Shaders use SPIRVStructurizer which emits OpLoopMerge via spv_loop_merge.
3028  if (ST->isShader())
3029    return;
3030  if (!ST->canUseExtension(
3031          SPIRV::Extension::SPV_INTEL_unstructured_loop_controls))
3032    return;
3033
3034  for (BasicBlock &BB : F) {
    // NOTE(review): original line 3035 (presumably fetching the block
    // terminator `Term`) is missing from this listing -- verify upstream.
3036    MDNode *LoopMD = Term->getMetadata(LLVMContext::MD_loop);
3037    if (!LoopMD)
3038      continue;
3039
    // NOTE(review): original lines 3040-3041 (presumably converting LoopMD
    // into the `Ops` operand list) are missing from this listing -- verify.
3042    unsigned LC = Ops[0];
3043    if (LC == SPIRV::LoopControl::None)
3044      continue;
3045
3046    // Emit intrinsic: loop control mask + optional parameters.
3047    B.SetInsertPoint(Term);
3048    SmallVector<Value *, 4> IntrArgs;
3049    IntrArgs.push_back(B.getInt32(LC));
3050    for (unsigned I = 1; I < Ops.size(); ++I)
3051      IntrArgs.push_back(B.getInt32(Ops[I]));
3052    B.CreateIntrinsic(Intrinsic::spv_loop_control_intel, IntrArgs);
3053  }
3054}
3055
// Main per-function driver. Order matters: GEP normalization, header-based
// parameter typing, aggregate-store bookkeeping, global-value intrinsics,
// a forward type-inference pass over the worklist, a backward operand pass,
// a PHI-only forward pass, the instruction visitors with post-visit fixups,
// and finally loop-control emission.
3056bool SPIRVEmitIntrinsics::runOnFunction(Function &Func) {
3057  if (Func.isDeclaration())
3058    return false;
3059
3060  const SPIRVSubtarget &ST = TM->getSubtarget<SPIRVSubtarget>(Func);
3061  GR = ST.getSPIRVGlobalRegistry();
3062
  // HaveFunPtrs is decided once, before the first function is processed.
3063  if (!CurrF)
3064    HaveFunPtrs =
3065        ST.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers);
3066
3067  CurrF = &Func;
3068  IRBuilder<> B(Func.getContext());
3069  AggrConsts.clear();
3070  AggrConstTypes.clear();
3071  AggrStores.clear();
3072
3073  // Fix GEP result types ahead of inference, and simplify if possible.
3074  // Data structure for dead instructions that were simplified and replaced.
3075  SmallPtrSet<Instruction *, 4> DeadInsts;
3076  for (auto &I : instructions(Func)) {
    // NOTE(review): original line 3077 (presumably casting I to a
    // GetElementPtrInst `GEP`) is missing from this listing -- verify.
3078    auto *SGEP = dyn_cast<StructuredGEPInst>(&I);
3079
3080    if ((!GEP && !SGEP) || GR->findDeducedElementType(&I))
3081      continue;
3082
3083    if (SGEP) {
3084      GR->addDeducedElementType(SGEP,
3085                                normalizeType(SGEP->getResultElementType()));
3086      continue;
3087    }
3088
3089    GetElementPtrInst *NewGEP = simplifyZeroLengthArrayGepInst(GEP);
3090    if (NewGEP) {
3091      GEP->replaceAllUsesWith(NewGEP);
3092      DeadInsts.insert(GEP);
3093      GEP = NewGEP;
3094    }
3095    if (Type *GepTy = getGEPType(GEP))
3096      GR->addDeducedElementType(GEP, normalizeType(GepTy));
3097  }
3098  // Remove dead instructions that were simplified and replaced.
3099  for (auto *I : DeadInsts) {
3100    assert(I->use_empty() && "Dead instruction should not have any uses left");
3101    I->eraseFromParent();
3102  }
3103
3104  processParamTypesByFunHeader(CurrF, B);
3105
3106  // StoreInst's operand type can be changed during the next
3107  // transformations, so we need to store it in the set. Also store already
3108  // transformed types.
3109  for (auto &I : instructions(Func)) {
3110    StoreInst *SI = dyn_cast<StoreInst>(&I);
3111    if (!SI)
3112      continue;
3113    Type *ElTy = SI->getValueOperand()->getType();
3114    if (ElTy->isAggregateType() || ElTy->isVectorTy())
3115      AggrStores.insert(&I);
3116  }
3117
3118  B.SetInsertPoint(&Func.getEntryBlock(), Func.getEntryBlock().begin());
3119  for (auto &GV : Func.getParent()->globals())
3120    processGlobalValue(GV, B);
3121
3122  preprocessUndefs(B);
3123  preprocessCompositeConstants(B);
  // NOTE(review): original lines 3124-3125 (presumably building the
  // `Worklist` iterated below) are missing from this listing -- verify.
3126
3127  applyDemangledPtrArgTypes(B);
3128
3129  // Pass forward: use operand to deduce instructions result.
3130  for (auto &I : Worklist) {
3131    // Don't emit intrinsincs for convergence intrinsics.
3132    if (isConvergenceIntrinsic(I))
3133      continue;
3134
3135    bool Postpone = insertAssignPtrTypeIntrs(I, B, false);
3136    // if Postpone is true, we can't decide on pointee type yet
3137    insertAssignTypeIntrs(I, B);
3138    insertPtrCastOrAssignTypeInstr(I, B);
    // NOTE(review): original line 3139 is missing from this listing.
3140    // if instruction requires a pointee type set, let's check if we know it
3141    // already, and force it to be i8 if not
3142    if (Postpone && !GR->findAssignPtrTypeInstr(I))
3143      insertAssignPtrTypeIntrs(I, B, true);
3144
3145    if (auto *FPI = dyn_cast<ConstrainedFPIntrinsic>(I))
3146      useRoundingMode(FPI, B);
3147  }
3148
3149  // Pass backward: use instructions results to specify/update/cast operands
3150  // where needed.
3151  SmallPtrSet<Instruction *, 4> IncompleteRets;
3152  for (auto &I : llvm::reverse(instructions(Func)))
3153    deduceOperandElementType(&I, &IncompleteRets);
3154
3155  // Pass forward for PHIs only, their operands are not preceed the
3156  // instruction in meaning of `instructions(Func)`.
3157  for (BasicBlock &BB : Func)
3158    for (PHINode &Phi : BB.phis())
3159      if (isPointerTy(Phi.getType()))
3160        deduceOperandElementType(&Phi, nullptr);
3161
3162  for (auto *I : Worklist) {
3163    TrackConstants = true;
3164    if (!I->getType()->isVoidTy() || isa<StoreInst>(I))
    // NOTE(review): original line 3165 (the body of this `if`) is missing
    // from this listing -- verify against upstream sources.
3166    // Visitors return either the original/newly created instruction for
3167    // further processing, nullptr otherwise.
3168    I = visit(*I);
3169    if (!I)
3170      continue;
3171
3172    // Don't emit intrinsics for convergence operations.
3173    if (isConvergenceIntrinsic(I))
3174      continue;
3175
    // NOTE(review): original line 3176 is missing from this listing.
3177    processInstrAfterVisit(I, B);
3178  }
3179
3180  emitUnstructuredLoopControls(Func, B);
3181
3182  return true;
3183}
3184
3185// Try to deduce a better type for pointers to untyped ptr.
// Module-wide second chance for values whose pointee type is still only
// tentative (TodoType). First re-runs local deduction per value and
// propagates improvements; then re-runs operand-based deduction over each
// function in reverse for the users of the remaining todo values. Returns
// true when any todo entry was resolved.
3186bool SPIRVEmitIntrinsics::postprocessTypes(Module &M) {
3187  if (!GR || TodoTypeSz == 0)
3188    return false;
3189
3190  unsigned SzTodo = TodoTypeSz;
3191  DenseMap<Value *, SmallPtrSet<Value *, 4>> ToProcess;
3192  for (auto [Op, Enabled] : TodoType) {
3193    // TODO: add isa<CallInst>(Op) to continue
3194    if (!Enabled || isaGEP(Op))
3195      continue;
3196    CallInst *AssignCI = GR->findAssignPtrTypeInstr(Op);
3197    Type *KnownTy = GR->findDeducedElementType(Op);
3198    if (!KnownTy || !AssignCI)
3199      continue;
3200    assert(Op == AssignCI->getArgOperand(0));
3201    // Try to improve the type deduced after all Functions are processed.
3202    if (auto *CI = dyn_cast<Instruction>(Op)) {
3203      CurrF = CI->getParent()->getParent();
3204      std::unordered_set<Value *> Visited;
3205      if (Type *ElemTy = deduceElementTypeHelper(Op, Visited, false, true)) {
3206        if (ElemTy != KnownTy) {
3207          DenseSet<std::pair<Value *, Value *>> VisitedSubst;
3208          propagateElemType(CI, ElemTy, VisitedSubst);
3209          eraseTodoType(Op);
3210          continue;
3211        }
3212      }
3213    }
3214
    // Still unresolved: queue every non-intrinsic user instruction for the
    // second, operand-driven pass below.
3215    if (Op->hasUseList()) {
3216      for (User *U : Op->users()) {
        // NOTE(review): original line 3217 (presumably casting U to an
        // Instruction `Inst`) is missing from this listing -- verify.
3218        if (Inst && !isa<IntrinsicInst>(Inst))
3219          ToProcess[Inst].insert(Op);
3220      }
3221    }
3222  }
3223  if (TodoTypeSz == 0)
3224    return true;
3225
3226  for (auto &F : M) {
3227    CurrF = &F;
3228    SmallPtrSet<Instruction *, 4> IncompleteRets;
3229    for (auto &I : llvm::reverse(instructions(F))) {
3230      auto It = ToProcess.find(&I);
3231      if (It == ToProcess.end())
3232        continue;
      // Drop values that were resolved since the queue was built.
3233      It->second.remove_if([this](Value *V) { return !isTodoType(V); });
3234      if (It->second.size() == 0)
3235        continue;
3236      deduceOperandElementType(&I, &IncompleteRets, &It->second, true);
3237      if (TodoTypeSz == 0)
3238        return true;
3239    }
3240  }
3241
3242  return SzTodo > TodoTypeSz;
3243}
3244
3245// Parse and store argument types of function declarations where needed.
// Scans external (non-intrinsic) declarations whose demangled builtin name
// maps to OpGroupAsyncCopy, parses the demangled type string, and records
// (arg-index, element-type) pairs for pointer arguments without a pointee
// attribute into FDeclPtrTys for later use by applyDemangledPtrArgTypes.
3246void SPIRVEmitIntrinsics::parseFunDeclarations(Module &M) {
3247  for (auto &F : M) {
3248    if (!F.isDeclaration() || F.isIntrinsic())
3249      continue;
3250    // get the demangled name
3251    std::string DemangledName = getOclOrSpirvBuiltinDemangledName(F.getName());
3252    if (DemangledName.empty())
3253      continue;
3254    // allow only OpGroupAsyncCopy use case at the moment
3255    const SPIRVSubtarget &ST = TM->getSubtarget<SPIRVSubtarget>(F);
3256    auto [Grp, Opcode, ExtNo] = SPIRV::mapBuiltinToOpcode(
3257        DemangledName, ST.getPreferredInstructionSet());
3258    if (Opcode != SPIRV::OpGroupAsyncCopy)
3259      continue;
3260    // find pointer arguments
3261    SmallVector<unsigned> Idxs;
3262    for (unsigned OpIdx = 0; OpIdx < F.arg_size(); ++OpIdx) {
3263      Argument *Arg = F.getArg(OpIdx);
3264      if (isPointerTy(Arg->getType()) && !hasPointeeTypeAttr(Arg))
3265        Idxs.push_back(OpIdx);
3266    }
3267    if (!Idxs.size())
3268      continue;
3269    // parse function arguments
3270    LLVMContext &Ctx = F.getContext();
    // NOTE(review): original line 3271 (presumably the `TypeStrs`
    // declaration) is missing from this listing -- verify upstream.
3272    SPIRV::parseBuiltinTypeStr(TypeStrs, DemangledName, Ctx);
3273    if (!TypeStrs.size())
3274      continue;
3275    // find type info for pointer arguments
3276    for (unsigned Idx : Idxs) {
3277      if (Idx >= TypeStrs.size())
3278        continue;
3279      if (Type *ElemTy =
3280              SPIRV::parseBuiltinCallArgumentType(TypeStrs[Idx].trim(), Ctx))
        // NOTE(review): original line 3281 (part of this condition) is
        // missing from this listing -- verify against upstream sources.
3282          !ElemTy->isTargetExtTy())
3283        FDeclPtrTys[&F].push_back(std::make_pair(Idx, ElemTy));
3284    }
3285  }
3286}
3287
3288bool SPIRVEmitIntrinsics::processMaskedMemIntrinsic(IntrinsicInst &I) {
3289 const SPIRVSubtarget &ST = TM->getSubtarget<SPIRVSubtarget>(*I.getFunction());
3290
3291 if (I.getIntrinsicID() == Intrinsic::masked_gather) {
3292 if (!ST.canUseExtension(
3293 SPIRV::Extension::SPV_INTEL_masked_gather_scatter)) {
3294 I.getContext().emitError(
3295 &I, "llvm.masked.gather requires SPV_INTEL_masked_gather_scatter "
3296 "extension");
3297 // Replace with poison to allow compilation to continue and report error.
3298 I.replaceAllUsesWith(PoisonValue::get(I.getType()));
3299 I.eraseFromParent();
3300 return true;
3301 }
3302
3303 IRBuilder<> B(&I);
3304
3305 Value *Ptrs = I.getArgOperand(0);
3306 Value *Mask = I.getArgOperand(1);
3307 Value *Passthru = I.getArgOperand(2);
3308
3309 // Alignment is stored as a parameter attribute, not as a regular parameter
3310 uint32_t Alignment = I.getParamAlign(0).valueOrOne().value();
3311
3312 SmallVector<Value *, 4> Args = {Ptrs, B.getInt32(Alignment), Mask,
3313 Passthru};
3314 SmallVector<Type *, 4> Types = {I.getType(), Ptrs->getType(),
3315 Mask->getType(), Passthru->getType()};
3316
3317 auto *NewI = B.CreateIntrinsic(Intrinsic::spv_masked_gather, Types, Args);
3318 I.replaceAllUsesWith(NewI);
3319 I.eraseFromParent();
3320 return true;
3321 }
3322
3323 if (I.getIntrinsicID() == Intrinsic::masked_scatter) {
3324 if (!ST.canUseExtension(
3325 SPIRV::Extension::SPV_INTEL_masked_gather_scatter)) {
3326 I.getContext().emitError(
3327 &I, "llvm.masked.scatter requires SPV_INTEL_masked_gather_scatter "
3328 "extension");
3329 // Erase the intrinsic to allow compilation to continue and report error.
3330 I.eraseFromParent();
3331 return true;
3332 }
3333
3334 IRBuilder<> B(&I);
3335
3336 Value *Values = I.getArgOperand(0);
3337 Value *Ptrs = I.getArgOperand(1);
3338 Value *Mask = I.getArgOperand(2);
3339
3340 // Alignment is stored as a parameter attribute on the ptrs parameter (arg
3341 // 1)
3342 uint32_t Alignment = I.getParamAlign(1).valueOrOne().value();
3343
3344 SmallVector<Value *, 4> Args = {Values, Ptrs, B.getInt32(Alignment), Mask};
3345 SmallVector<Type *, 3> Types = {Values->getType(), Ptrs->getType(),
3346 Mask->getType()};
3347
3348 B.CreateIntrinsic(Intrinsic::spv_masked_scatter, Types, Args);
3349 I.eraseFromParent();
3350 return true;
3351 }
3352
3353 return false;
3354}
3355
3356bool SPIRVEmitIntrinsics::convertMaskedMemIntrinsics(Module &M) {
3357 bool Changed = false;
3358
3359 for (Function &F : make_early_inc_range(M)) {
3360 if (!F.isIntrinsic())
3361 continue;
3362 Intrinsic::ID IID = F.getIntrinsicID();
3363 if (IID != Intrinsic::masked_gather && IID != Intrinsic::masked_scatter)
3364 continue;
3365
3366 for (User *U : make_early_inc_range(F.users())) {
3367 if (auto *II = dyn_cast<IntrinsicInst>(U))
3368 Changed |= processMaskedMemIntrinsic(*II);
3369 }
3370
3371 if (F.use_empty())
3372 F.eraseFromParent();
3373 }
3374
3375 return Changed;
3376}
3377
// Module entry point: lowers masked memory intrinsics, parses declaration
// type info, emits FP-fast-math default constants, processes each function,
// then finalizes parameter types, runs the type post-processing pass, and
// handles function pointers if enabled.
3378bool SPIRVEmitIntrinsics::runOnModule(Module &M) {
3379  bool Changed = false;
3380
3381  Changed |= convertMaskedMemIntrinsics(M);
3382
3383  parseFunDeclarations(M);
3384  insertConstantsForFPFastMathDefault(M);
3385  GVUsers.init(M);
3386
3387  TodoType.clear();
3388  for (auto &F : M)
  // NOTE(review): original line 3389 (the body of this loop, presumably
  // `Changed |= runOnFunction(F);`) is missing from this listing -- verify
  // against upstream LLVM sources.
3390
3391  // Specify function parameters after all functions were processed.
3392  for (auto &F : M) {
3393    // check if function parameter types are set
3394    CurrF = &F;
3395    if (!F.isDeclaration() && !F.isIntrinsic()) {
3396      IRBuilder<> B(F.getContext());
3397      processParamTypes(&F, B);
3398    }
3399  }
3400
  // No new tentative types may be queued past this point.
3401  CanTodoType = false;
3402  Changed |= postprocessTypes(M);
3403
3404  if (HaveFunPtrs)
3405    Changed |= processFunctionPointers(M);
3406
3407  return Changed;
3408}
3409
// NOTE(review): the opening line of this definition (original line 3410,
// the createSPIRVEmitIntrinsicsPass signature) is missing from this
// listing -- verify against upstream LLVM sources.
// Factory returning a new instance of the pass for the given target machine.
3411  return new SPIRVEmitIntrinsics(TM);
3412}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
aarch64 promote const
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
always inline
Expand Atomic instructions
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static void replaceAllUsesWith(Value *Old, Value *New, SmallPtrSet< BasicBlock *, 32 > &FreshBBs, bool IsHuge)
Replace all old uses with new ones, and push the updated BBs into FreshBBs.
static Type * getPointeeType(Value *Ptr, const DataLayout &DL)
This file defines the DenseSet and SmallDenseSet classes.
static bool runOnFunction(Function &F, bool PostInlining)
Hexagon Common GEP
iv Induction Variable Users
Definition IVUsers.cpp:48
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Machine Check Debug Module
#define T
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
Function * Fun
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
static unsigned getNumElements(Type *Ty)
static bool isMemInstrToReplace(Instruction *I)
static bool isAggrConstForceInt32(const Value *V)
static SPIRV::FPFastMathDefaultInfoVector & getOrCreateFPFastMathDefaultInfoVec(const Module &M, DenseMap< Function *, SPIRV::FPFastMathDefaultInfoVector > &FPFastMathDefaultInfoMap, Function *F)
static Type * getAtomicElemTy(SPIRVGlobalRegistry *GR, Instruction *I, Value *PointerOperand)
static void reportFatalOnTokenType(const Instruction *I)
static void setInsertPointAfterDef(IRBuilder<> &B, Instruction *I)
static void emitAssignName(Instruction *I, IRBuilder<> &B)
static Type * getPointeeTypeByCallInst(StringRef DemangledName, Function *CalledF, unsigned OpIdx)
static void createRoundingModeDecoration(Instruction *I, unsigned RoundingModeDeco, IRBuilder<> &B)
static void createDecorationIntrinsic(Instruction *I, MDNode *Node, IRBuilder<> &B)
static SPIRV::FPFastMathDefaultInfo & getFPFastMathDefaultInfo(SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec, const Type *Ty)
static cl::opt< bool > SpirvEmitOpNames("spirv-emit-op-names", cl::desc("Emit OpName for all instructions"), cl::init(false))
static bool IsKernelArgInt8(Function *F, StoreInst *SI)
static void addSaturatedDecorationToIntrinsic(Instruction *I, IRBuilder<> &B)
static bool isFirstIndexZero(const GetElementPtrInst *GEP)
static void setInsertPointSkippingPhis(IRBuilder<> &B, Instruction *I)
static FunctionType * getFunctionPointerElemType(Function *F, SPIRVGlobalRegistry *GR)
static void createSaturatedConversionDecoration(Instruction *I, IRBuilder<> &B)
static bool shouldEmitIntrinsicsForGlobalValue(const GlobalVariableUsers &GVUsers, const GlobalVariable &GV, const Function *F)
static Type * restoreMutatedType(SPIRVGlobalRegistry *GR, Instruction *I, Type *Ty)
static bool requireAssignType(Instruction *I)
static void insertSpirvDecorations(MachineFunction &MF, SPIRVGlobalRegistry *GR, MachineIRBuilder MIB)
static void visit(BasicBlock &Start, std::function< bool(BasicBlock *)> op)
StringSet - A set-like wrapper for the StringMap.
DEMANGLE_NAMESPACE_BEGIN bool starts_with(std::string_view self, char C) noexcept
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
LocallyHashedType DenseMapInfo< LocallyHashedType >::Empty
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Definition VPlanSLP.cpp:247
static int Lookup(ArrayRef< TableEntry > Table, unsigned Opcode)
This class represents an incoming formal argument to a Function.
Definition Argument.h:32
const Function * getParent() const
Definition Argument.h:44
static unsigned getPointerOperandIndex()
static unsigned getPointerOperandIndex()
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:539
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition BasicBlock.h:206
LLVM_ABI LLVMContext & getContext() const
Get the context in which this basic block lives.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
static LLVM_ABI BlockAddress * get(Function *F, BasicBlock *BB)
Return a BlockAddress for the specified function and basic block.
bool isInlineAsm() const
Check if this call is an inline asm statement.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
LLVM_ABI bool isIndirectCall() const
Return true if the callsite is an indirect call.
Value * getCalledOperand() const
Value * getArgOperand(unsigned i) const
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
static ConstantAsMetadata * get(Constant *C)
Definition Metadata.h:537
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:168
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI std::optional< RoundingMode > getRoundingMode() const
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
Definition DenseMap.h:256
iterator end()
Definition DenseMap.h:81
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:241
static LLVM_ABI FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
const DataLayout & getDataLayout() const
Get the data layout of the module this function belongs to.
Definition Function.cpp:362
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition Function.h:246
iterator begin()
Definition Function.h:853
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
Definition Function.h:251
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition Function.cpp:358
size_t arg_size() const
Definition Function.h:901
Type * getReturnType() const
Returns the type of the ret val.
Definition Function.h:216
Argument * getArg(unsigned i) const
Definition Function.h:886
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
static LLVM_ABI Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static unsigned getPointerOperandIndex()
PointerType * getType() const
Global values are always pointers.
@ InternalLinkage
Rename collisions when linking (static functions).
Definition GlobalValue.h:60
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
This provides a uniform API for creating instructions and inserting them into a basic block: either at the end of a BasicBlock, or at a specific iterator location in a block.
Definition IRBuilder.h:2788
LLVM_ABI void addDestination(BasicBlock *Dest)
Add a destination.
Base class for instruction visitors.
Definition InstVisitor.h:78
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other instructions.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
LLVM_ABI void copyMetadata(const Instruction &SrcInst, ArrayRef< unsigned > WL=ArrayRef< unsigned >())
Copy metadata from SrcInst to this instruction.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
static unsigned getPointerOperandIndex()
Metadata node.
Definition Metadata.h:1080
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1444
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition Metadata.h:1572
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1450
static LLVM_ABI MDString * get(LLVMContext &Context, StringRef Str)
Definition Metadata.cpp:614
Flags
Flags values. These may be or'd together.
static LLVM_ABI MetadataAsValue * get(LLVMContext &Context, Metadata *MD)
Definition Metadata.cpp:110
Metadata * getMetadata() const
Definition Metadata.h:202
ModulePass class - This class is used to implement unstructured interprocedural optimizations and analyses.
Definition Pass.h:255
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
void addAssignPtrTypeInstr(Value *Val, CallInst *AssignPtrTyCI)
void buildAssignPtr(IRBuilder<> &B, Type *ElemTy, Value *Arg)
Type * findDeducedCompositeType(const Value *Val)
void replaceAllUsesWith(Value *Old, Value *New, bool DeleteOld=true)
void addDeducedElementType(Value *Val, Type *Ty)
void addReturnType(const Function *ArgF, TypedPointerType *DerivedTy)
Type * findMutated(const Value *Val)
void addDeducedCompositeType(Value *Val, Type *Ty)
void buildAssignType(IRBuilder<> &B, Type *Ty, Value *Arg)
Type * findDeducedElementType(const Value *Val)
void updateAssignType(CallInst *AssignCI, Value *Arg, Value *OfType)
CallInst * findAssignPtrTypeInstr(const Value *Val)
const SPIRVTargetLowering * getTargetLowering() const override
bool isLogicalSPIRV() const
bool canUseExtension(SPIRV::Extension::Extension E) const
const SPIRVSubtarget * getSubtargetImpl() const
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
void assign(size_type NumElts, ValueParamT Elt)
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
static unsigned getPointerOperandIndex()
iterator end()
Definition StringMap.h:224
iterator find(StringRef Key)
Definition StringMap.h:237
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Definition StringRef.h:55
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:258
StringSet - A wrapper for StringMap that provides set-like functionality.
Definition StringSet.h:25
bool contains(StringRef key) const
Check if the set contains the given key.
Definition StringSet.h:60
static LLVM_ABI StructType * create(LLVMContext &Context, StringRef Name)
This creates an identified struct.
Definition Type.cpp:619
static unsigned getPointerOperandIndex()
static LLVM_ABI TargetExtType * get(LLVMContext &Context, StringRef Name, ArrayRef< Type * > Types={}, ArrayRef< unsigned > Ints={})
Return a target extension type having the specified name and optional type and integer parameters.
Definition Type.cpp:907
const STC & getSubtarget(const Function &F) const
This method returns a pointer to the specified type of TargetSubtargetInfo.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition Type.h:264
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:296
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:267
Type * getArrayElementType() const
Definition Type.h:408
LLVM_ABI StringRef getTargetExtName() const
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
Definition Type.cpp:294
bool isStructTy() const
True if this is an instance of StructType.
Definition Type.h:261
bool isTargetExtTy() const
Return true if this is a target extension type.
Definition Type.h:203
bool isAggregateType() const
Return true if the type is an aggregate type.
Definition Type.h:304
static LLVM_ABI Type * getDoubleTy(LLVMContext &C)
Definition Type.cpp:285
static LLVM_ABI Type * getFloatTy(LLVMContext &C)
Definition Type.cpp:284
static LLVM_ABI Type * getHalfTy(LLVMContext &C)
Definition Type.cpp:282
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:139
static LLVM_ABI bool isValidElementType(Type *ElemTy)
Return true if the specified type is valid as an element type.
static LLVM_ABI TypedPointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
op_range operands()
Definition User.h:267
void setOperand(unsigned i, Value *Val)
Definition User.h:212
LLVM_ABI bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Definition User.cpp:25
Value * getOperand(unsigned i) const
Definition User.h:207
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
user_iterator user_begin()
Definition Value.h:403
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
Definition Value.cpp:397
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:553
iterator_range< user_iterator > users()
Definition Value.h:427
bool use_empty() const
Definition Value.h:347
user_iterator user_end()
Definition Value.h:411
void mutateType(Type *Ty)
Mutate the type of this Value to be of the specified type.
Definition Value.h:840
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
bool user_empty() const
Definition Value.h:390
std::pair< iterator, bool > insert(const ValueT &V)
Definition DenseSet.h:202
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
Definition DenseSet.h:175
const ParentTy * getParent() const
Definition ilist_node.h:34
CallInst * Call
Changed
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ SPIR_KERNEL
Used for SPIR kernel functions.
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
bool match(Val *V, const Pattern &P)
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
initializer< Ty > init(const Ty &Val)
@ User
could "use" a pointer
NodeAddr< PhiNode * > Phi
Definition RDFGraph.h:390
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
NodeAddr< FuncNode * > Func
Definition RDFGraph.h:393
friend class Instruction
Iterator for Instructions in a `BasicBlock`.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
bool isTypedPointerWrapper(const TargetExtType *ExtTy)
Definition SPIRVUtils.h:406
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2554
ModulePass * createSPIRVEmitIntrinsicsPass(SPIRVTargetMachine *TM)
unsigned getPointerAddressSpace(const Type *T)
Definition SPIRVUtils.h:370
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
FunctionAddr VTableAddr uintptr_t uintptr_t Int32Ty
Definition InstrProf.h:296
CallInst * buildIntrWithMD(Intrinsic::ID IntrID, ArrayRef< Type * > Types, Value *Arg, Value *Arg2, ArrayRef< Constant * > Imms, IRBuilder<> &B)
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2208
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:634
FPDecorationId
Definition SPIRVUtils.h:550
bool isNestedPointer(const Type *Ty)
Function * getOrCreateBackendServiceFunction(Module &M)
MetadataAsValue * buildMD(Value *Arg)
Definition SPIRVUtils.h:516
std::string getOclOrSpirvBuiltinDemangledName(StringRef Name)
SmallVector< unsigned, 1 > getSpirvLoopControlOperandsFromLoopMetadata(MDNode *LoopMD)
auto reverse(ContainerTy &&C)
Definition STLExtras.h:408
Type * getTypedPointerWrapper(Type *ElemTy, unsigned AS)
Definition SPIRVUtils.h:401
bool isVector1(Type *Ty)
Definition SPIRVUtils.h:494
bool isPointerTy(const Type *T)
Definition SPIRVUtils.h:364
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
bool set_union(S1Ty &S1, const S2Ty &S2)
set_union(A, B) - Compute A := A u B, return whether A changed.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector<T, 0>).
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
SPIRV::Scope::Scope getMemScope(LLVMContext &Ctx, SyncScope::ID Id)
@ Ref
The access may reference the value stored in memory.
Definition ModRef.h:32
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
DWARFExpression::Operation Op
Type * getPointeeTypeByAttr(Argument *Arg)
Definition SPIRVUtils.h:383
bool hasPointeeTypeAttr(Argument *Arg)
Definition SPIRVUtils.h:378
constexpr unsigned BitWidth
bool isEquivalentTypes(Type *Ty1, Type *Ty2)
Definition SPIRVUtils.h:456
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
iterator_range< pointer_iterator< WrappedIteratorT > > make_pointer_range(RangeT &&Range)
Definition iterator.h:368
bool hasInitializer(const GlobalVariable *GV)
Definition SPIRVUtils.h:349
Type * normalizeType(Type *Ty)
Definition SPIRVUtils.h:502
@ Enabled
Convert any .debug_str_offsets tables to DWARF64 if needed.
Definition DWP.h:27
bool isSpvIntrinsic(const MachineInstr &MI, Intrinsic::ID IntrinsicID)
PoisonValue * getNormalizedPoisonValue(Type *Ty)
Definition SPIRVUtils.h:512
bool isUntypedPointerTy(const Type *T)
Definition SPIRVUtils.h:359
Type * reconstitutePeeledArrayType(Type *Ty)
SPIRV::MemorySemantics::MemorySemantics getMemSemantics(AtomicOrdering Ord)
static size_t computeFPFastMathDefaultInfoVecIndex(size_t BitWidth)
Definition SPIRVUtils.h:149