//===-- SPIRVEmitIntrinsics.cpp - emit SPIRV intrinsics ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The pass emits SPIRV intrinsics keeping essential high-level information for
// the translation of LLVM IR to SPIR-V.
//
//===----------------------------------------------------------------------===//

#include "SPIRV.h"
#include "SPIRVBuiltins.h"
#include "SPIRVSubtarget.h"
#include "SPIRVTargetMachine.h"
#include "SPIRVUtils.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicsSPIRV.h"
#include "llvm/IR/TypedPointerType.h"

#include <cassert>
#include <queue>
#include <unordered_set>

// This pass performs the following transformations on the LLVM IR level,
// required for the subsequent translation to SPIR-V:
// - replaces direct usages of aggregate constants with target-specific
//   intrinsics;
// - replaces aggregate-related instructions (extract/insert, ld/st, etc.)
//   with target-specific intrinsics;
// - emits intrinsics for the global variable initializers since IRTranslator
//   doesn't handle them and it's not very convenient to translate them
//   ourselves;
// - emits intrinsics to keep track of the string names assigned to the values;
// - emits intrinsics to keep track of constants (this is necessary to have an
//   LLVM IR constant after the IRTranslation is completed) for their further
//   deduplication;
// - emits intrinsics to keep track of original LLVM types of the values
//   to be able to emit proper SPIR-V types eventually.
//
// TODO: consider removing spv.track.constant in favor of spv.assign.type.

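// An illustrative sketch of the rewriting (not the literal output of this
// pass; the exact intrinsic signatures are defined in IntrinsicsSPIRV.td):
// an aggregate load such as
//   %v = load {i32, i32}, ptr %p
// is rewritten into a target intrinsic that keeps the aggregate type visible,
// roughly
//   %v = call {i32, i32} @llvm.spv.load(ptr %p, ...)
// accompanied by spv.assign.type / spv.track.constant annotations for the
// values involved.
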
using namespace llvm;

namespace llvm::SPIRV {
#define GET_BuiltinGroup_DECL
#include "SPIRVGenTables.inc"
} // namespace llvm::SPIRV

namespace {

class SPIRVEmitIntrinsics
    : public ModulePass,
      public InstVisitor<SPIRVEmitIntrinsics, Instruction *> {
  SPIRVTargetMachine *TM = nullptr;
  SPIRVGlobalRegistry *GR = nullptr;
  Function *CurrF = nullptr;
  bool TrackConstants = true;
  bool HaveFunPtrs = false;
  DenseMap<Instruction *, Constant *> AggrConsts;
  DenseMap<Instruction *, Type *> AggrConstTypes;
  DenseSet<Instruction *> AggrStores;
  std::unordered_set<Value *> Named;

  // map of function declarations to <pointer arg index => element type>
  DenseMap<Function *, SmallVector<std::pair<unsigned, Type *>>> FDeclPtrTys;

  // a register of Instructions that don't have a complete type definition
  bool CanTodoType = true;
  unsigned TodoTypeSz = 0;
  DenseMap<Value *, bool> TodoType;
  void insertTodoType(Value *Op) {
    // TODO: add isa<CallInst>(Op) to no-insert
    if (CanTodoType && !isa<GetElementPtrInst>(Op)) {
      auto It = TodoType.try_emplace(Op, true);
      if (It.second)
        ++TodoTypeSz;
    }
  }
  void eraseTodoType(Value *Op) {
    auto It = TodoType.find(Op);
    if (It != TodoType.end() && It->second) {
      It->second = false;
      --TodoTypeSz;
    }
  }
  bool isTodoType(Value *Op) {
    if (TodoTypeSz == 0)
      return false;
    auto It = TodoType.find(Op);
    return It != TodoType.end() && It->second;
  }
  // a register of Instructions that were visited by deduceOperandElementType()
  // to validate operand types with an instruction
  std::unordered_set<Instruction *> TypeValidated;

  // well known result types of builtins
  enum WellKnownTypes { Event };

  // deduce element type of untyped pointers
  Type *deduceElementType(Value *I, bool UnknownElemTypeI8);
  Type *deduceElementTypeHelper(Value *I, bool UnknownElemTypeI8);
  Type *deduceElementTypeHelper(Value *I, std::unordered_set<Value *> &Visited,
                                bool UnknownElemTypeI8,
                                bool IgnoreKnownType = false);
  Type *deduceElementTypeByValueDeep(Type *ValueTy, Value *Operand,
                                     bool UnknownElemTypeI8);
  Type *deduceElementTypeByValueDeep(Type *ValueTy, Value *Operand,
                                     std::unordered_set<Value *> &Visited,
                                     bool UnknownElemTypeI8);
  Type *deduceElementTypeByUsersDeep(Value *Op,
                                     std::unordered_set<Value *> &Visited,
                                     bool UnknownElemTypeI8);
  void maybeAssignPtrType(Type *&Ty, Value *I, Type *RefTy,
                          bool UnknownElemTypeI8);

  // deduce nested types of composites
  Type *deduceNestedTypeHelper(User *U, bool UnknownElemTypeI8);
  Type *deduceNestedTypeHelper(User *U, Type *Ty,
                               std::unordered_set<Value *> &Visited,
                               bool UnknownElemTypeI8);

  // deduce Types of operands of the Instruction if possible
  void deduceOperandElementType(Instruction *I,
                                SmallPtrSet<Instruction *, 4> *IncompleteRets,
                                const SmallPtrSet<Value *, 4> *AskOps = nullptr,
                                bool IsPostprocessing = false);

  void preprocessCompositeConstants(IRBuilder<> &B);
  void preprocessUndefs(IRBuilder<> &B);

  Type *reconstructType(Value *Op, bool UnknownElemTypeI8,
                        bool IsPostprocessing);

  void replaceMemInstrUses(Instruction *Old, Instruction *New, IRBuilder<> &B);
  void processInstrAfterVisit(Instruction *I, IRBuilder<> &B);
  bool insertAssignPtrTypeIntrs(Instruction *I, IRBuilder<> &B,
                                bool UnknownElemTypeI8);
  void insertAssignTypeIntrs(Instruction *I, IRBuilder<> &B);
  void insertAssignPtrTypeTargetExt(TargetExtType *AssignedType, Value *V,
                                    IRBuilder<> &B);
  void replacePointerOperandWithPtrCast(Instruction *I, Value *Pointer,
                                        Type *ExpectedElementType,
                                        unsigned OperandToReplace,
                                        IRBuilder<> &B);
  void insertPtrCastOrAssignTypeInstr(Instruction *I, IRBuilder<> &B);
  bool shouldTryToAddMemAliasingDecoration(Instruction *Inst);
  void insertConstantsForFPFastMathDefault(Module &M);
  void processGlobalValue(GlobalVariable &GV, IRBuilder<> &B);
  void processParamTypes(Function *F, IRBuilder<> &B);
  void processParamTypesByFunHeader(Function *F, IRBuilder<> &B);
  Type *deduceFunParamElementType(Function *F, unsigned OpIdx);
  Type *deduceFunParamElementType(Function *F, unsigned OpIdx,
                                  std::unordered_set<Function *> &FVisited);

  bool deduceOperandElementTypeCalledFunction(
      CallInst *CI, SmallVector<std::pair<Value *, unsigned>> &Ops,
      Type *&KnownElemTy, bool &Incomplete);
  void deduceOperandElementTypeFunctionPointer(
      CallInst *CI, SmallVector<std::pair<Value *, unsigned>> &Ops,
      Type *&KnownElemTy, bool IsPostprocessing);
  bool deduceOperandElementTypeFunctionRet(
      Instruction *I, SmallPtrSet<Instruction *, 4> *IncompleteRets,
      const SmallPtrSet<Value *, 4> *AskOps, bool IsPostprocessing,
      Type *&KnownElemTy, Value *Op, Function *F);

  CallInst *buildSpvPtrcast(Function *F, Value *Op, Type *ElemTy);
  void replaceUsesOfWithSpvPtrcast(Value *Op, Type *ElemTy, Instruction *I,
                                   DenseMap<Function *, CallInst *> Ptrcasts);
  void propagateElemType(Value *Op, Type *ElemTy,
                         DenseSet<std::pair<Value *, Value *>> &VisitedSubst);
  void
  propagateElemTypeRec(Value *Op, Type *PtrElemTy, Type *CastElemTy,
                       DenseSet<std::pair<Value *, Value *>> &VisitedSubst);
  void propagateElemTypeRec(Value *Op, Type *PtrElemTy, Type *CastElemTy,
                            DenseSet<std::pair<Value *, Value *>> &VisitedSubst,
                            std::unordered_set<Value *> &Visited,
                            DenseMap<Function *, CallInst *> Ptrcasts);

  void replaceAllUsesWith(Value *Src, Value *Dest, bool DeleteOld = true);
  void replaceAllUsesWithAndErase(IRBuilder<> &B, Instruction *Src,
                                  Instruction *Dest, bool DeleteOld = true);

  void applyDemangledPtrArgTypes(IRBuilder<> &B);

  GetElementPtrInst *simplifyZeroLengthArrayGepInst(GetElementPtrInst *GEP);

  bool runOnFunction(Function &F);
  bool postprocessTypes(Module &M);
  bool processFunctionPointers(Module &M);
  void parseFunDeclarations(Module &M);

  void useRoundingMode(ConstrainedFPIntrinsic *FPI, IRBuilder<> &B);

  // Tries to walk the type accessed by the given GEP instruction.
  // For each nested type access, one of the 2 callbacks is called:
  //  - OnLiteralIndexing when the index is a known constant value.
  //    Parameters:
  //      PointedType: the pointed type resulting from this indexing.
  //      Index: index of the element in the parent type.
  //        If the parent type is an array, this is the index in the array.
  //        If the parent type is a struct, this is the field index.
  //  - OnDynamicIndexing when the index is a non-constant value.
  //    This callback is only called when indexing into an array.
  //    Parameters:
  //      ElementType: the type of the elements stored in the parent array.
  //      Offset: the Value* containing the byte offset into the array.
  // Returns true if an error occurred during the walk, false otherwise.
  bool walkLogicalAccessChain(
      GetElementPtrInst &GEP,
      const std::function<void(Type *PointedType, uint64_t Index)>
          &OnLiteralIndexing,
      const std::function<void(Type *ElementType, Value *Offset)>
          &OnDynamicIndexing);
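
  // For instance (an illustrative sketch, not a transcript of an actual run):
  // walking
  //   %gep = getelementptr i8, ptr %p, i64 8
  // where %p is deduced to point to { [4 x i32], float } first reports the
  // struct field containing byte offset 8 and then the array element at that
  // offset, i.e. OnLiteralIndexing is called with ([4 x i32], 0) and then
  // with (i32, 2).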

  // Returns the type accessed using the given GEP instruction by relying
  // on the GEP type.
  // FIXME: GEP types are not supposed to be used to retrieve the pointed
  // type. This must be fixed.
  Type *getGEPType(GetElementPtrInst *GEP);

  // Returns the type accessed using the given GEP instruction by walking
  // the source type using the GEP indices.
  // FIXME: without help from the frontend, this method cannot reliably
  // retrieve the stored type, nor can it robustly determine the depth of
  // the type we are accessing.
  Type *getGEPTypeLogical(GetElementPtrInst *GEP);

  Instruction *buildLogicalAccessChainFromGEP(GetElementPtrInst &GEP);

public:
  static char ID;
  SPIRVEmitIntrinsics(SPIRVTargetMachine *TM = nullptr)
      : ModulePass(ID), TM(TM) {}
  Instruction *visitInstruction(Instruction &I) { return &I; }
  Instruction *visitSwitchInst(SwitchInst &I);
  Instruction *visitGetElementPtrInst(GetElementPtrInst &I);
  Instruction *visitBitCastInst(BitCastInst &I);
  Instruction *visitInsertElementInst(InsertElementInst &I);
  Instruction *visitExtractElementInst(ExtractElementInst &I);
  Instruction *visitInsertValueInst(InsertValueInst &I);
  Instruction *visitExtractValueInst(ExtractValueInst &I);
  Instruction *visitLoadInst(LoadInst &I);
  Instruction *visitStoreInst(StoreInst &I);
  Instruction *visitAllocaInst(AllocaInst &I);
  Instruction *visitAtomicCmpXchgInst(AtomicCmpXchgInst &I);
  Instruction *visitUnreachableInst(UnreachableInst &I);
  Instruction *visitCallInst(CallInst &I);

  StringRef getPassName() const override { return "SPIRV emit intrinsics"; }

  bool runOnModule(Module &M) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    ModulePass::getAnalysisUsage(AU);
  }
};

bool isConvergenceIntrinsic(const Instruction *I) {
  const auto *II = dyn_cast<IntrinsicInst>(I);
  if (!II)
    return false;

  return II->getIntrinsicID() == Intrinsic::experimental_convergence_entry ||
         II->getIntrinsicID() == Intrinsic::experimental_convergence_loop ||
         II->getIntrinsicID() == Intrinsic::experimental_convergence_anchor;
}

bool expectIgnoredInIRTranslation(const Instruction *I) {
  const auto *II = dyn_cast<IntrinsicInst>(I);
  if (!II)
    return false;
  switch (II->getIntrinsicID()) {
  case Intrinsic::invariant_start:
  case Intrinsic::spv_resource_handlefrombinding:
  case Intrinsic::spv_resource_getpointer:
    return true;
  default:
    return false;
  }
}

// Returns the source pointer from `I` ignoring intermediate ptrcast.
Value *getPointerRoot(Value *I) {
  if (auto *II = dyn_cast<IntrinsicInst>(I)) {
    if (II->getIntrinsicID() == Intrinsic::spv_ptrcast) {
      Value *V = II->getArgOperand(0);
      return getPointerRoot(V);
    }
  }
  return I;
}

} // namespace

char SPIRVEmitIntrinsics::ID = 0;

INITIALIZE_PASS(SPIRVEmitIntrinsics, "emit-intrinsics", "SPIRV emit intrinsics",
                false, false)

static inline bool isAssignTypeInstr(const Instruction *I) {
  return isa<IntrinsicInst>(I) &&
         cast<IntrinsicInst>(I)->getIntrinsicID() == Intrinsic::spv_assign_type;
}

static bool isMemInstrToReplace(Instruction *I) {
  return isa<LoadInst>(I) || isa<StoreInst>(I) || isa<InsertValueInst>(I) ||
         isa<ExtractValueInst>(I) || isa<AtomicCmpXchgInst>(I);
}

static bool isAggrConstForceInt32(const Value *V) {
  return isa<ConstantArray>(V) || isa<ConstantStruct>(V) ||
         isa<ConstantDataArray>(V) ||
         (isa<ConstantAggregateZero>(V) && !V->getType()->isVectorTy());
}

static void setInsertPointSkippingPhis(IRBuilder<> &B, Instruction *I) {
  if (isa<PHINode>(I))
    B.SetInsertPoint(I->getParent()->getFirstNonPHIOrDbgOrAlloca());
  else
    B.SetInsertPoint(I);
}

static void setInsertPointAfterDef(IRBuilder<> &B, Instruction *I) {
  B.SetCurrentDebugLocation(I->getDebugLoc());
  if (I->getType()->isVoidTy())
    B.SetInsertPoint(I->getNextNode());
  else
    B.SetInsertPoint(*I->getInsertionPointAfterDef());
}

static bool requireAssignType(Instruction *I) {
  if (const auto *Intr = dyn_cast<IntrinsicInst>(I)) {
    switch (Intr->getIntrinsicID()) {
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
      return false;
    }
  }
  return true;
}

static inline void reportFatalOnTokenType(const Instruction *I) {
  if (I->getType()->isTokenTy())
    report_fatal_error("A token is encountered but SPIR-V without extensions "
                       "does not support token type",
                       false);
}

static void emitAssignName(Instruction *I, IRBuilder<> &B) {
  if (!I->hasName() || I->getType()->isAggregateType() ||
      expectIgnoredInIRTranslation(I))
    return;

  if (isa<CallBase>(I)) {
    // TODO: this is a temporary workaround meant to prevent inserting internal
    // noise into the generated binary; remove once we rework the entire
    // aggregate removal machinery.
    StringRef Name = I->getName();
    if (Name.starts_with("spv.mutated_callsite"))
      return;
    if (Name.starts_with("spv.named_mutated_callsite"))
      I->setName(Name.substr(Name.rfind('.') + 1));
  }
  reportFatalOnTokenType(I);
  setInsertPointAfterDef(B, I);
  LLVMContext &Ctx = I->getContext();
  std::vector<Value *> Args = {
      I, MetadataAsValue::get(
             Ctx, MDNode::get(Ctx, MDString::get(Ctx, I->getName())))};
  B.CreateIntrinsic(Intrinsic::spv_assign_name, {I->getType()}, Args);
}

void SPIRVEmitIntrinsics::replaceAllUsesWith(Value *Src, Value *Dest,
                                             bool DeleteOld) {
  GR->replaceAllUsesWith(Src, Dest, DeleteOld);
  // Update incomplete type records if any
  if (isTodoType(Src)) {
    if (DeleteOld)
      eraseTodoType(Src);
    insertTodoType(Dest);
  }
}

void SPIRVEmitIntrinsics::replaceAllUsesWithAndErase(IRBuilder<> &B,
                                                     Instruction *Src,
                                                     Instruction *Dest,
                                                     bool DeleteOld) {
  replaceAllUsesWith(Src, Dest, DeleteOld);
  std::string Name = Src->hasName() ? Src->getName().str() : "";
  Src->eraseFromParent();
  if (!Name.empty()) {
    Dest->setName(Name);
    if (Named.insert(Dest).second)
      emitAssignName(Dest, B);
  }
}

static bool IsKernelArgInt8(Function *F, StoreInst *SI) {
  return SI && F->getCallingConv() == CallingConv::SPIR_KERNEL &&
         isPointerTy(SI->getValueOperand()->getType()) &&
         isa<Argument>(SI->getValueOperand());
}

// Maybe restore original function return type.
static inline Type *restoreMutatedType(SPIRVGlobalRegistry *GR, Instruction *I,
                                       Type *Ty) {
  CallInst *CI = dyn_cast<CallInst>(I);
  if (!CI || CI->isIndirectCall() || CI->isInlineAsm() ||
      !CI->getCalledFunction() || CI->getCalledFunction()->isIntrinsic())
    return Ty;
  if (Type *OriginalTy = GR->findMutated(CI->getCalledFunction()))
    return OriginalTy;
  return Ty;
}

// Reconstruct type with nested element types according to deduced type info.
// Return nullptr if no detailed type info is available.
Type *SPIRVEmitIntrinsics::reconstructType(Value *Op, bool UnknownElemTypeI8,
                                           bool IsPostprocessing) {
  Type *Ty = Op->getType();
  if (auto *OpI = dyn_cast<Instruction>(Op))
    Ty = restoreMutatedType(GR, OpI, Ty);
  if (!isUntypedPointerTy(Ty))
    return Ty;
  // try to find the pointee type
  if (Type *NestedTy = GR->findDeducedElementType(Op))
    return getTypedPointerWrapper(NestedTy, getPointerAddressSpace(Ty));
  // not a pointer according to the type info (e.g., Event object)
  CallInst *CI = GR->findAssignPtrTypeInstr(Op);
  if (CI) {
    MetadataAsValue *MD = cast<MetadataAsValue>(CI->getArgOperand(1));
    return cast<ConstantAsMetadata>(MD->getMetadata())->getType();
  }
  if (UnknownElemTypeI8) {
    if (!IsPostprocessing)
      insertTodoType(Op);
    return getTypedPointerWrapper(IntegerType::getInt8Ty(Op->getContext()),
                                  getPointerAddressSpace(Ty));
  }
  return nullptr;
}

CallInst *SPIRVEmitIntrinsics::buildSpvPtrcast(Function *F, Value *Op,
                                               Type *ElemTy) {
  IRBuilder<> B(Op->getContext());
  if (auto *OpI = dyn_cast<Instruction>(Op)) {
    // spv_ptrcast's argument Op denotes an instruction that generates
    // a value, and we may use getInsertionPointAfterDef()
    setInsertPointAfterDef(B, OpI);
  } else if (auto *OpA = dyn_cast<Argument>(Op)) {
    B.SetInsertPointPastAllocas(OpA->getParent());
    B.SetCurrentDebugLocation(DebugLoc());
  } else {
    B.SetInsertPoint(F->getEntryBlock().getFirstNonPHIOrDbgOrAlloca());
  }
  Type *OpTy = Op->getType();
  SmallVector<Type *, 2> Types = {OpTy, OpTy};
  SmallVector<Value *, 2> Args = {Op, buildMD(getNormalizedPoisonValue(ElemTy)),
                                  B.getInt32(getPointerAddressSpace(OpTy))};
  CallInst *PtrCasted =
      B.CreateIntrinsic(Intrinsic::spv_ptrcast, {Types}, Args);
  GR->buildAssignPtr(B, ElemTy, PtrCasted);
  return PtrCasted;
}

void SPIRVEmitIntrinsics::replaceUsesOfWithSpvPtrcast(
    Value *Op, Type *ElemTy, Instruction *I,
    DenseMap<Function *, CallInst *> Ptrcasts) {
  Function *F = I->getParent()->getParent();
  CallInst *PtrCastedI = nullptr;
  auto It = Ptrcasts.find(F);
  if (It == Ptrcasts.end()) {
    PtrCastedI = buildSpvPtrcast(F, Op, ElemTy);
    Ptrcasts[F] = PtrCastedI;
  } else {
    PtrCastedI = It->second;
  }
  I->replaceUsesOfWith(Op, PtrCastedI);
}

void SPIRVEmitIntrinsics::propagateElemType(
    Value *Op, Type *ElemTy,
    DenseSet<std::pair<Value *, Value *>> &VisitedSubst) {
  DenseMap<Function *, CallInst *> Ptrcasts;
  SmallVector<User *> Users(Op->users());
  for (auto *U : Users) {
    if (!isa<Instruction>(U) || isSpvIntrinsic(U))
      continue;
    if (!VisitedSubst.insert(std::make_pair(U, Op)).second)
      continue;
    Instruction *UI = cast<Instruction>(U);
    // If the instruction was validated already, we need to keep it valid by
    // keeping current Op type.
    if (isa<GetElementPtrInst>(UI) ||
        TypeValidated.find(UI) != TypeValidated.end())
      replaceUsesOfWithSpvPtrcast(Op, ElemTy, UI, Ptrcasts);
  }
}

void SPIRVEmitIntrinsics::propagateElemTypeRec(
    Value *Op, Type *PtrElemTy, Type *CastElemTy,
    DenseSet<std::pair<Value *, Value *>> &VisitedSubst) {
  std::unordered_set<Value *> Visited;
  DenseMap<Function *, CallInst *> Ptrcasts;
  propagateElemTypeRec(Op, PtrElemTy, CastElemTy, VisitedSubst, Visited,
                       std::move(Ptrcasts));
}

void SPIRVEmitIntrinsics::propagateElemTypeRec(
    Value *Op, Type *PtrElemTy, Type *CastElemTy,
    DenseSet<std::pair<Value *, Value *>> &VisitedSubst,
    std::unordered_set<Value *> &Visited,
    DenseMap<Function *, CallInst *> Ptrcasts) {
  if (!Visited.insert(Op).second)
    return;
  SmallVector<User *> Users(Op->users());
  for (auto *U : Users) {
    if (!isa<Instruction>(U) || isSpvIntrinsic(U))
      continue;
    if (!VisitedSubst.insert(std::make_pair(U, Op)).second)
      continue;
    Instruction *UI = cast<Instruction>(U);
    // If the instruction was validated already, we need to keep it valid by
    // keeping current Op type.
    if (isa<GetElementPtrInst>(UI) ||
        TypeValidated.find(UI) != TypeValidated.end())
      replaceUsesOfWithSpvPtrcast(Op, CastElemTy, UI, Ptrcasts);
  }
}

// Sets the element pointer type to the given value of ValueTy and tries to
// specify this type further (recursively) by the Operand value, if needed.

Type *
SPIRVEmitIntrinsics::deduceElementTypeByValueDeep(Type *ValueTy, Value *Operand,
                                                  bool UnknownElemTypeI8) {
  std::unordered_set<Value *> Visited;
  return deduceElementTypeByValueDeep(ValueTy, Operand, Visited,
                                      UnknownElemTypeI8);
}

Type *SPIRVEmitIntrinsics::deduceElementTypeByValueDeep(
    Type *ValueTy, Value *Operand, std::unordered_set<Value *> &Visited,
    bool UnknownElemTypeI8) {
  Type *Ty = ValueTy;
  if (Operand) {
    if (auto *PtrTy = dyn_cast<PointerType>(Ty)) {
      if (Type *NestedTy =
              deduceElementTypeHelper(Operand, Visited, UnknownElemTypeI8))
        Ty = getTypedPointerWrapper(NestedTy, PtrTy->getAddressSpace());
    } else {
      Ty = deduceNestedTypeHelper(dyn_cast<User>(Operand), Ty, Visited,
                                  UnknownElemTypeI8);
    }
  }
  return Ty;
}

// Traverse User instructions to deduce an element pointer type of the operand.
Type *SPIRVEmitIntrinsics::deduceElementTypeByUsersDeep(
    Value *Op, std::unordered_set<Value *> &Visited, bool UnknownElemTypeI8) {
  if (!Op || !isPointerTy(Op->getType()) || isa<ConstantPointerNull>(Op) ||
      isa<UndefValue>(Op))
    return nullptr;

  if (auto ElemTy = getPointeeType(Op->getType()))
    return ElemTy;

  // maybe we already know operand's element type
  if (Type *KnownTy = GR->findDeducedElementType(Op))
    return KnownTy;

  for (User *OpU : Op->users()) {
    if (Instruction *Inst = dyn_cast<Instruction>(OpU)) {
      if (Type *Ty = deduceElementTypeHelper(Inst, Visited, UnknownElemTypeI8))
        return Ty;
    }
  }
  return nullptr;
}

// Implements what we know in advance about intrinsics and builtin calls
// TODO: consider feasibility of this particular case to be generalized by
// encoding knowledge about intrinsics and builtin calls by corresponding
// specification rules
static Type *getPointeeTypeByCallInst(const std::string &DemangledName,
                                      Function *CalledF, unsigned OpIdx) {
  if ((DemangledName.starts_with("__spirv_ocl_printf(") ||
       DemangledName.starts_with("printf(")) &&
      OpIdx == 0)
    return IntegerType::getInt8Ty(CalledF->getContext());
  return nullptr;
}

// Deduce and return a successfully deduced Type of the Instruction,
// or nullptr otherwise.
Type *SPIRVEmitIntrinsics::deduceElementTypeHelper(Value *I,
                                                   bool UnknownElemTypeI8) {
  std::unordered_set<Value *> Visited;
  return deduceElementTypeHelper(I, Visited, UnknownElemTypeI8);
}

void SPIRVEmitIntrinsics::maybeAssignPtrType(Type *&Ty, Value *Op, Type *RefTy,
                                             bool UnknownElemTypeI8) {
  if (isUntypedPointerTy(RefTy)) {
    if (!UnknownElemTypeI8)
      return;
    insertTodoType(Op);
  }
  Ty = RefTy;
}

bool SPIRVEmitIntrinsics::walkLogicalAccessChain(
    GetElementPtrInst &GEP,
    const std::function<void(Type *, uint64_t)> &OnLiteralIndexing,
    const std::function<void(Type *, Value *)> &OnDynamicIndexing) {
  // We only rewrite i8* GEPs. Others should be left as-is.
  // A valid i8* GEP must always have a single index.
  assert(GEP.getSourceElementType() ==
         IntegerType::getInt8Ty(CurrF->getContext()));
  assert(GEP.getNumIndices() == 1);

  auto &DL = CurrF->getDataLayout();
  Value *Src = getPointerRoot(GEP.getPointerOperand());
  Type *CurType = deduceElementType(Src, true);

  Value *Operand = *GEP.idx_begin();
  ConstantInt *CI = dyn_cast<ConstantInt>(Operand);
  if (!CI) {
    ArrayType *AT = dyn_cast<ArrayType>(CurType);
    // Operand is not constant. Either we have an array and accept it, or we
    // give up.
    if (AT)
      OnDynamicIndexing(AT->getElementType(), Operand);
    return AT == nullptr;
  }

  assert(CI);
  uint64_t Offset = CI->getZExtValue();

  do {
    if (ArrayType *AT = dyn_cast<ArrayType>(CurType)) {
      uint32_t EltTypeSize = DL.getTypeSizeInBits(AT->getElementType()) / 8;
      assert(Offset < AT->getNumElements() * EltTypeSize);
      uint64_t Index = Offset / EltTypeSize;
      Offset = Offset - (Index * EltTypeSize);
      CurType = AT->getElementType();
      OnLiteralIndexing(CurType, Index);
    } else if (StructType *ST = dyn_cast<StructType>(CurType)) {
      uint32_t StructSize = DL.getTypeSizeInBits(ST) / 8;
      assert(Offset < StructSize);
      (void)StructSize;
      const auto &STL = DL.getStructLayout(ST);
      unsigned Element = STL->getElementContainingOffset(Offset);
      Offset -= STL->getElementOffset(Element);
      CurType = ST->getElementType(Element);
      OnLiteralIndexing(CurType, Element);
    } else if (auto *VT = dyn_cast<FixedVectorType>(CurType)) {
      Type *EltTy = VT->getElementType();
      TypeSize EltSizeBits = DL.getTypeSizeInBits(EltTy);
      assert(EltSizeBits % 8 == 0 &&
             "Element type size in bits must be a multiple of 8.");
      uint32_t EltTypeSize = EltSizeBits / 8;
      assert(Offset < VT->getNumElements() * EltTypeSize);
      uint64_t Index = Offset / EltTypeSize;
      Offset -= Index * EltTypeSize;
      CurType = EltTy;
      OnLiteralIndexing(CurType, Index);
    } else {
      // Unknown composite kind; give up.
      return true;
    }
  } while (Offset > 0);

  return false;
}

Instruction *
SPIRVEmitIntrinsics::buildLogicalAccessChainFromGEP(GetElementPtrInst &GEP) {
  auto &DL = CurrF->getDataLayout();
  IRBuilder<> B(GEP.getParent());
  B.SetInsertPoint(&GEP);

  std::vector<Value *> Indices;
  Indices.push_back(ConstantInt::get(
      IntegerType::getInt32Ty(CurrF->getContext()), 0, /* Signed= */ false));
  walkLogicalAccessChain(
      GEP,
      [&Indices, &B](Type *EltType, uint64_t Index) {
        Indices.push_back(
            ConstantInt::get(B.getInt64Ty(), Index, /* Signed= */ false));
      },
      [&Indices, &B, &DL](Type *EltType, Value *Offset) {
        uint32_t EltTypeSize = DL.getTypeSizeInBits(EltType) / 8;
        Value *Index = B.CreateUDiv(
            Offset, ConstantInt::get(Offset->getType(), EltTypeSize,
                                     /* Signed= */ false));
        Indices.push_back(Index);
      });

  SmallVector<Type *, 2> Types = {GEP.getType(), GEP.getOperand(0)->getType()};
  SmallVector<Value *, 4> Args;
  Args.push_back(B.getInt1(GEP.isInBounds()));
  Args.push_back(GEP.getOperand(0));
  llvm::append_range(Args, Indices);
  auto *NewI = B.CreateIntrinsic(Intrinsic::spv_gep, {Types}, {Args});
  replaceAllUsesWithAndErase(B, &GEP, NewI);
  return NewI;
}
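
// As an illustrative sketch (not code from this file): with %p deduced to
// point to { [4 x i32], float }, the byte-addressed access
//   %q = getelementptr i8, ptr %p, i64 8
// is rebuilt by buildLogicalAccessChainFromGEP into a typed access chain
// equivalent to
//   %q = getelementptr { [4 x i32], float }, ptr %p, i32 0, i64 0, i64 2
// emitted as a call to the llvm.spv.gep intrinsic.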

Type *SPIRVEmitIntrinsics::getGEPTypeLogical(GetElementPtrInst *GEP) {
  Type *CurType = GEP->getResultElementType();

  bool Interrupted = walkLogicalAccessChain(
      *GEP, [&CurType](Type *EltType, uint64_t Index) { CurType = EltType; },
      [&CurType](Type *EltType, Value *Index) { CurType = EltType; });

  return Interrupted ? GEP->getResultElementType() : CurType;
}

Type *SPIRVEmitIntrinsics::getGEPType(GetElementPtrInst *Ref) {
  if (Ref->getSourceElementType() ==
          IntegerType::getInt8Ty(CurrF->getContext()) &&
      TM->getSubtargetImpl()->isLogicalSPIRV()) {
    return getGEPTypeLogical(Ref);
  }

  Type *Ty = nullptr;
  // TODO: not sure if GetElementPtrInst::getTypeAtIndex() does anything
  // useful here
  if (isNestedPointer(Ref->getSourceElementType())) {
    Ty = Ref->getSourceElementType();
    for (Use &U : drop_begin(Ref->indices()))
      Ty = GetElementPtrInst::getTypeAtIndex(Ty, U.get());
  } else {
    Ty = Ref->getResultElementType();
  }
  return Ty;
}

Type *SPIRVEmitIntrinsics::deduceElementTypeHelper(
    Value *I, std::unordered_set<Value *> &Visited, bool UnknownElemTypeI8,
    bool IgnoreKnownType) {
  // allow passing nullptr as an argument
  if (!I)
    return nullptr;

  // maybe already known
  if (!IgnoreKnownType)
    if (Type *KnownTy = GR->findDeducedElementType(I))
      return KnownTy;

  // maybe a cycle
  if (!Visited.insert(I).second)
    return nullptr;

  // fallback value in case when we fail to deduce a type
  Type *Ty = nullptr;
  // look for known basic patterns of type inference
  if (auto *Ref = dyn_cast<AllocaInst>(I)) {
    maybeAssignPtrType(Ty, I, Ref->getAllocatedType(), UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<GetElementPtrInst>(I)) {
    Ty = getGEPType(Ref);
  } else if (auto *Ref = dyn_cast<LoadInst>(I)) {
    Value *Op = Ref->getPointerOperand();
    Type *KnownTy = GR->findDeducedElementType(Op);
    if (!KnownTy)
      KnownTy = Op->getType();
    if (Type *ElemTy = getPointeeType(KnownTy))
      maybeAssignPtrType(Ty, I, ElemTy, UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<GlobalValue>(I)) {
    if (auto *Fn = dyn_cast<Function>(Ref)) {
      Ty = SPIRV::getOriginalFunctionType(*Fn);
      GR->addDeducedElementType(I, Ty);
    } else {
      Ty = deduceElementTypeByValueDeep(
          Ref->getValueType(),
          Ref->getNumOperands() > 0 ? Ref->getOperand(0) : nullptr, Visited,
          UnknownElemTypeI8);
    }
  } else if (auto *Ref = dyn_cast<AddrSpaceCastInst>(I)) {
    Type *RefTy = deduceElementTypeHelper(Ref->getPointerOperand(), Visited,
                                          UnknownElemTypeI8);
    maybeAssignPtrType(Ty, I, RefTy, UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<IntToPtrInst>(I)) {
    maybeAssignPtrType(Ty, I, Ref->getDestTy(), UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<BitCastInst>(I)) {
    if (Type *Src = Ref->getSrcTy(), *Dest = Ref->getDestTy();
        isPointerTy(Src) && isPointerTy(Dest))
      Ty = deduceElementTypeHelper(Ref->getOperand(0), Visited,
                                   UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<AtomicCmpXchgInst>(I)) {
    Value *Op = Ref->getNewValOperand();
    if (isPointerTy(Op->getType()))
      Ty = deduceElementTypeHelper(Op, Visited, UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<AtomicRMWInst>(I)) {
    Value *Op = Ref->getValOperand();
    if (isPointerTy(Op->getType()))
      Ty = deduceElementTypeHelper(Op, Visited, UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<PHINode>(I)) {
    Type *BestTy = nullptr;
    unsigned MaxN = 1;
    DenseMap<Type *, unsigned> PhiTys;
    for (int i = Ref->getNumIncomingValues() - 1; i >= 0; --i) {
      Ty = deduceElementTypeByUsersDeep(Ref->getIncomingValue(i), Visited,
                                        UnknownElemTypeI8);
      if (!Ty)
        continue;
      auto It = PhiTys.try_emplace(Ty, 1);
      if (!It.second) {
        ++It.first->second;
        if (It.first->second > MaxN) {
          MaxN = It.first->second;
          BestTy = Ty;
        }
      }
    }
    if (BestTy)
      Ty = BestTy;
  } else if (auto *Ref = dyn_cast<SelectInst>(I)) {
    for (Value *Op : {Ref->getTrueValue(), Ref->getFalseValue()}) {
      Ty = deduceElementTypeByUsersDeep(Op, Visited, UnknownElemTypeI8);
      if (Ty)
        break;
    }
  } else if (auto *CI = dyn_cast<CallInst>(I)) {
    static StringMap<unsigned> ResTypeByArg = {
        {"to_global", 0},
        {"to_local", 0},
        {"to_private", 0},
        {"__spirv_GenericCastToPtr_ToGlobal", 0},
        {"__spirv_GenericCastToPtr_ToLocal", 0},
        {"__spirv_GenericCastToPtr_ToPrivate", 0},
        {"__spirv_GenericCastToPtrExplicit_ToGlobal", 0},
        {"__spirv_GenericCastToPtrExplicit_ToLocal", 0},
        {"__spirv_GenericCastToPtrExplicit_ToPrivate", 0}};
    // TODO: maybe improve performance by caching demangled names

    auto *II = dyn_cast<IntrinsicInst>(CI);
    if (II && II->getIntrinsicID() == Intrinsic::spv_resource_getpointer) {
      auto *HandleType = cast<TargetExtType>(II->getOperand(0)->getType());
      if (HandleType->getTargetExtName() == "spirv.Image" ||
          HandleType->getTargetExtName() == "spirv.SignedImage") {
        for (User *U : II->users()) {
          Ty = cast<Instruction>(U)->getAccessType();
          if (Ty)
            break;
        }
      } else if (HandleType->getTargetExtName() == "spirv.VulkanBuffer") {
        // This call is supposed to index into an array
        Ty = HandleType->getTypeParameter(0);
        if (Ty->isArrayTy())
          Ty = Ty->getArrayElementType();
        else {
          assert(Ty && Ty->isStructTy());
          uint32_t Index = cast<ConstantInt>(II->getOperand(1))->getZExtValue();
          Ty = cast<StructType>(Ty)->getElementType(Index);
        }
      } else {
        llvm_unreachable("Unknown handle type for spv_resource_getpointer.");
      }
    } else if (II && II->getIntrinsicID() ==
                         Intrinsic::spv_generic_cast_to_ptr_explicit) {
      Ty = deduceElementTypeHelper(CI->getArgOperand(0), Visited,
                                   UnknownElemTypeI8);
    } else if (Function *CalledF = CI->getCalledFunction()) {
      std::string DemangledName =
          getOclOrSpirvBuiltinDemangledName(CalledF->getName());
      if (DemangledName.length() > 0)
        DemangledName = SPIRV::lookupBuiltinNameHelper(DemangledName);
      auto AsArgIt = ResTypeByArg.find(DemangledName);
      if (AsArgIt != ResTypeByArg.end())
        Ty = deduceElementTypeHelper(CI->getArgOperand(AsArgIt->second),
                                     Visited, UnknownElemTypeI8);
      else if (Type *KnownRetTy = GR->findDeducedElementType(CalledF))
        Ty = KnownRetTy;
    }
  }

  // remember the found relationship
  if (Ty && !IgnoreKnownType) {
    // specify nested types if needed, otherwise return unchanged
    GR->addDeducedElementType(I, normalizeType(Ty));
  }

  return Ty;
}

// Re-create a type of the value if it has untyped pointer fields, also nested.
// Return the original value type if no corrections of untyped pointer
// information are found or needed.
Type *SPIRVEmitIntrinsics::deduceNestedTypeHelper(User *U,
                                                  bool UnknownElemTypeI8) {
  std::unordered_set<Value *> Visited;
  return deduceNestedTypeHelper(U, U->getType(), Visited, UnknownElemTypeI8);
}

Type *SPIRVEmitIntrinsics::deduceNestedTypeHelper(
    User *U, Type *OrigTy, std::unordered_set<Value *> &Visited,
    bool UnknownElemTypeI8) {
  if (!U)
    return OrigTy;

  // maybe already known
  if (Type *KnownTy = GR->findDeducedCompositeType(U))
    return KnownTy;

  // maybe a cycle
  if (!Visited.insert(U).second)
    return OrigTy;

  if (isa<StructType>(OrigTy)) {
    SmallVector<Type *> Tys;
    bool Change = false;
    for (unsigned i = 0; i < U->getNumOperands(); ++i) {
      Value *Op = U->getOperand(i);
      assert(Op && "Operands should not be null.");
      Type *OpTy = Op->getType();
      Type *Ty = OpTy;
      if (auto *PtrTy = dyn_cast<PointerType>(OpTy)) {
        if (Type *NestedTy =
                deduceElementTypeHelper(Op, Visited, UnknownElemTypeI8))
          Ty = getTypedPointerWrapper(NestedTy, PtrTy->getAddressSpace());
      } else {
        Ty = deduceNestedTypeHelper(dyn_cast<User>(Op), OpTy, Visited,
                                    UnknownElemTypeI8);
      }
      Tys.push_back(Ty);
      Change |= Ty != OpTy;
    }
    if (Change) {
      Type *NewTy = StructType::create(Tys);
      GR->addDeducedCompositeType(U, NewTy);
      return NewTy;
    }
  } else if (auto *ArrTy = dyn_cast<ArrayType>(OrigTy)) {
    if (Value *Op = U->getNumOperands() > 0 ? U->getOperand(0) : nullptr) {
      Type *OpTy = ArrTy->getElementType();
      Type *Ty = OpTy;
      if (auto *PtrTy = dyn_cast<PointerType>(OpTy)) {
        if (Type *NestedTy =
                deduceElementTypeHelper(Op, Visited, UnknownElemTypeI8))
          Ty = getTypedPointerWrapper(NestedTy, PtrTy->getAddressSpace());
      } else {
        Ty = deduceNestedTypeHelper(dyn_cast<User>(Op), OpTy, Visited,
                                    UnknownElemTypeI8);
      }
      if (Ty != OpTy) {
        Type *NewTy = ArrayType::get(Ty, ArrTy->getNumElements());
        GR->addDeducedCompositeType(U, NewTy);
        return NewTy;
      }
    }
  } else if (auto *VecTy = dyn_cast<VectorType>(OrigTy)) {
    if (Value *Op = U->getNumOperands() > 0 ? U->getOperand(0) : nullptr) {
      Type *OpTy = VecTy->getElementType();
      Type *Ty = OpTy;
      if (auto *PtrTy = dyn_cast<PointerType>(OpTy)) {
        if (Type *NestedTy =
                deduceElementTypeHelper(Op, Visited, UnknownElemTypeI8))
          Ty = getTypedPointerWrapper(NestedTy, PtrTy->getAddressSpace());
      } else {
        Ty = deduceNestedTypeHelper(dyn_cast<User>(Op), OpTy, Visited,
                                    UnknownElemTypeI8);
      }
      if (Ty != OpTy) {
        Type *NewTy = VectorType::get(Ty, VecTy->getElementCount());
        GR->addDeducedCompositeType(U, NewTy);
        return NewTy;
      }
    }
  }

  return OrigTy;
}

Type *SPIRVEmitIntrinsics::deduceElementType(Value *I, bool UnknownElemTypeI8) {
  if (Type *Ty = deduceElementTypeHelper(I, UnknownElemTypeI8))
    return Ty;
  if (!UnknownElemTypeI8)
    return nullptr;
  insertTodoType(I);
  return IntegerType::getInt8Ty(I->getContext());
}
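
// For example (an illustrative sketch): for
//   %p = alloca i32
// deduceElementType(%p, true) returns i32, while for a pointer whose pointee
// cannot be deduced it falls back to i8 and records the value in TodoType so
// that postprocessTypes() can revisit it once more information is available.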

static Type *getAtomicElemTy(SPIRVGlobalRegistry *GR, Instruction *I,
                             Value *PointerOperand) {
  Type *PointeeTy = GR->findDeducedElementType(PointerOperand);
  if (PointeeTy && !isUntypedPointerTy(PointeeTy))
    return nullptr;
  auto *PtrTy = dyn_cast<PointerType>(I->getType());
  if (!PtrTy)
    return I->getType();
  if (Type *NestedTy = GR->findDeducedElementType(I))
    return getTypedPointerWrapper(NestedTy, PtrTy->getAddressSpace());
  return nullptr;
}

// Try to deduce element type for a call base. Returns false if this is an
// indirect function invocation, and true otherwise.
bool SPIRVEmitIntrinsics::deduceOperandElementTypeCalledFunction(
    CallInst *CI, SmallVector<std::pair<Value *, unsigned>> &Ops,
    Type *&KnownElemTy, bool &Incomplete) {
  Function *CalledF = CI->getCalledFunction();
  if (!CalledF)
    return false;
  std::string DemangledName =
      getOclOrSpirvBuiltinDemangledName(CalledF->getName());
  if (DemangledName.length() > 0 &&
      !StringRef(DemangledName).starts_with("llvm.")) {
    const SPIRVSubtarget &ST = TM->getSubtarget<SPIRVSubtarget>(*CalledF);
    auto [Grp, Opcode, ExtNo] = SPIRV::mapBuiltinToOpcode(
        DemangledName, ST.getPreferredInstructionSet());
    if (Opcode == SPIRV::OpGroupAsyncCopy) {
      for (unsigned i = 0, PtrCnt = 0; i < CI->arg_size() && PtrCnt < 2; ++i) {
        Value *Op = CI->getArgOperand(i);
        if (!isPointerTy(Op->getType()))
          continue;
        ++PtrCnt;
        if (Type *ElemTy = GR->findDeducedElementType(Op))
          KnownElemTy = ElemTy; // src will rewrite dest if both are defined
        Ops.push_back(std::make_pair(Op, i));
      }
    } else if (Grp == SPIRV::Atomic || Grp == SPIRV::AtomicFloating) {
      if (CI->arg_size() == 0)
        return true;
      Value *Op = CI->getArgOperand(0);
      if (!isPointerTy(Op->getType()))
        return true;
      switch (Opcode) {
      case SPIRV::OpAtomicFAddEXT:
      case SPIRV::OpAtomicFMinEXT:
      case SPIRV::OpAtomicFMaxEXT:
      case SPIRV::OpAtomicLoad:
      case SPIRV::OpAtomicCompareExchangeWeak:
      case SPIRV::OpAtomicCompareExchange:
      case SPIRV::OpAtomicExchange:
      case SPIRV::OpAtomicIAdd:
      case SPIRV::OpAtomicISub:
      case SPIRV::OpAtomicOr:
      case SPIRV::OpAtomicXor:
      case SPIRV::OpAtomicAnd:
      case SPIRV::OpAtomicUMin:
      case SPIRV::OpAtomicUMax:
      case SPIRV::OpAtomicSMin:
      case SPIRV::OpAtomicSMax: {
        KnownElemTy = isPointerTy(CI->getType()) ? getAtomicElemTy(GR, CI, Op)
                                                 : CI->getType();
        if (!KnownElemTy)
          return true;
        Incomplete = isTodoType(Op);
        Ops.push_back(std::make_pair(Op, 0));
      } break;
      case SPIRV::OpAtomicStore: {
        if (CI->arg_size() < 4)
          return true;
        Value *ValOp = CI->getArgOperand(3);
        KnownElemTy = isPointerTy(ValOp->getType())
                          ? getAtomicElemTy(GR, CI, Op)
                          : ValOp->getType();
        if (!KnownElemTy)
          return true;
        Incomplete = isTodoType(Op);
        Ops.push_back(std::make_pair(Op, 0));
      } break;
      }
    }
  }
  return true;
}

// Try to deduce element type for a function pointer.
void SPIRVEmitIntrinsics::deduceOperandElementTypeFunctionPointer(
    CallInst *CI, SmallVector<std::pair<Value *, unsigned>> &Ops,
    Type *&KnownElemTy, bool IsPostprocessing) {
  Value *Op = CI->getCalledOperand();
  if (!Op || !isPointerTy(Op->getType()))
    return;
  Ops.push_back(std::make_pair(Op, std::numeric_limits<unsigned>::max()));
  FunctionType *FTy = SPIRV::getOriginalFunctionType(*CI);
  bool IsNewFTy = false, IsIncomplete = false;
  SmallVector<Type *, 4> ArgTys;
  for (auto &&[ParmIdx, Arg] : llvm::enumerate(CI->args())) {
    Type *ArgTy = Arg->getType();
    if (ArgTy->isPointerTy()) {
      if (Type *ElemTy = GR->findDeducedElementType(Arg)) {
        IsNewFTy = true;
        ArgTy = getTypedPointerWrapper(ElemTy, getPointerAddressSpace(ArgTy));
        if (isTodoType(Arg))
          IsIncomplete = true;
      } else {
        IsIncomplete = true;
      }
    } else {
      ArgTy = FTy->getFunctionParamType(ParmIdx);
    }
    ArgTys.push_back(ArgTy);
  }
  Type *RetTy = FTy->getReturnType();
  if (CI->getType()->isPointerTy()) {
    if (Type *ElemTy = GR->findDeducedElementType(CI)) {
      IsNewFTy = true;
      RetTy = getTypedPointerWrapper(ElemTy,
                                     getPointerAddressSpace(CI->getType()));
      if (isTodoType(CI))
        IsIncomplete = true;
    } else {
      IsIncomplete = true;
    }
  }
  if (!IsPostprocessing && IsIncomplete)
    insertTodoType(Op);
  KnownElemTy =
      IsNewFTy ? FunctionType::get(RetTy, ArgTys, FTy->isVarArg()) : FTy;
}

bool SPIRVEmitIntrinsics::deduceOperandElementTypeFunctionRet(
    Instruction *I, SmallPtrSet<Instruction *, 4> *IncompleteRets,
    const SmallPtrSet<Value *, 4> *AskOps, bool IsPostprocessing,
    Type *&KnownElemTy, Value *Op, Function *F) {
  KnownElemTy = GR->findDeducedElementType(F);
  if (KnownElemTy)
    return false;
  if (Type *OpElemTy = GR->findDeducedElementType(Op)) {
    OpElemTy = normalizeType(OpElemTy);
    GR->addDeducedElementType(F, OpElemTy);
    GR->addReturnType(
        F, TypedPointerType::get(OpElemTy,
                                 getPointerAddressSpace(F->getReturnType())));
    // non-recursive update of types in function uses
    DenseSet<std::pair<Value *, Value *>> VisitedSubst{std::make_pair(I, Op)};
    for (User *U : F->users()) {
      CallInst *CI = dyn_cast<CallInst>(U);
      if (!CI || CI->getCalledFunction() != F)
        continue;
      if (CallInst *AssignCI = GR->findAssignPtrTypeInstr(CI)) {
        if (Type *PrevElemTy = GR->findDeducedElementType(CI)) {
          GR->updateAssignType(AssignCI, CI,
                               getNormalizedPoisonValue(OpElemTy));
          propagateElemType(CI, PrevElemTy, VisitedSubst);
        }
      }
    }
    // Non-recursive update of types in the function's incomplete returns.
    // This may happen just once per function, the latch is a pair of
    // findDeducedElementType(F) / addDeducedElementType(F, ...).
    // With or without the latch it is a non-recursive call due to
    // IncompleteRets set to nullptr in this call.
    if (IncompleteRets)
      for (Instruction *IncompleteRetI : *IncompleteRets)
        deduceOperandElementType(IncompleteRetI, nullptr, AskOps,
                                 IsPostprocessing);
  } else if (IncompleteRets) {
    IncompleteRets->insert(I);
  }
  TypeValidated.insert(I);
  return true;
}

// If the Instruction has Pointer operands with unresolved types, this function
// tries to deduce them. If the Instruction has Pointer operands with known
// types which differ from expected, this function tries to insert a bitcast to
// resolve the issue.
void SPIRVEmitIntrinsics::deduceOperandElementType(
    Instruction *I, SmallPtrSet<Instruction *, 4> *IncompleteRets,
    const SmallPtrSet<Value *, 4> *AskOps, bool IsPostprocessing) {
  SmallVector<std::pair<Value *, unsigned>> Ops;
  Type *KnownElemTy = nullptr;
  bool Incomplete = false;
  // look for known basic patterns of type inference
  if (auto *Ref = dyn_cast<PHINode>(I)) {
    if (!isPointerTy(I->getType()) ||
        !(KnownElemTy = GR->findDeducedElementType(I)))
      return;
    Incomplete = isTodoType(I);
    for (unsigned i = 0; i < Ref->getNumIncomingValues(); i++) {
      Value *Op = Ref->getIncomingValue(i);
      if (isPointerTy(Op->getType()))
        Ops.push_back(std::make_pair(Op, i));
    }
  } else if (auto *Ref = dyn_cast<AddrSpaceCastInst>(I)) {
    KnownElemTy = GR->findDeducedElementType(I);
    if (!KnownElemTy)
      return;
    Incomplete = isTodoType(I);
    Ops.push_back(std::make_pair(Ref->getPointerOperand(), 0));
  } else if (auto *Ref = dyn_cast<BitCastInst>(I)) {
    if (!isPointerTy(I->getType()))
      return;
    KnownElemTy = GR->findDeducedElementType(I);
    if (!KnownElemTy)
      return;
    Incomplete = isTodoType(I);
    Ops.push_back(std::make_pair(Ref->getOperand(0), 0));
  } else if (auto *Ref = dyn_cast<GetElementPtrInst>(I)) {
    if (GR->findDeducedElementType(Ref->getPointerOperand()))
      return;
    KnownElemTy = Ref->getSourceElementType();
    Ops.push_back(std::make_pair(Ref->getPointerOperand(),
                                 GetElementPtrInst::getPointerOperandIndex()));
  } else if (auto *Ref = dyn_cast<LoadInst>(I)) {
    KnownElemTy = I->getType();
    if (isUntypedPointerTy(KnownElemTy))
      return;
    Type *PointeeTy = GR->findDeducedElementType(Ref->getPointerOperand());
    if (PointeeTy && !isUntypedPointerTy(PointeeTy))
      return;
    Ops.push_back(std::make_pair(Ref->getPointerOperand(),
                                 LoadInst::getPointerOperandIndex()));
  } else if (auto *Ref = dyn_cast<StoreInst>(I)) {
    if (!(KnownElemTy =
              reconstructType(Ref->getValueOperand(), false, IsPostprocessing)))
      return;
    Type *PointeeTy = GR->findDeducedElementType(Ref->getPointerOperand());
    if (PointeeTy && !isUntypedPointerTy(PointeeTy))
      return;
    Ops.push_back(std::make_pair(Ref->getPointerOperand(),
                                 StoreInst::getPointerOperandIndex()));
  } else if (auto *Ref = dyn_cast<AtomicCmpXchgInst>(I)) {
    KnownElemTy = isPointerTy(I->getType())
                      ? getAtomicElemTy(GR, I, Ref->getPointerOperand())
                      : I->getType();
    if (!KnownElemTy)
      return;
    Incomplete = isTodoType(Ref->getPointerOperand());
    Ops.push_back(std::make_pair(Ref->getPointerOperand(),
                                 AtomicCmpXchgInst::getPointerOperandIndex()));
  } else if (auto *Ref = dyn_cast<AtomicRMWInst>(I)) {
    KnownElemTy = isPointerTy(I->getType())
                      ? getAtomicElemTy(GR, I, Ref->getPointerOperand())
                      : I->getType();
    if (!KnownElemTy)
      return;
    Incomplete = isTodoType(Ref->getPointerOperand());
    Ops.push_back(std::make_pair(Ref->getPointerOperand(),
                                 AtomicRMWInst::getPointerOperandIndex()));
  } else if (auto *Ref = dyn_cast<SelectInst>(I)) {
    if (!isPointerTy(I->getType()) ||
        !(KnownElemTy = GR->findDeducedElementType(I)))
      return;
    Incomplete = isTodoType(I);
    for (unsigned i = 0; i < Ref->getNumOperands(); i++) {
      Value *Op = Ref->getOperand(i);
      if (isPointerTy(Op->getType()))
        Ops.push_back(std::make_pair(Op, i));
    }
  } else if (auto *Ref = dyn_cast<ReturnInst>(I)) {
    if (!isPointerTy(CurrF->getReturnType()))
      return;
    Value *Op = Ref->getReturnValue();
    if (!Op)
      return;
    if (deduceOperandElementTypeFunctionRet(I, IncompleteRets, AskOps,
                                            IsPostprocessing, KnownElemTy, Op,
                                            CurrF))
      return;
    Incomplete = isTodoType(CurrF);
    Ops.push_back(std::make_pair(Op, 0));
  } else if (auto *Ref = dyn_cast<ICmpInst>(I)) {
    if (!isPointerTy(Ref->getOperand(0)->getType()))
      return;
    Value *Op0 = Ref->getOperand(0);
    Value *Op1 = Ref->getOperand(1);
    bool Incomplete0 = isTodoType(Op0);
    bool Incomplete1 = isTodoType(Op1);
    Type *ElemTy1 = GR->findDeducedElementType(Op1);
    Type *ElemTy0 = (Incomplete0 && !Incomplete1 && ElemTy1)
                        ? nullptr
                        : GR->findDeducedElementType(Op0);
    if (ElemTy0) {
      KnownElemTy = ElemTy0;
      Incomplete = Incomplete0;
      Ops.push_back(std::make_pair(Op1, 1));
    } else if (ElemTy1) {
      KnownElemTy = ElemTy1;
      Incomplete = Incomplete1;
      Ops.push_back(std::make_pair(Op0, 0));
    }
  } else if (CallInst *CI = dyn_cast<CallInst>(I)) {
    if (!CI->isIndirectCall())
      deduceOperandElementTypeCalledFunction(CI, Ops, KnownElemTy, Incomplete);
    else if (HaveFunPtrs)
      deduceOperandElementTypeFunctionPointer(CI, Ops, KnownElemTy,
                                              IsPostprocessing);
  }

  // There is not enough info to deduce types, or all is valid.
  if (!KnownElemTy || Ops.size() == 0)
    return;

  LLVMContext &Ctx = CurrF->getContext();
  IRBuilder<> B(Ctx);
  for (auto &OpIt : Ops) {
    Value *Op = OpIt.first;
    if (AskOps && !AskOps->contains(Op))
      continue;
    Type *AskTy = nullptr;
    CallInst *AskCI = nullptr;
    if (IsPostprocessing && AskOps) {
      AskTy = GR->findDeducedElementType(Op);
      AskCI = GR->findAssignPtrTypeInstr(Op);
      assert(AskTy && AskCI);
    }
    Type *Ty = AskTy ? AskTy : GR->findDeducedElementType(Op);
    if (Ty == KnownElemTy)
      continue;
    Value *OpTyVal = getNormalizedPoisonValue(KnownElemTy);
    Type *OpTy = Op->getType();
    if (Op->hasUseList() &&
        (!Ty || AskTy || isUntypedPointerTy(Ty) || isTodoType(Op))) {
      Type *PrevElemTy = GR->findDeducedElementType(Op);
      GR->addDeducedElementType(Op, normalizeType(KnownElemTy));
      // check if KnownElemTy is complete
      if (!Incomplete)
        eraseTodoType(Op);
      else if (!IsPostprocessing)
        insertTodoType(Op);
      // check if there is existing Intrinsic::spv_assign_ptr_type instruction
      CallInst *AssignCI = AskCI ? AskCI : GR->findAssignPtrTypeInstr(Op);
      if (AssignCI == nullptr) {
        Instruction *User = dyn_cast<Instruction>(Op->use_begin()->get());
        setInsertPointSkippingPhis(B, User ? User->getNextNode() : I);
        CallInst *CI =
            buildIntrWithMD(Intrinsic::spv_assign_ptr_type, {OpTy}, OpTyVal, Op,
                            {B.getInt32(getPointerAddressSpace(OpTy))}, B);
        GR->addAssignPtrTypeInstr(Op, CI);
      } else {
        GR->updateAssignType(AssignCI, Op, OpTyVal);
        DenseSet<std::pair<Value *, Value *>> VisitedSubst{
            std::make_pair(I, Op)};
        propagateElemTypeRec(Op, KnownElemTy, PrevElemTy, VisitedSubst);
      }
    } else {
      eraseTodoType(Op);
      CallInst *PtrCastI =
          buildSpvPtrcast(I->getParent()->getParent(), Op, KnownElemTy);
      if (OpIt.second == std::numeric_limits<unsigned>::max())
        dyn_cast<CallInst>(I)->setCalledOperand(PtrCastI);
      else
        I->setOperand(OpIt.second, PtrCastI);
    }
  }
  TypeValidated.insert(I);
}

void SPIRVEmitIntrinsics::replaceMemInstrUses(Instruction *Old,
                                              Instruction *New,
                                              IRBuilder<> &B) {
  while (!Old->user_empty()) {
    auto *U = Old->user_back();
    if (isAssignTypeInstr(U)) {
      B.SetInsertPoint(U);
      SmallVector<Value *, 2> Args = {New, U->getOperand(1)};
      CallInst *AssignCI =
          B.CreateIntrinsic(Intrinsic::spv_assign_type, {New->getType()}, Args);
      GR->addAssignPtrTypeInstr(New, AssignCI);
      U->eraseFromParent();
    } else if (isMemInstrToReplace(U) || isa<ReturnInst>(U) ||
               isa<CallInst>(U)) {
      U->replaceUsesOfWith(Old, New);
    } else {
      llvm_unreachable("illegal aggregate intrinsic user");
    }
  }
  New->copyMetadata(*Old);
  Old->eraseFromParent();
}

void SPIRVEmitIntrinsics::preprocessUndefs(IRBuilder<> &B) {
  std::queue<Instruction *> Worklist;
  for (auto &I : instructions(CurrF))
    Worklist.push(&I);

  while (!Worklist.empty()) {
    Instruction *I = Worklist.front();
    bool BPrepared = false;
    Worklist.pop();

    for (auto &Op : I->operands()) {
      auto *AggrUndef = dyn_cast<UndefValue>(Op);
      if (!AggrUndef || !Op->getType()->isAggregateType())
        continue;

      if (!BPrepared) {
        setInsertPointSkippingPhis(B, I);
        BPrepared = true;
      }
      auto *IntrUndef = B.CreateIntrinsic(Intrinsic::spv_undef, {});
      Worklist.push(IntrUndef);
      I->replaceUsesOfWith(Op, IntrUndef);
      AggrConsts[IntrUndef] = AggrUndef;
      AggrConstTypes[IntrUndef] = AggrUndef->getType();
    }
  }
}

void SPIRVEmitIntrinsics::preprocessCompositeConstants(IRBuilder<> &B) {
  std::queue<Instruction *> Worklist;
  for (auto &I : instructions(CurrF))
    Worklist.push(&I);

  while (!Worklist.empty()) {
    auto *I = Worklist.front();
    bool IsPhi = isa<PHINode>(I), BPrepared = false;
    assert(I);
    bool KeepInst = false;
    for (const auto &Op : I->operands()) {
      Constant *AggrConst = nullptr;
      Type *ResTy = nullptr;
      if (auto *COp = dyn_cast<ConstantVector>(Op)) {
        AggrConst = COp;
        ResTy = COp->getType();
      } else if (auto *COp = dyn_cast<ConstantArray>(Op)) {
        AggrConst = COp;
        ResTy = B.getInt32Ty();
      } else if (auto *COp = dyn_cast<ConstantStruct>(Op)) {
        AggrConst = COp;
        ResTy = B.getInt32Ty();
      } else if (auto *COp = dyn_cast<ConstantDataArray>(Op)) {
        AggrConst = COp;
        ResTy = B.getInt32Ty();
      } else if (auto *COp = dyn_cast<ConstantAggregateZero>(Op)) {
        AggrConst = COp;
        ResTy = Op->getType()->isVectorTy() ? COp->getType() : B.getInt32Ty();
      }
      if (AggrConst) {
        SmallVector<Value *> Args;
        if (auto *COp = dyn_cast<ConstantDataSequential>(Op))
          for (unsigned i = 0; i < COp->getNumElements(); ++i)
            Args.push_back(COp->getElementAsConstant(i));
        else
          llvm::append_range(Args, AggrConst->operands());
        if (!BPrepared) {
          IsPhi ? B.SetInsertPointPastAllocas(I->getParent()->getParent())
                : B.SetInsertPoint(I);
          BPrepared = true;
        }
        auto *CI =
            B.CreateIntrinsic(Intrinsic::spv_const_composite, {ResTy}, {Args});
        Worklist.push(CI);
        I->replaceUsesOfWith(Op, CI);
        KeepInst = true;
        AggrConsts[CI] = AggrConst;
        AggrConstTypes[CI] = deduceNestedTypeHelper(AggrConst, false);
      }
    }
    if (!KeepInst)
      Worklist.pop();
  }
}
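
// As an illustrative sketch (not code from this file), after this
// preprocessing a use of an aggregate constant such as
//   store [2 x i32] [i32 1, i32 2], ptr %p
// becomes roughly
//   %cc = call i32 @llvm.spv.const.composite(i32 1, i32 2)
//   store ... %cc, ptr %p
// with the original constant and its type remembered in AggrConsts and
// AggrConstTypes for the later stages of translation.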

static void createDecorationIntrinsic(Instruction *I, MDNode *Node,
                                      IRBuilder<> &B) {
  LLVMContext &Ctx = I->getContext();
  setInsertPointAfterDef(B, I);
  B.CreateIntrinsic(Intrinsic::spv_assign_decoration, {I->getType()},
                    {I, MetadataAsValue::get(Ctx, MDNode::get(Ctx, {Node}))});
}

static void createRoundingModeDecoration(Instruction *I,
                                         unsigned RoundingModeDeco,
                                         IRBuilder<> &B) {
  LLVMContext &Ctx = I->getContext();
  Type *Int32Ty = Type::getInt32Ty(Ctx);
  MDNode *RoundingModeNode = MDNode::get(
      Ctx,
      {ConstantAsMetadata::get(
           ConstantInt::get(Int32Ty, SPIRV::Decoration::FPRoundingMode)),
       ConstantAsMetadata::get(ConstantInt::get(Int32Ty, RoundingModeDeco))});
  createDecorationIntrinsic(I, RoundingModeNode, B);
}

static void createSaturatedConversionDecoration(Instruction *I,
                                                IRBuilder<> &B) {
  LLVMContext &Ctx = I->getContext();
  Type *Int32Ty = Type::getInt32Ty(Ctx);
  MDNode *SaturatedConversionNode =
      MDNode::get(Ctx, {ConstantAsMetadata::get(ConstantInt::get(
                           Int32Ty, SPIRV::Decoration::SaturatedConversion))});
  createDecorationIntrinsic(I, SaturatedConversionNode, B);
}

// Attach the SaturatedConversion decoration to saturating FP-to-integer
// conversion intrinsics.
static void createSaturatedConversionDecorations(Instruction *I,
                                                 IRBuilder<> &B) {
  if (auto *CI = dyn_cast<CallInst>(I)) {
    if (Function *Fu = CI->getCalledFunction()) {
      if (Fu->isIntrinsic()) {
        unsigned const int IntrinsicId = Fu->getIntrinsicID();
        switch (IntrinsicId) {
        case Intrinsic::fptosi_sat:
        case Intrinsic::fptoui_sat:
          createSaturatedConversionDecoration(CI, B);
          break;
        default:
          break;
        }
      }
    }
  }
}

Instruction *SPIRVEmitIntrinsics::visitCallInst(CallInst &Call) {
  if (!Call.isInlineAsm())
    return &Call;

  const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
  LLVMContext &Ctx = CurrF->getContext();

  Constant *TyC = UndefValue::get(IA->getFunctionType());
  MDString *ConstraintString = MDString::get(Ctx, IA->getConstraintString());
  SmallVector<Value *> Args = {
      buildMD(TyC),
      MetadataAsValue::get(Ctx, MDNode::get(Ctx, ConstraintString))};
  for (unsigned OpIdx = 0; OpIdx < Call.arg_size(); OpIdx++)
    Args.push_back(Call.getArgOperand(OpIdx));

  IRBuilder<> B(Call.getParent());
  B.SetInsertPoint(&Call);
  B.CreateIntrinsic(Intrinsic::spv_inline_asm, {Args});
  return &Call;
}

// Use a tip about rounding mode to create a decoration.
void SPIRVEmitIntrinsics::useRoundingMode(ConstrainedFPIntrinsic *FPI,
                                          IRBuilder<> &B) {
  std::optional<RoundingMode> RM = FPI->getRoundingMode();
  if (!RM.has_value())
    return;
  unsigned RoundingModeDeco = std::numeric_limits<unsigned>::max();
  switch (RM.value()) {
  default:
    // ignore unknown rounding modes
    break;
  case RoundingMode::NearestTiesToEven:
    RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTE;
    break;
  case RoundingMode::TowardNegative:
    RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTN;
    break;
  case RoundingMode::TowardPositive:
    RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTP;
    break;
  case RoundingMode::TowardZero:
    RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTZ;
    break;
  case RoundingMode::Dynamic:
  case RoundingMode::NearestTiesToAway:
    // TODO: check if supported
    break;
  }
  if (RoundingModeDeco == std::numeric_limits<unsigned>::max())
    return;
  // Convert the tip about rounding mode into a decoration record.
  createRoundingModeDecoration(FPI, RoundingModeDeco, B);
}
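
// For example (an illustrative sketch): a constrained intrinsic such as
//   %r = call float @llvm.experimental.constrained.fadd.f32(
//            float %a, float %b,
//            metadata !"round.towardzero", metadata !"fpexcept.ignore")
// carries the TowardZero rounding mode, so the instruction gets an
// FPRoundingMode RTZ decoration attached via llvm.spv.assign.decoration.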
1568
1569Instruction *SPIRVEmitIntrinsics::visitSwitchInst(SwitchInst &I) {
1570 BasicBlock *ParentBB = I.getParent();
1571 Function *F = ParentBB->getParent();
1572 IRBuilder<> B(ParentBB);
1573 B.SetInsertPoint(&I);
1576 Args.push_back(I.getCondition());
1577 BBCases.push_back(I.getDefaultDest());
1578 Args.push_back(BlockAddress::get(F, I.getDefaultDest()));
1579 for (auto &Case : I.cases()) {
1580 Args.push_back(Case.getCaseValue());
1581 BBCases.push_back(Case.getCaseSuccessor());
1582 Args.push_back(BlockAddress::get(F, Case.getCaseSuccessor()));
1583 }
1584 CallInst *NewI = B.CreateIntrinsic(Intrinsic::spv_switch,
1585 {I.getOperand(0)->getType()}, {Args});
1586 // remove switch to avoid its unneeded and undesirable unwrap into branches
1587 // and conditions
1588 replaceAllUsesWith(&I, NewI);
1589 I.eraseFromParent();
1590 // insert artificial and temporary instruction to preserve valid CFG,
1591 // it will be removed after IR translation pass
1592 B.SetInsertPoint(ParentBB);
1593 IndirectBrInst *BrI = B.CreateIndirectBr(
1594 Constant::getNullValue(PointerType::getUnqual(ParentBB->getContext())),
1595 BBCases.size());
1596 for (BasicBlock *BBCase : BBCases)
1597 BrI->addDestination(BBCase);
1598 return BrI;
1599}
1600
1601 static bool isFirstIndexZero(const GetElementPtrInst *GEP) {
1602 if (GEP->getNumIndices() == 0)
1603 return false;
1604 if (const auto *CI = dyn_cast<ConstantInt>(GEP->getOperand(1))) {
1605 return CI->getZExtValue() == 0;
1606 }
1607 return false;
1608}
1609
1610Instruction *SPIRVEmitIntrinsics::visitGetElementPtrInst(GetElementPtrInst &I) {
1611 IRBuilder<> B(I.getParent());
1612 B.SetInsertPoint(&I);
1613
1614 if (TM->getSubtargetImpl()->isLogicalSPIRV() && !isFirstIndexZero(&I)) {
1615 // Logical SPIR-V cannot use the OpPtrAccessChain instruction. If the first
1616 // index of the GEP is not 0, then we need to try to adjust it.
1617 //
1618 // If the GEP is doing byte addressing, try to rebuild the full access chain
1619 // from the type of the pointer.
1620 if (I.getSourceElementType() ==
1621 IntegerType::getInt8Ty(CurrF->getContext())) {
1622 return buildLogicalAccessChainFromGEP(I);
1623 }
1624
1625 // Look for the array-to-pointer decay. If this is the pattern,
1626 // we can adjust the types and prepend a 0 to the indices.
1627 Value *PtrOp = I.getPointerOperand();
1628 Type *SrcElemTy = I.getSourceElementType();
1629 Type *DeducedPointeeTy = deduceElementType(PtrOp, true);
1630
1631 if (auto *ArrTy = dyn_cast<ArrayType>(DeducedPointeeTy)) {
1632 if (ArrTy->getElementType() == SrcElemTy) {
1633 SmallVector<Value *> NewIndices;
1634 Type *FirstIdxType = I.getOperand(1)->getType();
1635 NewIndices.push_back(ConstantInt::get(FirstIdxType, 0));
1636 for (Value *Idx : I.indices())
1637 NewIndices.push_back(Idx);
1638
1639 SmallVector<Type *, 2> Types = {I.getType(), I.getPointerOperandType()};
1640 SmallVector<Value *> Args;
1641 Args.push_back(B.getInt1(I.isInBounds()));
1642 Args.push_back(I.getPointerOperand());
1643 Args.append(NewIndices.begin(), NewIndices.end());
1644
1645 auto *NewI = B.CreateIntrinsic(Intrinsic::spv_gep, {Types}, {Args});
1646 replaceAllUsesWithAndErase(B, &I, NewI);
1647 return NewI;
1648 }
1649 }
1650 }
1651
1652 SmallVector<Type *, 2> Types = {I.getType(), I.getOperand(0)->getType()};
1653 SmallVector<Value *> Args;
1654 Args.push_back(B.getInt1(I.isInBounds()));
1655 llvm::append_range(Args, I.operands());
1656 auto *NewI = B.CreateIntrinsic(Intrinsic::spv_gep, {Types}, {Args});
1657 replaceAllUsesWithAndErase(B, &I, NewI);
1658 return NewI;
1659}
1660
1661Instruction *SPIRVEmitIntrinsics::visitBitCastInst(BitCastInst &I) {
1662 IRBuilder<> B(I.getParent());
1663 B.SetInsertPoint(&I);
1664 Value *Source = I.getOperand(0);
1665
1666 // SPIR-V, contrary to LLVM 17+ IR, supports bitcasts between pointers of
1667 // varying element types. In IR coming from older versions of LLVM such
1668 // bitcasts do not provide sufficient information, so they should simply be
1669 // skipped here and handled in insertPtrCastOrAssignTypeInstr.
1670 if (isPointerTy(I.getType())) {
1671 replaceAllUsesWith(&I, Source);
1672 I.eraseFromParent();
1673 return nullptr;
1674 }
1675
1676 SmallVector<Type *, 2> Types = {I.getType(), Source->getType()};
1677 SmallVector<Value *> Args(I.op_begin(), I.op_end());
1678 auto *NewI = B.CreateIntrinsic(Intrinsic::spv_bitcast, {Types}, {Args});
1679 replaceAllUsesWithAndErase(B, &I, NewI);
1680 return NewI;
1681}
1682
1683void SPIRVEmitIntrinsics::insertAssignPtrTypeTargetExt(
1684 TargetExtType *AssignedType, Value *V, IRBuilder<> &B) {
1685 Type *VTy = V->getType();
1686
1687 // A couple of sanity checks.
1688 assert((isPointerTy(VTy)) && "Expect a pointer type!");
1689 if (Type *ElemTy = getPointeeType(VTy))
1690 if (ElemTy != AssignedType)
1691 report_fatal_error("Unexpected pointer element type!");
1692
1693 CallInst *AssignCI = GR->findAssignPtrTypeInstr(V);
1694 if (!AssignCI) {
1695 GR->buildAssignType(B, AssignedType, V);
1696 return;
1697 }
1698
1699 Type *CurrentType =
1700 cast<ValueAsMetadata>(
1701 cast<MetadataAsValue>(AssignCI->getOperand(1))->getMetadata())
1702 ->getType();
1703 if (CurrentType == AssignedType)
1704 return;
1705
1706 // Builtin types cannot be redeclared or cast.
1707 if (CurrentType->isTargetExtTy())
1708 report_fatal_error("Type mismatch " + CurrentType->getTargetExtName() +
1709 "/" + AssignedType->getTargetExtName() +
1710 " for value " + V->getName(),
1711 false);
1712
1713 // Our previous guess about the type seems to be wrong; update the
1714 // inferred type according to the new, more precise type information.
1715 GR->updateAssignType(AssignCI, V, getNormalizedPoisonValue(AssignedType));
1716}
1717
1718void SPIRVEmitIntrinsics::replacePointerOperandWithPtrCast(
1719 Instruction *I, Value *Pointer, Type *ExpectedElementType,
1720 unsigned OperandToReplace, IRBuilder<> &B) {
1721 TypeValidated.insert(I);
1722
1723 // Do not emit spv_ptrcast if Pointer's element type is ExpectedElementType
1724 Type *PointerElemTy = deduceElementTypeHelper(Pointer, false);
1725 if (PointerElemTy == ExpectedElementType ||
1726 isEquivalentTypes(PointerElemTy, ExpectedElementType))
1727 return;
1728
1729 setInsertPointSkippingPhis(B, I);
1730 Value *ExpectedElementVal = getNormalizedPoisonValue(ExpectedElementType);
1731 MetadataAsValue *VMD = buildMD(ExpectedElementVal);
1732 unsigned AddressSpace = getPointerAddressSpace(Pointer->getType());
1733 bool FirstPtrCastOrAssignPtrType = true;
1734
1735 // Do not emit a new spv_ptrcast if an equivalent one already exists or when
1736 // spv_assign_ptr_type already targets this pointer with the same element
1737 // type.
1738 if (Pointer->hasUseList()) {
1739 for (auto User : Pointer->users()) {
1740 auto *II = dyn_cast<IntrinsicInst>(User);
1741 if (!II ||
1742 (II->getIntrinsicID() != Intrinsic::spv_assign_ptr_type &&
1743 II->getIntrinsicID() != Intrinsic::spv_ptrcast) ||
1744 II->getOperand(0) != Pointer)
1745 continue;
1746
1747 // There is some spv_ptrcast/spv_assign_ptr_type already targeting this
1748 // pointer.
1749 FirstPtrCastOrAssignPtrType = false;
1750 if (II->getOperand(1) != VMD ||
1751 dyn_cast<ConstantInt>(II->getOperand(2))->getSExtValue() !=
1752 AddressSpace)
1753 continue;
1754
1755 // The spv_ptrcast/spv_assign_ptr_type targeting this pointer is of the
1756 // same element type and address space.
1757 if (II->getIntrinsicID() != Intrinsic::spv_ptrcast)
1758 return;
1759
1760 // This must be a spv_ptrcast; do not emit a new one if this one is in the
1761 // same BB as I. Otherwise, search for other spv_ptrcast/spv_assign_ptr_type.
1762 if (II->getParent() != I->getParent())
1763 continue;
1764
1765 I->setOperand(OperandToReplace, II);
1766 return;
1767 }
1768 }
1769
1770 if (isa<Instruction>(Pointer) || isa<Argument>(Pointer)) {
1771 if (FirstPtrCastOrAssignPtrType) {
1772 // If this would be the first spv_ptrcast, do not emit spv_ptrcast and
1773 // emit spv_assign_ptr_type instead.
1774 GR->buildAssignPtr(B, ExpectedElementType, Pointer);
1775 return;
1776 } else if (isTodoType(Pointer)) {
1777 eraseTodoType(Pointer);
1778 if (!isa<CallInst>(Pointer) && !isa<GetElementPtrInst>(Pointer)) {
1779 // If this wouldn't be the first spv_ptrcast but the existing type info is
1780 // incomplete, update the spv_assign_ptr_type arguments.
1781 if (CallInst *AssignCI = GR->findAssignPtrTypeInstr(Pointer)) {
1782 Type *PrevElemTy = GR->findDeducedElementType(Pointer);
1783 assert(PrevElemTy);
1784 DenseSet<std::pair<Value *, Value *>> VisitedSubst{
1785 std::make_pair(I, Pointer)};
1786 GR->updateAssignType(AssignCI, Pointer, ExpectedElementVal);
1787 propagateElemType(Pointer, PrevElemTy, VisitedSubst);
1788 } else {
1789 GR->buildAssignPtr(B, ExpectedElementType, Pointer);
1790 }
1791 return;
1792 }
1793 }
1794 }
1795
1796 // Emit spv_ptrcast
1797 SmallVector<Type *, 2> Types = {Pointer->getType(), Pointer->getType()};
1798 SmallVector<Value *, 2> Args = {Pointer, VMD, B.getInt32(AddressSpace)};
1799 auto *PtrCastI = B.CreateIntrinsic(Intrinsic::spv_ptrcast, {Types}, Args);
1800 I->setOperand(OperandToReplace, PtrCastI);
1801 // We need to set up a pointee type for the newly created spv_ptrcast.
1802 GR->buildAssignPtr(B, ExpectedElementType, PtrCastI);
1803}
1804
1805void SPIRVEmitIntrinsics::insertPtrCastOrAssignTypeInstr(Instruction *I,
1806 IRBuilder<> &B) {
1807 // Handle basic instructions:
1808 StoreInst *SI = dyn_cast<StoreInst>(I);
1809 if (IsKernelArgInt8(CurrF, SI)) {
1810 replacePointerOperandWithPtrCast(
1811 I, SI->getValueOperand(), IntegerType::getInt8Ty(CurrF->getContext()),
1812 0, B);
1813 }
1814 if (SI) {
1815 Value *Op = SI->getValueOperand();
1816 Value *Pointer = SI->getPointerOperand();
1817 Type *OpTy = Op->getType();
1818 if (auto *OpI = dyn_cast<Instruction>(Op))
1819 OpTy = restoreMutatedType(GR, OpI, OpTy);
1820 if (OpTy == Op->getType())
1821 OpTy = deduceElementTypeByValueDeep(OpTy, Op, false);
1822 replacePointerOperandWithPtrCast(I, Pointer, OpTy, 1, B);
1823 return;
1824 }
1825 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
1826 Value *Pointer = LI->getPointerOperand();
1827 Type *OpTy = LI->getType();
1828 if (auto *PtrTy = dyn_cast<PointerType>(OpTy)) {
1829 if (Type *ElemTy = GR->findDeducedElementType(LI)) {
1830 OpTy = getTypedPointerWrapper(ElemTy, PtrTy->getAddressSpace());
1831 } else {
1832 Type *NewOpTy = OpTy;
1833 OpTy = deduceElementTypeByValueDeep(OpTy, LI, false);
1834 if (OpTy == NewOpTy)
1835 insertTodoType(Pointer);
1836 }
1837 }
1838 replacePointerOperandWithPtrCast(I, Pointer, OpTy, 0, B);
1839 return;
1840 }
1841 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
1842 Value *Pointer = GEPI->getPointerOperand();
1843 Type *OpTy = nullptr;
1844
1845 // Logical SPIR-V is not allowed to use Op*PtrAccessChain instructions. If
1846 // the first index is 0, then we can trivially lower to OpAccessChain. If
1847 // not we need to try to rewrite the GEP. We avoid adding a pointer cast at
1848 // this time, and will rewrite the GEP when visiting it.
1849 if (TM->getSubtargetImpl()->isLogicalSPIRV() && !isFirstIndexZero(GEPI)) {
1850 return;
1851 }
1852
1853 // In all cases, fall back to the GEP type if type scavenging failed.
1854 if (!OpTy)
1855 OpTy = GEPI->getSourceElementType();
1856
1857 replacePointerOperandWithPtrCast(I, Pointer, OpTy, 0, B);
1858 if (isNestedPointer(OpTy))
1859 insertTodoType(Pointer);
1860 return;
1861 }
1862
1863 // TODO: review and merge with existing logic:
1864 // Handle calls to builtins (non-intrinsics):
1865 CallInst *CI = dyn_cast<CallInst>(I);
1866 if (!CI || CI->isIndirectCall() || CI->isInlineAsm() ||
1867 !CI->getCalledFunction() || CI->getCalledFunction()->isIntrinsic())
1868 return;
1869
1870 // collect information about formal parameter types
1871 std::string DemangledName =
1872 getOclOrSpirvBuiltinDemangledName(CI->getCalledFunction()->getName());
1873 Function *CalledF = CI->getCalledFunction();
1874 SmallVector<Type *, 4> CalledArgTys;
1875 bool HaveTypes = false;
1876 for (unsigned OpIdx = 0; OpIdx < CalledF->arg_size(); ++OpIdx) {
1877 Argument *CalledArg = CalledF->getArg(OpIdx);
1878 Type *ArgType = CalledArg->getType();
1879 if (!isPointerTy(ArgType)) {
1880 CalledArgTys.push_back(nullptr);
1881 } else if (Type *ArgTypeElem = getPointeeType(ArgType)) {
1882 CalledArgTys.push_back(ArgTypeElem);
1883 HaveTypes = true;
1884 } else {
1885 Type *ElemTy = GR->findDeducedElementType(CalledArg);
1886 if (!ElemTy && hasPointeeTypeAttr(CalledArg))
1887 ElemTy = getPointeeTypeByAttr(CalledArg);
1888 if (!ElemTy) {
1889 ElemTy = getPointeeTypeByCallInst(DemangledName, CalledF, OpIdx);
1890 if (ElemTy) {
1891 GR->addDeducedElementType(CalledArg, normalizeType(ElemTy));
1892 } else {
1893 for (User *U : CalledArg->users()) {
1894 if (Instruction *Inst = dyn_cast<Instruction>(U)) {
1895 if ((ElemTy = deduceElementTypeHelper(Inst, false)) != nullptr)
1896 break;
1897 }
1898 }
1899 }
1900 }
1901 HaveTypes |= ElemTy != nullptr;
1902 CalledArgTys.push_back(ElemTy);
1903 }
1904 }
1905
1906 if (DemangledName.empty() && !HaveTypes)
1907 return;
1908
1909 for (unsigned OpIdx = 0; OpIdx < CI->arg_size(); OpIdx++) {
1910 Value *ArgOperand = CI->getArgOperand(OpIdx);
1911 if (!isPointerTy(ArgOperand->getType()))
1912 continue;
1913
1914 // Constants (nulls/undefs) are handled in insertAssignPtrTypeIntrs()
1915 if (!isa<Instruction>(ArgOperand) && !isa<Argument>(ArgOperand)) {
1916 // However, we may have assumptions about the formal argument's type and
1917 // may have a need to insert a ptr cast for the actual parameter of this
1918 // call.
1919 Argument *CalledArg = CalledF->getArg(OpIdx);
1920 if (!GR->findDeducedElementType(CalledArg))
1921 continue;
1922 }
1923
1924 Type *ExpectedType =
1925 OpIdx < CalledArgTys.size() ? CalledArgTys[OpIdx] : nullptr;
1926 if (!ExpectedType && !DemangledName.empty())
1927 ExpectedType = SPIRV::parseBuiltinCallArgumentBaseType(
1928 DemangledName, OpIdx, I->getContext());
1929 if (!ExpectedType || ExpectedType->isVoidTy())
1930 continue;
1931
1932 if (ExpectedType->isTargetExtTy() &&
1933 !isTypedPointerWrapper(cast<TargetExtType>(ExpectedType)))
1934 insertAssignPtrTypeTargetExt(cast<TargetExtType>(ExpectedType),
1935 ArgOperand, B);
1936 else
1937 replacePointerOperandWithPtrCast(CI, ArgOperand, ExpectedType, OpIdx, B);
1938 }
1939}
1940
1941Instruction *SPIRVEmitIntrinsics::visitInsertElementInst(InsertElementInst &I) {
1942 // If it's a <1 x Type> vector type, don't modify it. It's not a legal vector
1943 // type in LLT and IRTranslator will replace it with the scalar.
1944 if (isVector1(I.getType()))
1945 return &I;
1946
1947 SmallVector<Type *, 4> Types = {I.getType(), I.getOperand(0)->getType(),
1948 I.getOperand(1)->getType(),
1949 I.getOperand(2)->getType()};
1950 IRBuilder<> B(I.getParent());
1951 B.SetInsertPoint(&I);
1952 SmallVector<Value *> Args(I.op_begin(), I.op_end());
1953 auto *NewI = B.CreateIntrinsic(Intrinsic::spv_insertelt, {Types}, {Args});
1954 replaceAllUsesWithAndErase(B, &I, NewI);
1955 return NewI;
1956}
1957
1958 Instruction *
1959SPIRVEmitIntrinsics::visitExtractElementInst(ExtractElementInst &I) {
1960 // If it's a <1 x Type> vector type, don't modify it. It's not a legal vector
1961 // type in LLT and IRTranslator will replace it with the scalar.
1962 if (isVector1(I.getVectorOperandType()))
1963 return &I;
1964
1965 IRBuilder<> B(I.getParent());
1966 B.SetInsertPoint(&I);
1967 SmallVector<Type *, 3> Types = {I.getType(), I.getVectorOperandType(),
1968 I.getIndexOperand()->getType()};
1969 SmallVector<Value *, 2> Args = {I.getVectorOperand(), I.getIndexOperand()};
1970 auto *NewI = B.CreateIntrinsic(Intrinsic::spv_extractelt, {Types}, {Args});
1971 replaceAllUsesWithAndErase(B, &I, NewI);
1972 return NewI;
1973}
1974
1975Instruction *SPIRVEmitIntrinsics::visitInsertValueInst(InsertValueInst &I) {
1976 IRBuilder<> B(I.getParent());
1977 B.SetInsertPoint(&I);
1978 SmallVector<Type *, 1> Types = {I.getInsertedValueOperand()->getType()};
1979 SmallVector<Value *> Args;
1980 Value *AggregateOp = I.getAggregateOperand();
1981 if (isa<UndefValue>(AggregateOp))
1982 Args.push_back(UndefValue::get(B.getInt32Ty()));
1983 else
1984 Args.push_back(AggregateOp);
1985 Args.push_back(I.getInsertedValueOperand());
1986 for (auto &Op : I.indices())
1987 Args.push_back(B.getInt32(Op));
1988 Instruction *NewI =
1989 B.CreateIntrinsic(Intrinsic::spv_insertv, {Types}, {Args});
1990 replaceMemInstrUses(&I, NewI, B);
1991 return NewI;
1992}
1993
1994Instruction *SPIRVEmitIntrinsics::visitExtractValueInst(ExtractValueInst &I) {
1995 if (I.getAggregateOperand()->getType()->isAggregateType())
1996 return &I;
1997 IRBuilder<> B(I.getParent());
1998 B.SetInsertPoint(&I);
1999 SmallVector<Value *> Args(I.operands());
2000 for (auto &Op : I.indices())
2001 Args.push_back(B.getInt32(Op));
2002 auto *NewI =
2003 B.CreateIntrinsic(Intrinsic::spv_extractv, {I.getType()}, {Args});
2004 replaceAllUsesWithAndErase(B, &I, NewI);
2005 return NewI;
2006}
2007
2008Instruction *SPIRVEmitIntrinsics::visitLoadInst(LoadInst &I) {
2009 if (!I.getType()->isAggregateType())
2010 return &I;
2011 IRBuilder<> B(I.getParent());
2012 B.SetInsertPoint(&I);
2013 TrackConstants = false;
2014 const auto *TLI = TM->getSubtargetImpl()->getTargetLowering();
2015 MachineMemOperand::Flags Flags =
2016 TLI->getLoadMemOperandFlags(I, CurrF->getDataLayout());
2017 auto *NewI =
2018 B.CreateIntrinsic(Intrinsic::spv_load, {I.getOperand(0)->getType()},
2019 {I.getPointerOperand(), B.getInt16(Flags),
2020 B.getInt8(I.getAlign().value())});
2021 replaceMemInstrUses(&I, NewI, B);
2022 return NewI;
2023}
2024
2025Instruction *SPIRVEmitIntrinsics::visitStoreInst(StoreInst &I) {
2026 if (!AggrStores.contains(&I))
2027 return &I;
2028 IRBuilder<> B(I.getParent());
2029 B.SetInsertPoint(&I);
2030 TrackConstants = false;
2031 const auto *TLI = TM->getSubtargetImpl()->getTargetLowering();
2032 MachineMemOperand::Flags Flags =
2033 TLI->getStoreMemOperandFlags(I, CurrF->getDataLayout());
2034 auto *PtrOp = I.getPointerOperand();
2035 auto *NewI = B.CreateIntrinsic(
2036 Intrinsic::spv_store, {I.getValueOperand()->getType(), PtrOp->getType()},
2037 {I.getValueOperand(), PtrOp, B.getInt16(Flags),
2038 B.getInt8(I.getAlign().value())});
2039 NewI->copyMetadata(I);
2040 I.eraseFromParent();
2041 return NewI;
2042}
2043
2044Instruction *SPIRVEmitIntrinsics::visitAllocaInst(AllocaInst &I) {
2045 Value *ArraySize = nullptr;
2046 if (I.isArrayAllocation()) {
2047 const SPIRVSubtarget *STI = TM->getSubtargetImpl(*I.getFunction());
2048 if (!STI->canUseExtension(
2049 SPIRV::Extension::SPV_INTEL_variable_length_array))
2050 report_fatal_error(
2051 "array allocation: this instruction requires the following "
2052 "SPIR-V extension: SPV_INTEL_variable_length_array",
2053 false);
2054 ArraySize = I.getArraySize();
2055 }
2056 IRBuilder<> B(I.getParent());
2057 B.SetInsertPoint(&I);
2058 TrackConstants = false;
2059 Type *PtrTy = I.getType();
2060 auto *NewI =
2061 ArraySize
2062 ? B.CreateIntrinsic(Intrinsic::spv_alloca_array,
2063 {PtrTy, ArraySize->getType()},
2064 {ArraySize, B.getInt8(I.getAlign().value())})
2065 : B.CreateIntrinsic(Intrinsic::spv_alloca, {PtrTy},
2066 {B.getInt8(I.getAlign().value())});
2067 replaceAllUsesWithAndErase(B, &I, NewI);
2068 return NewI;
2069}
2070
2071Instruction *SPIRVEmitIntrinsics::visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
2072 assert(I.getType()->isAggregateType() && "Aggregate result is expected");
2073 IRBuilder<> B(I.getParent());
2074 B.SetInsertPoint(&I);
2075 SmallVector<Value *> Args(I.operands());
2076 Args.push_back(B.getInt32(
2077 static_cast<uint32_t>(getMemScope(I.getContext(), I.getSyncScopeID()))));
2078 Args.push_back(B.getInt32(
2079 static_cast<uint32_t>(getMemSemantics(I.getSuccessOrdering()))));
2080 Args.push_back(B.getInt32(
2081 static_cast<uint32_t>(getMemSemantics(I.getFailureOrdering()))));
2082 auto *NewI = B.CreateIntrinsic(Intrinsic::spv_cmpxchg,
2083 {I.getPointerOperand()->getType()}, {Args});
2084 replaceMemInstrUses(&I, NewI, B);
2085 return NewI;
2086}
2087
2088Instruction *SPIRVEmitIntrinsics::visitUnreachableInst(UnreachableInst &I) {
2089 IRBuilder<> B(I.getParent());
2090 B.SetInsertPoint(&I);
2091 B.CreateIntrinsic(Intrinsic::spv_unreachable, {});
2092 return &I;
2093}
2094
2095void SPIRVEmitIntrinsics::processGlobalValue(GlobalVariable &GV,
2096 IRBuilder<> &B) {
2097 // Skip special artificial variables.
2098 static const StringSet<> ArtificialGlobals{"llvm.global.annotations",
2099 "llvm.compiler.used"};
2100
2101 if (ArtificialGlobals.contains(GV.getName()))
2102 return;
2103
2104 Constant *Init = nullptr;
2105 if (hasInitializer(&GV)) {
2106 // Deduce element type and store results in Global Registry.
2107 // The result is ignored, because TypedPointerType is not supported
2108 // by general LLVM IR logic.
2109 deduceElementTypeHelper(&GV, false);
2110 Init = GV.getInitializer();
2111 Type *Ty = isAggrConstForceInt32(Init) ? B.getInt32Ty() : Init->getType();
2112 Constant *Const = isAggrConstForceInt32(Init) ? B.getInt32(1) : Init;
2113 auto *InitInst = B.CreateIntrinsic(Intrinsic::spv_init_global,
2114 {GV.getType(), Ty}, {&GV, Const});
2115 InitInst->setArgOperand(1, Init);
2116 }
2117 if (!Init && GV.use_empty())
2118 B.CreateIntrinsic(Intrinsic::spv_unref_global, GV.getType(), &GV);
2119}
2120
2121 // Return true if we can't decide what the pointee type is now and will get
2122 // back to the question later. Return false if spv_assign_ptr_type is not
2123 // needed or can be inserted immediately.
2124bool SPIRVEmitIntrinsics::insertAssignPtrTypeIntrs(Instruction *I,
2125 IRBuilder<> &B,
2126 bool UnknownElemTypeI8) {
2127 reportFatalOnTokenType(I);
2128 if (!isPointerTy(I->getType()) || !requireAssignType(I))
2129 return false;
2130
2131 setInsertPointSkippingPhis(B, I);
2132 if (Type *ElemTy = deduceElementType(I, UnknownElemTypeI8)) {
2133 GR->buildAssignPtr(B, ElemTy, I);
2134 return false;
2135 }
2136 return true;
2137}
2138
2139void SPIRVEmitIntrinsics::insertAssignTypeIntrs(Instruction *I,
2140 IRBuilder<> &B) {
2141 // TODO: extend the list of functions with known result types
2142 static StringMap<unsigned> ResTypeWellKnown = {
2143 {"async_work_group_copy", WellKnownTypes::Event},
2144 {"async_work_group_strided_copy", WellKnownTypes::Event},
2145 {"__spirv_GroupAsyncCopy", WellKnownTypes::Event}};
2146
2147 reportFatalOnTokenType(I);
2148
2149 bool IsKnown = false;
2150 if (auto *CI = dyn_cast<CallInst>(I)) {
2151 if (!CI->isIndirectCall() && !CI->isInlineAsm() &&
2152 CI->getCalledFunction() && !CI->getCalledFunction()->isIntrinsic()) {
2153 Function *CalledF = CI->getCalledFunction();
2154 std::string DemangledName =
2156 FPDecorationId DecorationId = FPDecorationId::NONE;
2157 if (DemangledName.length() > 0)
2158 DemangledName =
2159 SPIRV::lookupBuiltinNameHelper(DemangledName, &DecorationId);
2160 auto ResIt = ResTypeWellKnown.find(DemangledName);
2161 if (ResIt != ResTypeWellKnown.end()) {
2162 IsKnown = true;
2163 setInsertPointAfterDef(B, I);
2164 switch (ResIt->second) {
2165 case WellKnownTypes::Event:
2166 GR->buildAssignType(
2167 B, TargetExtType::get(I->getContext(), "spirv.Event"), I);
2168 break;
2169 }
2170 }
2171 // check if a floating rounding mode or saturation info is present
2172 switch (DecorationId) {
2173 default:
2174 break;
2175 case FPDecorationId::SAT:
2176 createSaturatedConversionDecoration(CI, B);
2177 break;
2178 case FPDecorationId::RTE:
2179 createRoundingModeDecoration(
2180 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTE, B);
2181 break;
2182 case FPDecorationId::RTZ:
2183 createRoundingModeDecoration(
2184 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTZ, B);
2185 break;
2186 case FPDecorationId::RTP:
2187 createRoundingModeDecoration(
2188 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTP, B);
2189 break;
2190 case FPDecorationId::RTN:
2191 createRoundingModeDecoration(
2192 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTN, B);
2193 break;
2194 }
2195 }
2196 }
2197
2198 Type *Ty = I->getType();
2199 if (!IsKnown && !Ty->isVoidTy() && !isPointerTy(Ty) && requireAssignType(I)) {
2200 setInsertPointAfterDef(B, I);
2201 Type *TypeToAssign = Ty;
2202 if (auto *II = dyn_cast<IntrinsicInst>(I)) {
2203 if (II->getIntrinsicID() == Intrinsic::spv_const_composite ||
2204 II->getIntrinsicID() == Intrinsic::spv_undef) {
2205 auto It = AggrConstTypes.find(II);
2206 if (It == AggrConstTypes.end())
2207 report_fatal_error("Unknown composite intrinsic type");
2208 TypeToAssign = It->second;
2209 }
2210 }
2211 TypeToAssign = restoreMutatedType(GR, I, TypeToAssign);
2212 GR->buildAssignType(B, TypeToAssign, I);
2213 }
2214 for (const auto &Op : I->operands()) {
2215 if (isa<ConstantPointerNull>(Op) || isa<UndefValue>(Op) ||
2216 // Check GetElementPtrConstantExpr case.
2217 (isa<ConstantExpr>(Op) &&
2218 (isa<GEPOperator>(Op) ||
2219 (cast<ConstantExpr>(Op)->getOpcode() == CastInst::IntToPtr)))) {
2220 setInsertPointSkippingPhis(B, I);
2221 Type *OpTy = Op->getType();
2222 if (isa<UndefValue>(Op) && OpTy->isAggregateType()) {
2223 CallInst *AssignCI =
2224 buildIntrWithMD(Intrinsic::spv_assign_type, {B.getInt32Ty()}, Op,
2225 UndefValue::get(B.getInt32Ty()), {}, B);
2226 GR->addAssignPtrTypeInstr(Op, AssignCI);
2227 } else if (!isa<Instruction>(Op)) {
2228 Type *OpTy = Op->getType();
2229 Type *OpTyElem = getPointeeType(OpTy);
2230 if (OpTyElem) {
2231 GR->buildAssignPtr(B, OpTyElem, Op);
2232 } else if (isPointerTy(OpTy)) {
2233 Type *ElemTy = GR->findDeducedElementType(Op);
2234 GR->buildAssignPtr(B, ElemTy ? ElemTy : deduceElementType(Op, true),
2235 Op);
2236 } else {
2237 Value *OpTyVal = Op;
2238 if (OpTy->isTargetExtTy()) {
2239 // We need to do this in order to be consistent with how target ext
2240 // types are handled in `processInstrAfterVisit`
2241 OpTyVal = getNormalizedPoisonValue(OpTy);
2242 }
2243 CallInst *AssignCI =
2244 buildIntrWithMD(Intrinsic::spv_assign_type, {OpTy},
2245 getNormalizedPoisonValue(OpTy), OpTyVal, {}, B);
2246 GR->addAssignPtrTypeInstr(OpTyVal, AssignCI);
2247 }
2248 }
2249 }
2250 }
2251}
2252
2253bool SPIRVEmitIntrinsics::shouldTryToAddMemAliasingDecoration(
2254 Instruction *Inst) {
2255 const SPIRVSubtarget *STI = TM->getSubtargetImpl(*Inst->getFunction());
2256 if (!STI->canUseExtension(SPIRV::Extension::SPV_INTEL_memory_access_aliasing))
2257 return false;
2258 // Add aliasing decorations to internal load and store intrinsics
2259 // and atomic instructions, skipping atomic stores as they won't have an ID
2260 // to attach the decoration to.
2261 CallInst *CI = dyn_cast<CallInst>(Inst);
2262 if (!CI)
2263 return false;
2264 if (Function *Fun = CI->getCalledFunction()) {
2265 if (Fun->isIntrinsic()) {
2266 switch (Fun->getIntrinsicID()) {
2267 case Intrinsic::spv_load:
2268 case Intrinsic::spv_store:
2269 return true;
2270 default:
2271 return false;
2272 }
2273 }
2274 std::string Name = getOclOrSpirvBuiltinDemangledName(Fun->getName());
2275 const std::string Prefix = "__spirv_Atomic";
2276 const bool IsAtomic = Name.find(Prefix) == 0;
2277
2278 if (!Fun->getReturnType()->isVoidTy() && IsAtomic)
2279 return true;
2280 }
2281 return false;
2282}
2283
2284void SPIRVEmitIntrinsics::insertSpirvDecorations(Instruction *I,
2285 IRBuilder<> &B) {
2286 if (MDNode *MD = I->getMetadata("spirv.Decorations")) {
2287 setInsertPointAfterDef(B, I);
2288 B.CreateIntrinsic(Intrinsic::spv_assign_decoration, {I->getType()},
2289 {I, MetadataAsValue::get(I->getContext(), MD)});
2290 }
2291 // Lower alias.scope/noalias metadata
2292 {
2293 auto processMemAliasingDecoration = [&](unsigned Kind) {
2294 if (MDNode *AliasListMD = I->getMetadata(Kind)) {
2295 if (shouldTryToAddMemAliasingDecoration(I)) {
2296 uint32_t Dec = Kind == LLVMContext::MD_alias_scope
2297 ? SPIRV::Decoration::AliasScopeINTEL
2298 : SPIRV::Decoration::NoAliasINTEL;
2299 SmallVector<Value *, 3> Args = {
2300 I, ConstantInt::get(B.getInt32Ty(), Dec),
2301 MetadataAsValue::get(I->getContext(), AliasListMD)};
2303 B.CreateIntrinsic(Intrinsic::spv_assign_aliasing_decoration,
2304 {I->getType()}, {Args});
2305 }
2306 }
2307 };
2308 processMemAliasingDecoration(LLVMContext::MD_alias_scope);
2309 processMemAliasingDecoration(LLVMContext::MD_noalias);
2310 }
2311 // MD_fpmath
2312 if (MDNode *MD = I->getMetadata(LLVMContext::MD_fpmath)) {
2313 const SPIRVSubtarget *STI = TM->getSubtargetImpl(*I->getFunction());
2314 bool AllowFPMaxError =
2315 STI->canUseExtension(SPIRV::Extension::SPV_INTEL_fp_max_error);
2316 if (!AllowFPMaxError)
2317 return;
2318
2319 setInsertPointAfterDef(B, I);
2320 B.CreateIntrinsic(Intrinsic::spv_assign_fpmaxerror_decoration,
2321 {I->getType()},
2322 {I, MetadataAsValue::get(I->getContext(), MD)});
2323 }
2324}
2325
2326 static SPIRV::FPFastMathDefaultInfoVector &getOrCreateFPFastMathDefaultInfoVec(
2327 const Module &M,
2328 DenseMap<Function *, SPIRV::FPFastMathDefaultInfoVector>
2329 &FPFastMathDefaultInfoMap,
2330 Function *F) {
2331 auto it = FPFastMathDefaultInfoMap.find(F);
2332 if (it != FPFastMathDefaultInfoMap.end())
2333 return it->second;
2334
2335 // If the map does not contain the entry, create a new one. Initialize it to
2336 // contain all 3 elements sorted by bit width of target type: {half, float,
2337 // double}.
2338 SPIRV::FPFastMathDefaultInfoVector FPFastMathDefaultInfoVec;
2339 FPFastMathDefaultInfoVec.emplace_back(Type::getHalfTy(M.getContext()),
2340 SPIRV::FPFastMathMode::None);
2341 FPFastMathDefaultInfoVec.emplace_back(Type::getFloatTy(M.getContext()),
2342 SPIRV::FPFastMathMode::None);
2343 FPFastMathDefaultInfoVec.emplace_back(Type::getDoubleTy(M.getContext()),
2344 SPIRV::FPFastMathMode::None);
2345 return FPFastMathDefaultInfoMap[F] = std::move(FPFastMathDefaultInfoVec);
2346}
2347
2348 static SPIRV::FPFastMathDefaultInfo &getFPFastMathDefaultInfo(
2349 SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec,
2350 const Type *Ty) {
2351 size_t BitWidth = Ty->getScalarSizeInBits();
2352 int Index =
2354 BitWidth);
2355 assert(Index >= 0 && Index < 3 &&
2356 "Expected FPFastMathDefaultInfo for half, float, or double");
2357 assert(FPFastMathDefaultInfoVec.size() == 3 &&
2358 "Expected FPFastMathDefaultInfoVec to have exactly 3 elements");
2359 return FPFastMathDefaultInfoVec[Index];
2360}
2361
2362void SPIRVEmitIntrinsics::insertConstantsForFPFastMathDefault(Module &M) {
2363 const SPIRVSubtarget *ST = TM->getSubtargetImpl();
2364 if (!ST->canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2))
2365 return;
2366
2367 // Store the FPFastMathDefaultInfo in the FPFastMathDefaultInfoMap.
2368 // We need the entry point (function) as the key, and the target
2369 // type and flags as the value.
2370 // We also need to check ContractionOff and SignedZeroInfNanPreserve
2371 // execution modes, as they are now deprecated and must be replaced
2372 // with FPFastMathDefaultInfo.
2373 auto Node = M.getNamedMetadata("spirv.ExecutionMode");
2374 if (!Node) {
2375 if (!M.getNamedMetadata("opencl.enable.FP_CONTRACT")) {
2376 // This requires emitting ContractionOff. However, because
2377 // ContractionOff is now deprecated, we need to replace it with
2378 // FPFastMathDefaultInfo with FP Fast Math Mode bitmask set to all 0.
2379 // We need to create the constant for that.
2380
2381 // Create constant instruction with the bitmask flags.
2382 Constant *InitValue =
2383 ConstantInt::get(Type::getInt32Ty(M.getContext()), 0);
2384 // TODO: Reuse constant if there is one already with the required
2385 // value.
2386 [[maybe_unused]] GlobalVariable *GV =
2387 new GlobalVariable(M, // Module
2388 Type::getInt32Ty(M.getContext()), // Type
2389 true, // isConstant
2390 GlobalValue::InternalLinkage, // Linkage
2391 InitValue // Initializer
2392 );
2393 }
2394 return;
2395 }
2396
2397 // The table maps function pointers to their default FP fast math info. It
2398 // can be assumed that the SmallVector is sorted by the bit width of the
2399 // type. The first element is the smallest bit width, and the last element
2400 // is the largest bit width, therefore, we will have {half, float, double}
2401 // in the order of their bit widths.
2402 DenseMap<Function *, SPIRV::FPFastMathDefaultInfoVector>
2403 FPFastMathDefaultInfoMap;
2404
2405 for (unsigned i = 0; i < Node->getNumOperands(); i++) {
2406 MDNode *MDN = cast<MDNode>(Node->getOperand(i));
2407 assert(MDN->getNumOperands() >= 2 && "Expected at least 2 operands");
2408 Function *F = cast<Function>(
2409 cast<ConstantAsMetadata>(MDN->getOperand(0))->getValue());
2410 const auto EM =
2411 cast<ConstantInt>(
2412 cast<ConstantAsMetadata>(MDN->getOperand(1))->getValue())
2413 ->getZExtValue();
2414 if (EM == SPIRV::ExecutionMode::FPFastMathDefault) {
2415 assert(MDN->getNumOperands() == 4 &&
2416 "Expected 4 operands for FPFastMathDefault");
2417 const Type *T = cast<ValueAsMetadata>(MDN->getOperand(2))->getType();
2418 unsigned Flags =
2419 cast<ConstantInt>(
2420 cast<ConstantAsMetadata>(MDN->getOperand(3))->getValue())
2421 ->getZExtValue();
2422 SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
2423 getOrCreateFPFastMathDefaultInfoVec(M, FPFastMathDefaultInfoMap, F);
2424 SPIRV::FPFastMathDefaultInfo &Info =
2425 getFPFastMathDefaultInfo(FPFastMathDefaultInfoVec, T);
2426 Info.FastMathFlags = Flags;
2427 Info.FPFastMathDefault = true;
2428 } else if (EM == SPIRV::ExecutionMode::ContractionOff) {
2429 assert(MDN->getNumOperands() == 2 &&
2430 "Expected no operands for ContractionOff");
2431
2432 // We need to save this info for every FP type handled here, i.e. {half,
2433 // float, double}.
2434 SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
2435 getOrCreateFPFastMathDefaultInfoVec(M, FPFastMathDefaultInfoMap, F);
2436 for (SPIRV::FPFastMathDefaultInfo &Info : FPFastMathDefaultInfoVec) {
2437 Info.ContractionOff = true;
2438 }
2439 } else if (EM == SPIRV::ExecutionMode::SignedZeroInfNanPreserve) {
2440 assert(MDN->getNumOperands() == 3 &&
2441 "Expected 1 operand for SignedZeroInfNanPreserve");
2442 unsigned TargetWidth =
2443 cast<ConstantInt>(
2444 cast<ConstantAsMetadata>(MDN->getOperand(2))->getValue())
2445 ->getZExtValue();
2446 // We need to save this info only for the FP type with TargetWidth.
2447 SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
2448 getOrCreateFPFastMathDefaultInfoVec(M, FPFastMathDefaultInfoMap, F);
2451 assert(Index >= 0 && Index < 3 &&
2452 "Expected FPFastMathDefaultInfo for half, float, or double");
2453 assert(FPFastMathDefaultInfoVec.size() == 3 &&
2454 "Expected FPFastMathDefaultInfoVec to have exactly 3 elements");
2455 FPFastMathDefaultInfoVec[Index].SignedZeroInfNanPreserve = true;
2456 }
2457 }
2458
2459 std::unordered_map<unsigned, GlobalVariable *> GlobalVars;
2460 for (auto &[Func, FPFastMathDefaultInfoVec] : FPFastMathDefaultInfoMap) {
2461 if (FPFastMathDefaultInfoVec.empty())
2462 continue;
2463
2464 for (const SPIRV::FPFastMathDefaultInfo &Info : FPFastMathDefaultInfoVec) {
2465 assert(Info.Ty && "Expected target type for FPFastMathDefaultInfo");
2466 // Skip if none of the execution modes was used.
2467 unsigned Flags = Info.FastMathFlags;
2468 if (Flags == SPIRV::FPFastMathMode::None && !Info.ContractionOff &&
2469 !Info.SignedZeroInfNanPreserve && !Info.FPFastMathDefault)
2470 continue;
2471
2472 // Check if flags are compatible.
2473 if (Info.ContractionOff && (Flags & SPIRV::FPFastMathMode::AllowContract))
2474 report_fatal_error("Conflicting FPFastMathFlags: ContractionOff "
2475 "and AllowContract");
2476
2477 if (Info.SignedZeroInfNanPreserve &&
2478 !(Flags &
2479 (SPIRV::FPFastMathMode::NotNaN | SPIRV::FPFastMathMode::NotInf |
2480 SPIRV::FPFastMathMode::NSZ))) {
2481 if (Info.FPFastMathDefault)
2482 report_fatal_error("Conflicting FPFastMathFlags: "
2483 "SignedZeroInfNanPreserve but at least one of "
2484 "NotNaN/NotInf/NSZ is enabled.");
2485 }
2486
2487 if ((Flags & SPIRV::FPFastMathMode::AllowTransform) &&
2488 !((Flags & SPIRV::FPFastMathMode::AllowReassoc) &&
2489 (Flags & SPIRV::FPFastMathMode::AllowContract))) {
2490 report_fatal_error("Conflicting FPFastMathFlags: "
2491 "AllowTransform requires AllowReassoc and "
2492 "AllowContract to be set.");
2493 }
2494
2495 auto it = GlobalVars.find(Flags);
2496 GlobalVariable *GV = nullptr;
2497 if (it != GlobalVars.end()) {
2498 // Reuse existing global variable.
2499 GV = it->second;
2500 } else {
2501 // Create constant instruction with the bitmask flags.
2502 Constant *InitValue =
2503 ConstantInt::get(Type::getInt32Ty(M.getContext()), Flags);
2504 // TODO: Reuse constant if there is one already with the required
2505 // value.
2506 GV = new GlobalVariable(M, // Module
2507 Type::getInt32Ty(M.getContext()), // Type
2508 true, // isConstant
2509 GlobalValue::InternalLinkage, // Linkage
2510 InitValue // Initializer
2511 );
2512 GlobalVars[Flags] = GV;
2513 }
2514 }
2515 }
2516}
2517
2518void SPIRVEmitIntrinsics::processInstrAfterVisit(Instruction *I,
2519 IRBuilder<> &B) {
2520 auto *II = dyn_cast<IntrinsicInst>(I);
2521 bool IsConstComposite =
2522 II && II->getIntrinsicID() == Intrinsic::spv_const_composite;
2523 if (IsConstComposite && TrackConstants) {
2524 B.SetInsertPoint(I->getNextNode());
2525 auto t = AggrConsts.find(I);
2526 assert(t != AggrConsts.end());
2527 auto *NewOp =
2528 buildIntrWithMD(Intrinsic::spv_track_constant,
2529 {II->getType(), II->getType()}, t->second, I, {}, B);
2530 replaceAllUsesWith(I, NewOp, false);
2531 NewOp->setArgOperand(0, I);
2532 }
2533 bool IsPhi = isa<PHINode>(I), BPrepared = false;
2534 for (const auto &Op : I->operands()) {
2535 if (isa<PHINode>(I) || isa<SwitchInst>(I) ||
2537 continue;
2538 unsigned OpNo = Op.getOperandNo();
2539 if (II && ((II->getIntrinsicID() == Intrinsic::spv_gep && OpNo == 0) ||
2540 (II->paramHasAttr(OpNo, Attribute::ImmArg))))
2541 continue;
2542
2543 if (!BPrepared) {
2544 IsPhi ? B.SetInsertPointPastAllocas(I->getParent()->getParent())
2545 : B.SetInsertPoint(I);
2546 BPrepared = true;
2547 }
2548 Type *OpTy = Op->getType();
2549 Type *OpElemTy = GR->findDeducedElementType(Op);
2550 Value *NewOp = Op;
2551 if (OpTy->isTargetExtTy()) {
2552 // Since this value is replaced by poison, we need to do the same in
2553 // `insertAssignTypeIntrs`.
2554 Value *OpTyVal = getNormalizedPoisonValue(OpTy);
2555 NewOp = buildIntrWithMD(Intrinsic::spv_track_constant,
2556 {OpTy, OpTyVal->getType()}, Op, OpTyVal, {}, B);
2557 }
2558 if (!IsConstComposite && isPointerTy(OpTy) && OpElemTy != nullptr &&
2559 OpElemTy != IntegerType::getInt8Ty(I->getContext())) {
2560 SmallVector<Type *, 2> Types = {OpTy, OpTy};
2561 SmallVector<Value *, 2> Args = {
2562 NewOp, buildMD(getNormalizedPoisonValue(OpElemTy)),
2563 B.getInt32(getPointerAddressSpace(OpTy))};
2564 CallInst *PtrCasted =
2565 B.CreateIntrinsic(Intrinsic::spv_ptrcast, {Types}, Args);
2566 GR->buildAssignPtr(B, OpElemTy, PtrCasted);
2567 NewOp = PtrCasted;
2568 }
2569 if (NewOp != Op)
2570 I->setOperand(OpNo, NewOp);
2571 }
2572 if (Named.insert(I).second)
2573 emitAssignName(I, B);
2574}
2575
2576Type *SPIRVEmitIntrinsics::deduceFunParamElementType(Function *F,
2577 unsigned OpIdx) {
2578 std::unordered_set<Function *> FVisited;
2579 return deduceFunParamElementType(F, OpIdx, FVisited);
2580}
2581
2582Type *SPIRVEmitIntrinsics::deduceFunParamElementType(
2583 Function *F, unsigned OpIdx, std::unordered_set<Function *> &FVisited) {
2584 // maybe a cycle
2585 if (!FVisited.insert(F).second)
2586 return nullptr;
2587
2588 std::unordered_set<Value *> Visited;
2589 SmallVector<std::pair<Function *, unsigned>> Lookup;
2590 // search in function's call sites
2591 for (User *U : F->users()) {
2592 CallInst *CI = dyn_cast<CallInst>(U);
2593 if (!CI || OpIdx >= CI->arg_size())
2594 continue;
2595 Value *OpArg = CI->getArgOperand(OpIdx);
2596 if (!isPointerTy(OpArg->getType()))
2597 continue;
2598 // maybe we already know operand's element type
2599 if (Type *KnownTy = GR->findDeducedElementType(OpArg))
2600 return KnownTy;
2601 // try to deduce from the operand itself
2602 Visited.clear();
2603 if (Type *Ty = deduceElementTypeHelper(OpArg, Visited, false))
2604 return Ty;
2605 // search in actual parameter's users
2606 for (User *OpU : OpArg->users()) {
2607 Instruction *Inst = dyn_cast<Instruction>(OpU);
2608 if (!Inst || Inst == CI)
2609 continue;
2610 Visited.clear();
2611 if (Type *Ty = deduceElementTypeHelper(Inst, Visited, false))
2612 return Ty;
2613 }
2614 // check if it's a formal parameter of the outer function
2615 if (!CI->getParent() || !CI->getParent()->getParent())
2616 continue;
2617 Function *OuterF = CI->getParent()->getParent();
2618 if (FVisited.find(OuterF) != FVisited.end())
2619 continue;
2620 for (unsigned i = 0; i < OuterF->arg_size(); ++i) {
2621 if (OuterF->getArg(i) == OpArg) {
2622 Lookup.push_back(std::make_pair(OuterF, i));
2623 break;
2624 }
2625 }
2626 }
2627
2628 // search in function parameters
2629 for (auto &Pair : Lookup) {
2630 if (Type *Ty = deduceFunParamElementType(Pair.first, Pair.second, FVisited))
2631 return Ty;
2632 }
2633
2634 return nullptr;
2635}
2636
2637void SPIRVEmitIntrinsics::processParamTypesByFunHeader(Function *F,
2638 IRBuilder<> &B) {
2639 B.SetInsertPointPastAllocas(F);
2640 for (unsigned OpIdx = 0; OpIdx < F->arg_size(); ++OpIdx) {
2641 Argument *Arg = F->getArg(OpIdx);
2642 if (!isUntypedPointerTy(Arg->getType()))
2643 continue;
2644 Type *ElemTy = GR->findDeducedElementType(Arg);
2645 if (ElemTy)
2646 continue;
2647 if (hasPointeeTypeAttr(Arg) &&
2648 (ElemTy = getPointeeTypeByAttr(Arg)) != nullptr) {
2649 GR->buildAssignPtr(B, ElemTy, Arg);
2650 continue;
2651 }
2652 // search in function's call sites
2653 for (User *U : F->users()) {
2654 CallInst *CI = dyn_cast<CallInst>(U);
2655 if (!CI || OpIdx >= CI->arg_size())
2656 continue;
2657 Value *OpArg = CI->getArgOperand(OpIdx);
2658 if (!isPointerTy(OpArg->getType()))
2659 continue;
2660 // maybe we already know operand's element type
2661 if ((ElemTy = GR->findDeducedElementType(OpArg)) != nullptr)
2662 break;
2663 }
2664 if (ElemTy) {
2665 GR->buildAssignPtr(B, ElemTy, Arg);
2666 continue;
2667 }
2668 if (HaveFunPtrs) {
2669 for (User *U : Arg->users()) {
2670 CallInst *CI = dyn_cast<CallInst>(U);
2671 if (CI && !isa<IntrinsicInst>(CI) && CI->isIndirectCall() &&
2672 CI->getCalledOperand() == Arg &&
2673 CI->getParent()->getParent() == CurrF) {
2674 SmallVector<std::pair<Value *, unsigned>> Ops;
2675 deduceOperandElementTypeFunctionPointer(CI, Ops, ElemTy, false);
2676 if (ElemTy) {
2677 GR->buildAssignPtr(B, ElemTy, Arg);
2678 break;
2679 }
2680 }
2681 }
2682 }
2683 }
2684}
2685
2686void SPIRVEmitIntrinsics::processParamTypes(Function *F, IRBuilder<> &B) {
2687 B.SetInsertPointPastAllocas(F);
2688 for (unsigned OpIdx = 0; OpIdx < F->arg_size(); ++OpIdx) {
2689 Argument *Arg = F->getArg(OpIdx);
2690 if (!isUntypedPointerTy(Arg->getType()))
2691 continue;
2692 Type *ElemTy = GR->findDeducedElementType(Arg);
2693 if (!ElemTy && (ElemTy = deduceFunParamElementType(F, OpIdx)) != nullptr) {
2694 if (CallInst *AssignCI = GR->findAssignPtrTypeInstr(Arg)) {
2695 DenseSet<std::pair<Value *, Value *>> VisitedSubst;
2696 GR->updateAssignType(AssignCI, Arg, getNormalizedPoisonValue(ElemTy));
2697 propagateElemType(Arg, IntegerType::getInt8Ty(F->getContext()),
2698 VisitedSubst);
2699 } else {
2700 GR->buildAssignPtr(B, ElemTy, Arg);
2701 }
2702 }
2703 }
2704}
2705
2706 static FunctionType *getFunctionPointerElemType(Function *F,
2707 SPIRVGlobalRegistry *GR) {
2708 FunctionType *FTy = F->getFunctionType();
2709 bool IsNewFTy = false;
2710 SmallVector<Type *, 4> ArgTys;
2711 for (Argument &Arg : F->args()) {
2712 Type *ArgTy = Arg.getType();
2713 if (ArgTy->isPointerTy())
2714 if (Type *ElemTy = GR->findDeducedElementType(&Arg)) {
2715 IsNewFTy = true;
2716 ArgTy = getTypedPointerWrapper(ElemTy, getPointerAddressSpace(ArgTy));
2717 }
2718 ArgTys.push_back(ArgTy);
2719 }
2720 return IsNewFTy
2721 ? FunctionType::get(FTy->getReturnType(), ArgTys, FTy->isVarArg())
2722 : FTy;
2723}
2724
2725bool SPIRVEmitIntrinsics::processFunctionPointers(Module &M) {
2726 SmallVector<Function *> Worklist;
2727 for (auto &F : M) {
2728 if (F.isIntrinsic())
2729 continue;
2730 if (F.isDeclaration()) {
2731 for (User *U : F.users()) {
2732 CallInst *CI = dyn_cast<CallInst>(U);
2733 if (!CI || CI->getCalledFunction() != &F) {
2734 Worklist.push_back(&F);
2735 break;
2736 }
2737 }
2738 } else {
2739 if (F.user_empty())
2740 continue;
2741 Type *FPElemTy = GR->findDeducedElementType(&F);
2742 if (!FPElemTy)
2743 FPElemTy = getFunctionPointerElemType(&F, GR);
2744 for (User *U : F.users()) {
2745 IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
2746 if (!II || II->arg_size() != 3 || II->getOperand(0) != &F)
2747 continue;
2748 if (II->getIntrinsicID() == Intrinsic::spv_assign_ptr_type ||
2749 II->getIntrinsicID() == Intrinsic::spv_ptrcast) {
2751 break;
2752 }
2753 }
2754 }
2755 }
2756 if (Worklist.empty())
2757 return false;
2758
2759 std::string ServiceFunName = SPIRV_BACKEND_SERVICE_FUN_NAME;
2760 if (!getVacantFunctionName(M, ServiceFunName))
2761 report_fatal_error(
2762 "cannot allocate a name for the internal service function");
2763 LLVMContext &Ctx = M.getContext();
2764 Function *SF =
2765 Function::Create(FunctionType::get(Type::getVoidTy(Ctx), {}, false),
2766 GlobalValue::PrivateLinkage, ServiceFunName, M);
2767 SF->addFnAttr(SPIRV_BACKEND_SERVICE_FUN_NAME, "");
2768 BasicBlock *BB = BasicBlock::Create(Ctx, "entry", SF);
2769 IRBuilder<> IRB(BB);
2770
2771 for (Function *F : Worklist) {
2772 SmallVector<Value *> Args;
2773 for (const auto &Arg : F->args())
2774 Args.push_back(getNormalizedPoisonValue(Arg.getType()));
2775 IRB.CreateCall(F, Args);
2776 }
2777 IRB.CreateRetVoid();
2778
2779 return true;
2780}
2781
2782// Apply types parsed from demangled function declarations.
2783void SPIRVEmitIntrinsics::applyDemangledPtrArgTypes(IRBuilder<> &B) {
2784 DenseMap<Function *, CallInst *> Ptrcasts;
2785 for (auto It : FDeclPtrTys) {
2786 Function *F = It.first;
2787 for (auto *U : F->users()) {
2788 CallInst *CI = dyn_cast<CallInst>(U);
2789 if (!CI || CI->getCalledFunction() != F)
2790 continue;
2791 unsigned Sz = CI->arg_size();
2792 for (auto [Idx, ElemTy] : It.second) {
2793 if (Idx >= Sz)
2794 continue;
2795 Value *Param = CI->getArgOperand(Idx);
2796 if (GR->findDeducedElementType(Param) || isa<GlobalValue>(Param))
2797 continue;
2798 if (Argument *Arg = dyn_cast<Argument>(Param)) {
2799 if (!hasPointeeTypeAttr(Arg)) {
2800 B.SetInsertPointPastAllocas(Arg->getParent());
2801 B.SetCurrentDebugLocation(DebugLoc());
2802 GR->buildAssignPtr(B, ElemTy, Arg);
2803 }
2804 } else if (isa<GetElementPtrInst>(Param)) {
2805 replaceUsesOfWithSpvPtrcast(Param, normalizeType(ElemTy), CI,
2806 Ptrcasts);
2807 } else if (isa<Instruction>(Param)) {
2808 GR->addDeducedElementType(Param, normalizeType(ElemTy));
2809 // insertAssignTypeIntrs() will complete buildAssignPtr()
2810 } else {
2811 B.SetInsertPoint(CI->getParent()
2812 ->getParent()
2813 ->getEntryBlock()
2814 .getFirstNonPHIOrDbgOrAlloca());
2815 GR->buildAssignPtr(B, ElemTy, Param);
2816 }
2817 CallInst *Ref = dyn_cast<CallInst>(Param);
2818 if (!Ref)
2819 continue;
2820 Function *RefF = Ref->getCalledFunction();
2821 if (!RefF || !isPointerTy(RefF->getReturnType()) ||
2822 GR->findDeducedElementType(RefF))
2823 continue;
2824 ElemTy = normalizeType(ElemTy);
2825 GR->addDeducedElementType(RefF, ElemTy);
2826 GR->addReturnType(
2827 RefF, TypedPointerType::get(
2828 ElemTy, getPointerAddressSpace(RefF->getReturnType())));
2829 }
2830 }
2831 }
2832}
2833
2834GetElementPtrInst *
2835SPIRVEmitIntrinsics::simplifyZeroLengthArrayGepInst(GetElementPtrInst *GEP) {
2836 // getelementptr [0 x T], P, 0 (zero), I -> getelementptr T, P, I.
2837 // If type is 0-length array and first index is 0 (zero), drop both the
2838 // 0-length array type and the first index. This is a common pattern in
2839 // the IR, e.g. when using a zero-length array as a placeholder for a
2840 // flexible array member or other unbounded array.
2841 assert(GEP && "GEP is null");
2842 Type *SrcTy = GEP->getSourceElementType();
2843 SmallVector<Value *, 8> Indices(GEP->indices());
2844 ArrayType *ArrTy = dyn_cast<ArrayType>(SrcTy);
2845 if (ArrTy && ArrTy->getNumElements() == 0 &&
2847 Indices.erase(Indices.begin());
2848 SrcTy = ArrTy->getElementType();
2849 return GetElementPtrInst::Create(SrcTy, GEP->getPointerOperand(), Indices,
2850 GEP->getNoWrapFlags(), "",
2851 GEP->getIterator());
2852 }
2853 return nullptr;
2854}
2855
2856bool SPIRVEmitIntrinsics::runOnFunction(Function &Func) {
2857 if (Func.isDeclaration())
2858 return false;
2859
2860 const SPIRVSubtarget &ST = TM->getSubtarget<SPIRVSubtarget>(Func);
2861 GR = ST.getSPIRVGlobalRegistry();
2862
2863 if (!CurrF)
2864 HaveFunPtrs =
2865 ST.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers);
2866
2867 CurrF = &Func;
2868 IRBuilder<> B(Func.getContext());
2869 AggrConsts.clear();
2870 AggrConstTypes.clear();
2871 AggrStores.clear();
2872
2873 // Fix GEP result types ahead of inference, and simplify if possible.
2874 // Data structure for dead instructions that were simplified and replaced.
2875 SmallPtrSet<Instruction *, 4> DeadInsts;
2876 for (auto &I : instructions(Func)) {
2877 GetElementPtrInst *Ref = dyn_cast<GetElementPtrInst>(&I);
2878 if (!Ref || GR->findDeducedElementType(Ref))
2879 continue;
2880
2881 GetElementPtrInst *NewGEP = simplifyZeroLengthArrayGepInst(Ref);
2882 if (NewGEP) {
2883 Ref->replaceAllUsesWith(NewGEP);
2884 DeadInsts.insert(Ref);
2885 Ref = NewGEP;
2886 }
2887 if (Type *GepTy = getGEPType(Ref))
2888 GR->addDeducedElementType(Ref, normalizeType(GepTy));
2889 }
2890 // Remove dead instructions that were simplified and replaced.
2891 for (auto *I : DeadInsts) {
2892 assert(I->use_empty() && "Dead instruction should not have any uses left");
2893 I->eraseFromParent();
2894 }
2895
2896 processParamTypesByFunHeader(CurrF, B);
2897
2898 // A StoreInst's operand type can be changed during subsequent
2899 // transformations, so we need to store it in the set. Also store already
2900 // transformed types.
2901 for (auto &I : instructions(Func)) {
2902 StoreInst *SI = dyn_cast<StoreInst>(&I);
2903 if (!SI)
2904 continue;
2905 Type *ElTy = SI->getValueOperand()->getType();
2906 if (ElTy->isAggregateType() || ElTy->isVectorTy())
2907 AggrStores.insert(&I);
2908 }
2909
2910 B.SetInsertPoint(&Func.getEntryBlock(), Func.getEntryBlock().begin());
2911 for (auto &GV : Func.getParent()->globals())
2912 processGlobalValue(GV, B);
2913
2914 preprocessUndefs(B);
2915 preprocessCompositeConstants(B);
2916 SmallVector<Instruction *> Worklist(
2917 llvm::make_pointer_range(instructions(Func)));
2918
2919 applyDemangledPtrArgTypes(B);
2920
2921 // Pass forward: use operand to deduce instructions result.
2922 for (auto &I : Worklist) {
2923 // Don't emit intrinsics for convergence intrinsics.
2924 if (isConvergenceIntrinsic(I))
2925 continue;
2926
2927 bool Postpone = insertAssignPtrTypeIntrs(I, B, false);
2928 // if Postpone is true, we can't decide on pointee type yet
2929 insertAssignTypeIntrs(I, B);
2930 insertPtrCastOrAssignTypeInstr(I, B);
2931 insertSpirvDecorations(I, B);
2932 // if instruction requires a pointee type set, let's check if we know it
2933 // already, and force it to be i8 if not
2934 if (Postpone && !GR->findAssignPtrTypeInstr(I))
2935 insertAssignPtrTypeIntrs(I, B, true);
2936
2937 if (auto *FPI = dyn_cast<ConstrainedFPIntrinsic>(I))
2938 useRoundingMode(FPI, B);
2939 }
2940
2941 // Pass backward: use instructions results to specify/update/cast operands
2942 // where needed.
2943 SmallPtrSet<Instruction *, 4> IncompleteRets;
2944 for (auto &I : llvm::reverse(instructions(Func)))
2945 deduceOperandElementType(&I, &IncompleteRets);
2946
2947 // Forward pass for PHIs only; their operands do not necessarily precede
2948 // the instruction in the order given by `instructions(Func)`.
2949 for (BasicBlock &BB : Func)
2950 for (PHINode &Phi : BB.phis())
2951 if (isPointerTy(Phi.getType()))
2952 deduceOperandElementType(&Phi, nullptr);
2953
2954 for (auto *I : Worklist) {
2955 TrackConstants = true;
2956 if (!I->getType()->isVoidTy() || isa<StoreInst>(I))
2957 setInsertPointAfterDef(B, I);
2958 // Visitors return either the original or a newly created instruction for
2959 // further processing, or nullptr otherwise.
2960 I = visit(*I);
2961 if (!I)
2962 continue;
2963
2964 // Don't emit intrinsics for convergence operations.
2965 if (isConvergenceIntrinsic(I))
2966 continue;
2967
2969 processInstrAfterVisit(I, B);
2970 }
2971
2972 return true;
2973}
2974
2975 // Try to deduce a better element type for untyped pointers.
2976bool SPIRVEmitIntrinsics::postprocessTypes(Module &M) {
2977 if (!GR || TodoTypeSz == 0)
2978 return false;
2979
2980 unsigned SzTodo = TodoTypeSz;
2981 DenseMap<Value *, SmallPtrSet<Value *, 4>> ToProcess;
2982 for (auto [Op, Enabled] : TodoType) {
2983 // TODO: add isa<CallInst>(Op) to continue
2984 if (!Enabled || isa<GetElementPtrInst>(Op))
2985 continue;
2986 CallInst *AssignCI = GR->findAssignPtrTypeInstr(Op);
2987 Type *KnownTy = GR->findDeducedElementType(Op);
2988 if (!KnownTy || !AssignCI)
2989 continue;
2990 assert(Op == AssignCI->getArgOperand(0));
2991 // Try to improve the type deduced after all Functions are processed.
2992 if (auto *CI = dyn_cast<Instruction>(Op)) {
2993 CurrF = CI->getParent()->getParent();
2994 std::unordered_set<Value *> Visited;
2995 if (Type *ElemTy = deduceElementTypeHelper(Op, Visited, false, true)) {
2996 if (ElemTy != KnownTy) {
2997 DenseSet<std::pair<Value *, Value *>> VisitedSubst;
2998 propagateElemType(CI, ElemTy, VisitedSubst);
2999 eraseTodoType(Op);
3000 continue;
3001 }
3002 }
3003 }
3004
3005 if (Op->hasUseList()) {
3006 for (User *U : Op->users()) {
3007 Instruction *Inst = dyn_cast<Instruction>(U);
3008 if (Inst && !isa<IntrinsicInst>(Inst))
3009 ToProcess[Inst].insert(Op);
3010 }
3011 }
3012 }
3013 if (TodoTypeSz == 0)
3014 return true;
3015
3016 for (auto &F : M) {
3017 CurrF = &F;
3018 SmallPtrSet<Instruction *, 4> IncompleteRets;
3019 for (auto &I : llvm::reverse(instructions(F))) {
3020 auto It = ToProcess.find(&I);
3021 if (It == ToProcess.end())
3022 continue;
3023 It->second.remove_if([this](Value *V) { return !isTodoType(V); });
3024 if (It->second.size() == 0)
3025 continue;
3026 deduceOperandElementType(&I, &IncompleteRets, &It->second, true);
3027 if (TodoTypeSz == 0)
3028 return true;
3029 }
3030 }
3031
3032 return SzTodo > TodoTypeSz;
3033}
3034
3035// Parse and store argument types of function declarations where needed.
3036void SPIRVEmitIntrinsics::parseFunDeclarations(Module &M) {
3037 for (auto &F : M) {
3038 if (!F.isDeclaration() || F.isIntrinsic())
3039 continue;
3040 // get the demangled name
3041 std::string DemangledName = getOclOrSpirvBuiltinDemangledName(F.getName());
3042 if (DemangledName.empty())
3043 continue;
3044 // allow only OpGroupAsyncCopy use case at the moment
3045 const SPIRVSubtarget &ST = TM->getSubtarget<SPIRVSubtarget>(F);
3046 auto [Grp, Opcode, ExtNo] = SPIRV::mapBuiltinToOpcode(
3047 DemangledName, ST.getPreferredInstructionSet());
3048 if (Opcode != SPIRV::OpGroupAsyncCopy)
3049 continue;
3050 // find pointer arguments
3051 SmallVector<unsigned> Idxs;
3052 for (unsigned OpIdx = 0; OpIdx < F.arg_size(); ++OpIdx) {
3053 Argument *Arg = F.getArg(OpIdx);
3054 if (isPointerTy(Arg->getType()) && !hasPointeeTypeAttr(Arg))
3055 Idxs.push_back(OpIdx);
3056 }
3057 if (!Idxs.size())
3058 continue;
3059 // parse function arguments
3060 LLVMContext &Ctx = F.getContext();
3061 SmallVector<StringRef> TypeStrs;
3062 SPIRV::parseBuiltinTypeStr(TypeStrs, DemangledName, Ctx);
3063 if (!TypeStrs.size())
3064 continue;
3065 // find type info for pointer arguments
3066 for (unsigned Idx : Idxs) {
3067 if (Idx >= TypeStrs.size())
3068 continue;
3069 if (Type *ElemTy =
3070 SPIRV::parseBuiltinCallArgumentType(TypeStrs[Idx].trim(), Ctx))
3071 if (TypedPointerType::isValidElementType(ElemTy) &&
3072 !ElemTy->isTargetExtTy())
3073 FDeclPtrTys[&F].push_back(std::make_pair(Idx, ElemTy));
3074 }
3075 }
3076}
3077
3078bool SPIRVEmitIntrinsics::runOnModule(Module &M) {
3079 bool Changed = false;
3080
3081 parseFunDeclarations(M);
3082 insertConstantsForFPFastMathDefault(M);
3083
3084 TodoType.clear();
3085 for (auto &F : M)
3086 Changed |= runOnFunction(F);
3087
3088 // Specify function parameters after all functions were processed.
3089 for (auto &F : M) {
3090 // check if function parameter types are set
3091 CurrF = &F;
3092 if (!F.isDeclaration() && !F.isIntrinsic()) {
3093 IRBuilder<> B(F.getContext());
3094 processParamTypes(&F, B);
3095 }
3096 }
3097
3098 CanTodoType = false;
3099 Changed |= postprocessTypes(M);
3100
3101 if (HaveFunPtrs)
3102 Changed |= processFunctionPointers(M);
3103
3104 return Changed;
3105}
3106
3107 ModulePass *llvm::createSPIRVEmitIntrinsicsPass(SPIRVTargetMachine *TM) {
3108 return new SPIRVEmitIntrinsics(TM);
3109}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
aarch64 promote const
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
always inline
Expand Atomic instructions
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
Definition CSEInfo.cpp:27
static void replaceAllUsesWith(Value *Old, Value *New, SmallPtrSet< BasicBlock *, 32 > &FreshBBs, bool IsHuge)
Replace all old uses with new ones, and push the updated BBs into FreshBBs.
This file defines the DenseSet and SmallDenseSet classes.
static bool runOnFunction(Function &F, bool PostInlining)
Hexagon Common GEP
iv Induction Variable Users
Definition IVUsers.cpp:48
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Machine Check Debug Module
#define T
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
Function * Fun
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
static unsigned getNumElements(Type *Ty)
static bool isMemInstrToReplace(Instruction *I)
static bool isAggrConstForceInt32(const Value *V)
static SPIRV::FPFastMathDefaultInfoVector & getOrCreateFPFastMathDefaultInfoVec(const Module &M, DenseMap< Function *, SPIRV::FPFastMathDefaultInfoVector > &FPFastMathDefaultInfoMap, Function *F)
static Type * getAtomicElemTy(SPIRVGlobalRegistry *GR, Instruction *I, Value *PointerOperand)
static void reportFatalOnTokenType(const Instruction *I)
static void setInsertPointAfterDef(IRBuilder<> &B, Instruction *I)
static void emitAssignName(Instruction *I, IRBuilder<> &B)
static Type * getPointeeTypeByCallInst(StringRef DemangledName, Function *CalledF, unsigned OpIdx)
static void createRoundingModeDecoration(Instruction *I, unsigned RoundingModeDeco, IRBuilder<> &B)
static void createDecorationIntrinsic(Instruction *I, MDNode *Node, IRBuilder<> &B)
static SPIRV::FPFastMathDefaultInfo & getFPFastMathDefaultInfo(SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec, const Type *Ty)
static bool IsKernelArgInt8(Function *F, StoreInst *SI)
static void addSaturatedDecorationToIntrinsic(Instruction *I, IRBuilder<> &B)
static bool isFirstIndexZero(const GetElementPtrInst *GEP)
static void setInsertPointSkippingPhis(IRBuilder<> &B, Instruction *I)
static FunctionType * getFunctionPointerElemType(Function *F, SPIRVGlobalRegistry *GR)
static void createSaturatedConversionDecoration(Instruction *I, IRBuilder<> &B)
static Type * restoreMutatedType(SPIRVGlobalRegistry *GR, Instruction *I, Type *Ty)
static bool requireAssignType(Instruction *I)
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
static void insertSpirvDecorations(MachineFunction &MF, SPIRVGlobalRegistry *GR, MachineIRBuilder MIB)
#define SPIRV_BACKEND_SERVICE_FUN_NAME
Definition SPIRVUtils.h:523
StringSet - A set-like wrapper for the StringMap.
DEMANGLE_NAMESPACE_BEGIN bool starts_with(std::string_view self, char C) noexcept
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Definition VPlanSLP.cpp:247
static int Lookup(ArrayRef< TableEntry > Table, unsigned Opcode)