//===-- SPIRVEmitIntrinsics.cpp - emit SPIRV intrinsics ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The pass emits SPIRV intrinsics keeping essential high-level information for
// the translation of LLVM IR to SPIR-V.
//
//===----------------------------------------------------------------------===//

#include "SPIRV.h"
#include "SPIRVBuiltins.h"
#include "SPIRVSubtarget.h"
#include "SPIRVTargetMachine.h"
#include "SPIRVUtils.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicsSPIRV.h"
#include "llvm/IR/TypedPointerType.h"

#include <cassert>
#include <queue>
#include <unordered_set>

// This pass performs the following transformations on the LLVM IR level,
// required for the subsequent translation to SPIR-V:
// - replaces direct usages of aggregate constants with target-specific
//   intrinsics;
// - replaces aggregate-related instructions (extract/insert, ld/st, etc.)
//   with target-specific intrinsics;
// - emits intrinsics for the global variable initializers since IRTranslator
//   doesn't handle them and it's not very convenient to translate them
//   ourselves;
// - emits intrinsics to keep track of the string names assigned to the values;
// - emits intrinsics to keep track of constants (this is necessary to have an
//   LLVM IR constant after the IRTranslation is completed) for their further
//   deduplication;
// - emits intrinsics to keep track of original LLVM types of the values
//   to be able to emit proper SPIR-V types eventually.
//
// TODO: consider removing spv.track.constant in favor of spv.assign.type.

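// For illustration, a simplified sketch (not the exact emitted sequence): a
// direct use of an aggregate constant such as
//   store [2 x i32] [i32 1, i32 2], ptr %p
// is rewritten so that the constant is produced by a target-specific
// intrinsic and the store itself becomes intrinsic-based as well, e.g.
//   %agg = call i32 @llvm.spv.const.composite(i32 1, i32 2)
//   call void @llvm.spv.store(...)
// keeping the aggregate structure recoverable until instruction selection.
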
using namespace llvm;

namespace llvm::SPIRV {
#define GET_BuiltinGroup_DECL
#include "SPIRVGenTables.inc"
} // namespace llvm::SPIRV

namespace {

class SPIRVEmitIntrinsics
    : public ModulePass,
      public InstVisitor<SPIRVEmitIntrinsics, Instruction *> {
  SPIRVTargetMachine *TM = nullptr;
  SPIRVGlobalRegistry *GR = nullptr;
  Function *CurrF = nullptr;
  bool TrackConstants = true;
  bool HaveFunPtrs = false;
  DenseMap<Instruction *, Constant *> AggrConsts;
  DenseMap<Instruction *, Type *> AggrConstTypes;
  DenseSet<Instruction *> AggrStores;
  std::unordered_set<Value *> Named;

  // map of function declarations to <pointer arg index => element type>
  DenseMap<Function *, SmallVector<std::pair<unsigned, Type *>>> FDeclPtrTys;

  // a registry of instructions that don't have a complete type definition
  bool CanTodoType = true;
  unsigned TodoTypeSz = 0;
  DenseMap<Value *, bool> TodoType;
  void insertTodoType(Value *Op) {
    // TODO: add isa<CallInst>(Op) to the no-insert list
    if (CanTodoType && !isa<GetElementPtrInst>(Op)) {
      auto It = TodoType.try_emplace(Op, true);
      if (It.second)
        ++TodoTypeSz;
    }
  }
  void eraseTodoType(Value *Op) {
    auto It = TodoType.find(Op);
    if (It != TodoType.end() && It->second) {
      It->second = false;
      --TodoTypeSz;
    }
  }
  bool isTodoType(Value *Op) {
    if (TodoTypeSz == 0)
      return false;
    auto It = TodoType.find(Op);
    return It != TodoType.end() && It->second;
  }
  // a registry of instructions that were visited by deduceOperandElementType()
  // to validate operand types against the instruction
  std::unordered_set<Instruction *> TypeValidated;

  // well known result types of builtins
  enum WellKnownTypes { Event };

  // deduce element type of untyped pointers
  Type *deduceElementType(Value *I, bool UnknownElemTypeI8);
  Type *deduceElementTypeHelper(Value *I, bool UnknownElemTypeI8);
  Type *deduceElementTypeHelper(Value *I, std::unordered_set<Value *> &Visited,
                                bool UnknownElemTypeI8,
                                bool IgnoreKnownType = false);
  Type *deduceElementTypeByValueDeep(Type *ValueTy, Value *Operand,
                                     bool UnknownElemTypeI8);
  Type *deduceElementTypeByValueDeep(Type *ValueTy, Value *Operand,
                                     std::unordered_set<Value *> &Visited,
                                     bool UnknownElemTypeI8);
  Type *deduceElementTypeByUsersDeep(Value *Op,
                                     std::unordered_set<Value *> &Visited,
                                     bool UnknownElemTypeI8);
  void maybeAssignPtrType(Type *&Ty, Value *I, Type *RefTy,
                          bool UnknownElemTypeI8);

  // deduce nested types of composites
  Type *deduceNestedTypeHelper(User *U, bool UnknownElemTypeI8);
  Type *deduceNestedTypeHelper(User *U, Type *Ty,
                               std::unordered_set<Value *> &Visited,
                               bool UnknownElemTypeI8);

  // deduce Types of operands of the Instruction if possible
  void deduceOperandElementType(Instruction *I,
                                SmallPtrSet<Instruction *, 4> *IncompleteRets,
                                const SmallPtrSet<Value *, 4> *AskOps = nullptr,
                                bool IsPostprocessing = false);

  void preprocessCompositeConstants(IRBuilder<> &B);
  void preprocessUndefs(IRBuilder<> &B);

  Type *reconstructType(Value *Op, bool UnknownElemTypeI8,
                        bool IsPostprocessing);

  void replaceMemInstrUses(Instruction *Old, Instruction *New, IRBuilder<> &B);
  void processInstrAfterVisit(Instruction *I, IRBuilder<> &B);
  bool insertAssignPtrTypeIntrs(Instruction *I, IRBuilder<> &B,
                                bool UnknownElemTypeI8);
  void insertAssignTypeIntrs(Instruction *I, IRBuilder<> &B);
  void insertAssignPtrTypeTargetExt(TargetExtType *AssignedType, Value *V,
                                    IRBuilder<> &B);
  void replacePointerOperandWithPtrCast(Instruction *I, Value *Pointer,
                                        Type *ExpectedElementType,
                                        unsigned OperandToReplace,
                                        IRBuilder<> &B);
  void insertPtrCastOrAssignTypeInstr(Instruction *I, IRBuilder<> &B);
  bool shouldTryToAddMemAliasingDecoration(Instruction *Inst);
  void insertSpirvDecorations(Instruction *I, IRBuilder<> &B);
  void insertConstantsForFPFastMathDefault(Module &M);
  void processGlobalValue(GlobalVariable &GV, IRBuilder<> &B);
  void processParamTypes(Function *F, IRBuilder<> &B);
  void processParamTypesByFunHeader(Function *F, IRBuilder<> &B);
  Type *deduceFunParamElementType(Function *F, unsigned OpIdx);
  Type *deduceFunParamElementType(Function *F, unsigned OpIdx,
                                  std::unordered_set<Function *> &FVisited);

  bool deduceOperandElementTypeCalledFunction(
      CallInst *CI, SmallVector<std::pair<Value *, unsigned>> &Ops,
      Type *&KnownElemTy, bool &Incomplete);
  void deduceOperandElementTypeFunctionPointer(
      CallInst *CI, SmallVector<std::pair<Value *, unsigned>> &Ops,
      Type *&KnownElemTy, bool IsPostprocessing);
  bool deduceOperandElementTypeFunctionRet(
      Instruction *I, SmallPtrSet<Instruction *, 4> *IncompleteRets,
      const SmallPtrSet<Value *, 4> *AskOps, bool IsPostprocessing,
      Type *&KnownElemTy, Value *Op, Function *F);

  CallInst *buildSpvPtrcast(Function *F, Value *Op, Type *ElemTy);
  void replaceUsesOfWithSpvPtrcast(Value *Op, Type *ElemTy, Instruction *I,
                                   DenseMap<Function *, CallInst *> Ptrcasts);
  void propagateElemType(Value *Op, Type *ElemTy,
                         DenseSet<std::pair<Value *, Value *>> &VisitedSubst);
  void
  propagateElemTypeRec(Value *Op, Type *PtrElemTy, Type *CastElemTy,
                       DenseSet<std::pair<Value *, Value *>> &VisitedSubst);
  void propagateElemTypeRec(Value *Op, Type *PtrElemTy, Type *CastElemTy,
                            DenseSet<std::pair<Value *, Value *>> &VisitedSubst,
                            std::unordered_set<Value *> &Visited,
                            DenseMap<Function *, CallInst *> Ptrcasts);

  void replaceAllUsesWith(Value *Src, Value *Dest, bool DeleteOld = true);
  void replaceAllUsesWithAndErase(IRBuilder<> &B, Instruction *Src,
                                  Instruction *Dest, bool DeleteOld = true);

  void applyDemangledPtrArgTypes(IRBuilder<> &B);

  GetElementPtrInst *simplifyZeroLengthArrayGepInst(GetElementPtrInst *GEP);

  bool runOnFunction(Function &F);
  bool postprocessTypes(Module &M);
  bool processFunctionPointers(Module &M);
  void parseFunDeclarations(Module &M);

  void useRoundingMode(ConstrainedFPIntrinsic *FPI, IRBuilder<> &B);

  // Tries to walk the type accessed by the given GEP instruction.
  // For each nested type access, one of the 2 callbacks is called:
  //  - OnLiteralIndexing when the index is a known constant value.
  //    Parameters:
  //      PointedType: the pointed type resulting of this indexing.
  //      Index: index of the element in the parent type.
  //        If the parent type is an array, this is the index in the array.
  //        If the parent type is a struct, this is the field index.
  //  - OnDynamicIndexing when the index is a non-constant value.
  //    This callback is only called when indexing into an array.
  //    Parameters:
  //      ElementType: the type of the elements stored in the parent array.
  //      Offset: the Value* containing the byte offset into the array.
  // Returns true if an error occurred during the walk, false otherwise.
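  // For example (an illustrative sketch), if the source pointer is deduced
  // to point to { i32, [4 x float] } and the GEP is
  //   getelementptr i8, ptr %p, i64 8
  // the walk calls OnLiteralIndexing([4 x float], 1) for the struct field,
  // then OnLiteralIndexing(float, 1) for the element within the array.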
  bool walkLogicalAccessChain(
      GetElementPtrInst &GEP,
      const std::function<void(Type *PointedType, uint64_t Index)>
          &OnLiteralIndexing,
      const std::function<void(Type *ElementType, Value *Offset)>
          &OnDynamicIndexing);

  // Returns the type accessed using the given GEP instruction by relying
  // on the GEP type.
  // FIXME: GEP types are not supposed to be used to retrieve the pointed
  // type. This must be fixed.
  Type *getGEPType(GetElementPtrInst *GEP);

  // Returns the type accessed using the given GEP instruction by walking
  // the source type using the GEP indices.
  // FIXME: without help from the frontend, this method cannot reliably
  // retrieve the stored type, nor can it robustly determine the depth of the
  // type we are accessing.
  Type *getGEPTypeLogical(GetElementPtrInst *GEP);

  Instruction *buildLogicalAccessChainFromGEP(GetElementPtrInst &GEP);

public:
  static char ID;
  SPIRVEmitIntrinsics(SPIRVTargetMachine *TM = nullptr)
      : ModulePass(ID), TM(TM) {}
  Instruction *visitInstruction(Instruction &I) { return &I; }
  Instruction *visitSwitchInst(SwitchInst &I);
  Instruction *visitGetElementPtrInst(GetElementPtrInst &I);
  Instruction *visitBitCastInst(BitCastInst &I);
  Instruction *visitInsertElementInst(InsertElementInst &I);
  Instruction *visitExtractElementInst(ExtractElementInst &I);
  Instruction *visitInsertValueInst(InsertValueInst &I);
  Instruction *visitExtractValueInst(ExtractValueInst &I);
  Instruction *visitLoadInst(LoadInst &I);
  Instruction *visitStoreInst(StoreInst &I);
  Instruction *visitAllocaInst(AllocaInst &I);
  Instruction *visitAtomicCmpXchgInst(AtomicCmpXchgInst &I);
  Instruction *visitUnreachableInst(UnreachableInst &I);
  Instruction *visitCallInst(CallInst &I);

  StringRef getPassName() const override { return "SPIRV emit intrinsics"; }

  bool runOnModule(Module &M) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    ModulePass::getAnalysisUsage(AU);
  }
};

bool isConvergenceIntrinsic(const Instruction *I) {
  const auto *II = dyn_cast<IntrinsicInst>(I);
  if (!II)
    return false;

  return II->getIntrinsicID() == Intrinsic::experimental_convergence_entry ||
         II->getIntrinsicID() == Intrinsic::experimental_convergence_loop ||
         II->getIntrinsicID() == Intrinsic::experimental_convergence_anchor;
}

bool expectIgnoredInIRTranslation(const Instruction *I) {
  const auto *II = dyn_cast<IntrinsicInst>(I);
  if (!II)
    return false;
  switch (II->getIntrinsicID()) {
  case Intrinsic::invariant_start:
  case Intrinsic::spv_resource_handlefrombinding:
  case Intrinsic::spv_resource_getpointer:
    return true;
  default:
    return false;
  }
}

// Returns the source pointer from `I` ignoring intermediate ptrcast.
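// For example (an illustrative sketch), given a chain of casts such as
//   %q = call ptr @llvm.spv.ptrcast(ptr %p, metadata ..., i32 ...)
//   %r = call ptr @llvm.spv.ptrcast(ptr %q, metadata ..., i32 ...)
// getPointerRoot(%r) unwinds both casts recursively and returns %p.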
Value *getPointerRoot(Value *I) {
  if (auto *II = dyn_cast<IntrinsicInst>(I)) {
    if (II->getIntrinsicID() == Intrinsic::spv_ptrcast) {
      Value *V = II->getArgOperand(0);
      return getPointerRoot(V);
    }
  }
  return I;
}

} // namespace

char SPIRVEmitIntrinsics::ID = 0;

INITIALIZE_PASS(SPIRVEmitIntrinsics, "emit-intrinsics", "SPIRV emit intrinsics",
                false, false)

static inline bool isAssignTypeInstr(const Instruction *I) {
  return isa<IntrinsicInst>(I) &&
         cast<IntrinsicInst>(I)->getIntrinsicID() == Intrinsic::spv_assign_type;
}

static bool isMemInstrToReplace(Instruction *I) {
  return isa<StoreInst>(I) || isa<LoadInst>(I) || isa<InsertValueInst>(I) ||
         isa<ExtractValueInst>(I) || isa<AtomicCmpXchgInst>(I);
}

static bool isAggrConstForceInt32(const Value *V) {
  return isa<ConstantArray>(V) || isa<ConstantStruct>(V) ||
         isa<ConstantDataArray>(V) ||
         (isa<ConstantAggregateZero>(V) && !V->getType()->isVectorTy());
}

static void setInsertPointSkippingPhis(IRBuilder<> &B, Instruction *I) {
  if (isa<PHINode>(I))
    B.SetInsertPoint(I->getParent()->getFirstNonPHIOrDbgOrAlloca());
  else
    B.SetInsertPoint(I);
}

static void setInsertPointAfterDef(IRBuilder<> &B, Instruction *I) {
  B.SetCurrentDebugLocation(I->getDebugLoc());
  if (I->getType()->isVoidTy())
    B.SetInsertPoint(I->getNextNode());
  else
    B.SetInsertPoint(*I->getInsertionPointAfterDef());
}

static bool requireAssignType(Instruction *I) {
  if (const auto *Intr = dyn_cast<IntrinsicInst>(I)) {
    switch (Intr->getIntrinsicID()) {
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
      return false;
    }
  }
  return true;
}

static inline void reportFatalOnTokenType(const Instruction *I) {
  if (I->getType()->isTokenTy())
    report_fatal_error("A token is encountered but SPIR-V without extensions "
                       "does not support token type",
                       false);
}

static void emitAssignName(Instruction *I, IRBuilder<> &B) {
  if (!I->hasName() || I->getType()->isAggregateType() ||
      expectIgnoredInIRTranslation(I))
    return;
  reportFatalOnTokenType(I);
  setInsertPointAfterDef(B, I);
  LLVMContext &Ctx = I->getContext();
  std::vector<Value *> Args = {
      I, MetadataAsValue::get(
             Ctx, MDNode::get(Ctx, MDString::get(Ctx, I->getName())))};
  B.CreateIntrinsic(Intrinsic::spv_assign_name, {I->getType()}, Args);
}

void SPIRVEmitIntrinsics::replaceAllUsesWith(Value *Src, Value *Dest,
                                             bool DeleteOld) {
  GR->replaceAllUsesWith(Src, Dest, DeleteOld);
  // Update incomplete type records, if any
  if (isTodoType(Src)) {
    if (DeleteOld)
      eraseTodoType(Src);
    insertTodoType(Dest);
  }
}

void SPIRVEmitIntrinsics::replaceAllUsesWithAndErase(IRBuilder<> &B,
                                                     Instruction *Src,
                                                     Instruction *Dest,
                                                     bool DeleteOld) {
  replaceAllUsesWith(Src, Dest, DeleteOld);
  std::string Name = Src->hasName() ? Src->getName().str() : "";
  Src->eraseFromParent();
  if (!Name.empty()) {
    Dest->setName(Name);
    if (Named.insert(Dest).second)
      emitAssignName(Dest, B);
  }
}

static bool IsKernelArgInt8(Function *F, StoreInst *SI) {
  return SI && F->getCallingConv() == CallingConv::SPIR_KERNEL &&
         isPointerTy(SI->getValueOperand()->getType()) &&
         isa<Argument>(SI->getValueOperand());
}

// Maybe restore original function return type.
static inline Type *restoreMutatedType(SPIRVGlobalRegistry *GR, Instruction *I,
                                       Type *Ty) {
  CallInst *CI = dyn_cast<CallInst>(I);
  if (!CI || CI->isIndirectCall() || CI->isInlineAsm() ||
      !CI->getCalledFunction() || CI->getCalledFunction()->isIntrinsic())
    return Ty;
  if (Type *OriginalTy = GR->findMutated(CI->getCalledFunction()))
    return OriginalTy;
  return Ty;
}

// Reconstruct type with nested element types according to deduced type info.
// Return nullptr if no detailed type info is available.
Type *SPIRVEmitIntrinsics::reconstructType(Value *Op, bool UnknownElemTypeI8,
                                           bool IsPostprocessing) {
  Type *Ty = Op->getType();
  if (auto *OpI = dyn_cast<Instruction>(Op))
    Ty = restoreMutatedType(GR, OpI, Ty);
  if (!isUntypedPointerTy(Ty))
    return Ty;
  // try to find the pointee type
  if (Type *NestedTy = GR->findDeducedElementType(Op))
    return getTypedPointerWrapper(NestedTy, getPointerAddressSpace(Ty));
  // not a pointer according to the type info (e.g., Event object)
  CallInst *CI = GR->findAssignPtrTypeInstr(Op);
  if (CI) {
    MetadataAsValue *MD = cast<MetadataAsValue>(CI->getArgOperand(1));
    return cast<ConstantAsMetadata>(MD->getMetadata())->getType();
  }
  if (UnknownElemTypeI8) {
    if (!IsPostprocessing)
      insertTodoType(Op);
    return getTypedPointerWrapper(IntegerType::getInt8Ty(Op->getContext()),
                                  getPointerAddressSpace(Ty));
  }
  return nullptr;
}

CallInst *SPIRVEmitIntrinsics::buildSpvPtrcast(Function *F, Value *Op,
                                               Type *ElemTy) {
  IRBuilder<> B(Op->getContext());
  if (auto *OpI = dyn_cast<Instruction>(Op)) {
    // spv_ptrcast's argument Op denotes an instruction that generates
    // a value, and we may use getInsertionPointAfterDef()
    setInsertPointAfterDef(B, OpI);
  } else if (auto *OpA = dyn_cast<Argument>(Op)) {
    B.SetInsertPointPastAllocas(OpA->getParent());
    B.SetCurrentDebugLocation(DebugLoc());
  } else {
    B.SetInsertPoint(F->getEntryBlock().getFirstNonPHIOrDbgOrAlloca());
  }
  Type *OpTy = Op->getType();
  SmallVector<Type *, 2> Types = {OpTy, OpTy};
  SmallVector<Value *, 2> Args = {Op, buildMD(getNormalizedPoisonValue(ElemTy)),
                                  B.getInt32(getPointerAddressSpace(OpTy))};
  CallInst *PtrCasted =
      B.CreateIntrinsic(Intrinsic::spv_ptrcast, {Types}, Args);
  GR->buildAssignPtr(B, ElemTy, PtrCasted);
  return PtrCasted;
}

void SPIRVEmitIntrinsics::replaceUsesOfWithSpvPtrcast(
    Value *Op, Type *ElemTy, Instruction *I,
    DenseMap<Function *, CallInst *> Ptrcasts) {
  Function *F = I->getParent()->getParent();
  CallInst *PtrCastedI = nullptr;
  auto It = Ptrcasts.find(F);
  if (It == Ptrcasts.end()) {
    PtrCastedI = buildSpvPtrcast(F, Op, ElemTy);
    Ptrcasts[F] = PtrCastedI;
  } else {
    PtrCastedI = It->second;
  }
  I->replaceUsesOfWith(Op, PtrCastedI);
}

void SPIRVEmitIntrinsics::propagateElemType(
    Value *Op, Type *ElemTy,
    DenseSet<std::pair<Value *, Value *>> &VisitedSubst) {
  DenseMap<Function *, CallInst *> Ptrcasts;
  SmallVector<User *> Users(Op->users());
  for (auto *U : Users) {
    if (!isa<Instruction>(U) || isSpvIntrinsic(U))
      continue;
    if (!VisitedSubst.insert(std::make_pair(U, Op)).second)
      continue;
    Instruction *UI = cast<Instruction>(U);
    // If the instruction was validated already, we need to keep it valid by
    // keeping current Op type.
    if (isa<GetElementPtrInst>(UI) ||
        TypeValidated.find(UI) != TypeValidated.end())
      replaceUsesOfWithSpvPtrcast(Op, ElemTy, UI, Ptrcasts);
  }
}

void SPIRVEmitIntrinsics::propagateElemTypeRec(
    Value *Op, Type *PtrElemTy, Type *CastElemTy,
    DenseSet<std::pair<Value *, Value *>> &VisitedSubst) {
  std::unordered_set<Value *> Visited;
  DenseMap<Function *, CallInst *> Ptrcasts;
  propagateElemTypeRec(Op, PtrElemTy, CastElemTy, VisitedSubst, Visited,
                       std::move(Ptrcasts));
}

void SPIRVEmitIntrinsics::propagateElemTypeRec(
    Value *Op, Type *PtrElemTy, Type *CastElemTy,
    DenseSet<std::pair<Value *, Value *>> &VisitedSubst,
    std::unordered_set<Value *> &Visited,
    DenseMap<Function *, CallInst *> Ptrcasts) {
  if (!Visited.insert(Op).second)
    return;
  SmallVector<User *> Users(Op->users());
  for (auto *U : Users) {
    if (!isa<Instruction>(U) || isSpvIntrinsic(U))
      continue;
    if (!VisitedSubst.insert(std::make_pair(U, Op)).second)
      continue;
    Instruction *UI = cast<Instruction>(U);
    // If the instruction was validated already, we need to keep it valid by
    // keeping current Op type.
    if (isa<GetElementPtrInst>(UI) ||
        TypeValidated.find(UI) != TypeValidated.end())
      replaceUsesOfWithSpvPtrcast(Op, CastElemTy, UI, Ptrcasts);
  }
}

// Sets the element pointer type to the given value of ValueTy and tries to
// specify this type further (recursively) by the Operand value, if needed.
Type *
SPIRVEmitIntrinsics::deduceElementTypeByValueDeep(Type *ValueTy, Value *Operand,
                                                  bool UnknownElemTypeI8) {
  std::unordered_set<Value *> Visited;
  return deduceElementTypeByValueDeep(ValueTy, Operand, Visited,
                                      UnknownElemTypeI8);
}

Type *SPIRVEmitIntrinsics::deduceElementTypeByValueDeep(
    Type *ValueTy, Value *Operand, std::unordered_set<Value *> &Visited,
    bool UnknownElemTypeI8) {
  Type *Ty = ValueTy;
  if (Operand) {
    if (auto *PtrTy = dyn_cast<PointerType>(Ty)) {
      if (Type *NestedTy =
              deduceElementTypeHelper(Operand, Visited, UnknownElemTypeI8))
        Ty = getTypedPointerWrapper(NestedTy, PtrTy->getAddressSpace());
    } else {
      Ty = deduceNestedTypeHelper(dyn_cast<User>(Operand), Ty, Visited,
                                  UnknownElemTypeI8);
    }
  }
  return Ty;
}

// Traverse User instructions to deduce an element pointer type of the operand.
Type *SPIRVEmitIntrinsics::deduceElementTypeByUsersDeep(
    Value *Op, std::unordered_set<Value *> &Visited, bool UnknownElemTypeI8) {
  if (!Op || !isPointerTy(Op->getType()) || isa<ConstantPointerNull>(Op) ||
      isa<UndefValue>(Op))
    return nullptr;

  if (auto ElemTy = getPointeeType(Op->getType()))
    return ElemTy;

  // maybe we already know operand's element type
  if (Type *KnownTy = GR->findDeducedElementType(Op))
    return KnownTy;

  for (User *OpU : Op->users()) {
    if (Instruction *Inst = dyn_cast<Instruction>(OpU)) {
      if (Type *Ty = deduceElementTypeHelper(Inst, Visited, UnknownElemTypeI8))
        return Ty;
    }
  }
  return nullptr;
}

// Implements what we know in advance about intrinsics and builtin calls
// TODO: consider generalizing this particular case by encoding knowledge
// about intrinsics and builtin calls in the corresponding specification rules
static Type *getPointeeTypeByCallInst(StringRef DemangledName,
                                      Function *CalledF, unsigned OpIdx) {
  if ((DemangledName.starts_with("__spirv_ocl_printf(") ||
       DemangledName.starts_with("printf(")) &&
      OpIdx == 0)
    return IntegerType::getInt8Ty(CalledF->getContext());
  return nullptr;
}

// Deduce and return a successfully deduced Type of the Instruction,
// or nullptr otherwise.
Type *SPIRVEmitIntrinsics::deduceElementTypeHelper(Value *I,
                                                   bool UnknownElemTypeI8) {
  std::unordered_set<Value *> Visited;
  return deduceElementTypeHelper(I, Visited, UnknownElemTypeI8);
}

void SPIRVEmitIntrinsics::maybeAssignPtrType(Type *&Ty, Value *Op, Type *RefTy,
                                             bool UnknownElemTypeI8) {
  if (isUntypedPointerTy(RefTy)) {
    if (!UnknownElemTypeI8)
      return;
    insertTodoType(Op);
  }
  Ty = RefTy;
}

bool SPIRVEmitIntrinsics::walkLogicalAccessChain(
    GetElementPtrInst &GEP,
    const std::function<void(Type *, uint64_t)> &OnLiteralIndexing,
    const std::function<void(Type *, Value *)> &OnDynamicIndexing) {
  // We only rewrite i8* GEPs. Others should be left as-is.
  // A valid i8* GEP must always have a single index.
  assert(GEP.getSourceElementType() ==
         IntegerType::getInt8Ty(CurrF->getContext()));
  assert(GEP.getNumIndices() == 1);

  auto &DL = CurrF->getDataLayout();
  Value *Src = getPointerRoot(GEP.getPointerOperand());
  Type *CurType = deduceElementType(Src, true);

  Value *Operand = *GEP.idx_begin();
  ConstantInt *CI = dyn_cast<ConstantInt>(Operand);
  if (!CI) {
    ArrayType *AT = dyn_cast<ArrayType>(CurType);
    // Operand is not constant. Either we have an array and accept it, or we
    // give up.
    if (AT)
      OnDynamicIndexing(AT->getElementType(), Operand);
    return AT == nullptr;
  }

  assert(CI);
  uint64_t Offset = CI->getZExtValue();

  do {
    if (ArrayType *AT = dyn_cast<ArrayType>(CurType)) {
      uint32_t EltTypeSize = DL.getTypeSizeInBits(AT->getElementType()) / 8;
      assert(Offset < AT->getNumElements() * EltTypeSize);
      uint64_t Index = Offset / EltTypeSize;
      Offset = Offset - (Index * EltTypeSize);
      CurType = AT->getElementType();
      OnLiteralIndexing(CurType, Index);
    } else if (StructType *ST = dyn_cast<StructType>(CurType)) {
      uint32_t StructSize = DL.getTypeSizeInBits(ST) / 8;
      assert(Offset < StructSize);
      (void)StructSize;
      const auto &STL = DL.getStructLayout(ST);
      unsigned Element = STL->getElementContainingOffset(Offset);
      Offset -= STL->getElementOffset(Element);
      CurType = ST->getElementType(Element);
      OnLiteralIndexing(CurType, Element);
    } else {
      // Vector type indexing should not use GEP.
      // So if we have an index left, something is wrong. Giving up.
      return true;
    }
  } while (Offset > 0);

  return false;
}

Instruction *
SPIRVEmitIntrinsics::buildLogicalAccessChainFromGEP(GetElementPtrInst &GEP) {
  auto &DL = CurrF->getDataLayout();
  IRBuilder<> B(GEP.getParent());
  B.SetInsertPoint(&GEP);

  std::vector<Value *> Indices;
  Indices.push_back(ConstantInt::get(
      IntegerType::getInt32Ty(CurrF->getContext()), 0, /* Signed= */ false));
  walkLogicalAccessChain(
      GEP,
      [&Indices, &B](Type *EltType, uint64_t Index) {
        Indices.push_back(
            ConstantInt::get(B.getInt64Ty(), Index, /* Signed= */ false));
      },
      [&Indices, &B, &DL](Type *EltType, Value *Offset) {
        uint32_t EltTypeSize = DL.getTypeSizeInBits(EltType) / 8;
        Value *Index = B.CreateUDiv(
            Offset, ConstantInt::get(Offset->getType(), EltTypeSize,
                                     /* Signed= */ false));
        Indices.push_back(Index);
      });

  SmallVector<Type *, 2> Types = {GEP.getType(), GEP.getOperand(0)->getType()};
  SmallVector<Value *, 4> Args;
  Args.push_back(B.getInt1(GEP.isInBounds()));
  Args.push_back(GEP.getOperand(0));
  llvm::append_range(Args, Indices);
  auto *NewI = B.CreateIntrinsic(Intrinsic::spv_gep, {Types}, {Args});
  replaceAllUsesWithAndErase(B, &GEP, NewI);
  return NewI;
}

Type *SPIRVEmitIntrinsics::getGEPTypeLogical(GetElementPtrInst *GEP) {

  Type *CurType = GEP->getResultElementType();

  bool Interrupted = walkLogicalAccessChain(
      *GEP, [&CurType](Type *EltType, uint64_t Index) { CurType = EltType; },
      [&CurType](Type *EltType, Value *Index) { CurType = EltType; });

  return Interrupted ? GEP->getResultElementType() : CurType;
}

Type *SPIRVEmitIntrinsics::getGEPType(GetElementPtrInst *Ref) {
  if (Ref->getSourceElementType() ==
          IntegerType::getInt8Ty(CurrF->getContext()) &&
      TM->getSubtargetImpl()->isLogicalSPIRV()) {
    return getGEPTypeLogical(Ref);
  }

  Type *Ty = nullptr;
  // TODO: not sure if GetElementPtrInst::getTypeAtIndex() does anything
  // useful here
  if (isNestedPointer(Ref->getSourceElementType())) {
    Ty = Ref->getSourceElementType();
    for (Use &U : drop_begin(Ref->indices()))
      Ty = GetElementPtrInst::getTypeAtIndex(Ty, U.get());
  } else {
    Ty = Ref->getResultElementType();
  }
  return Ty;
}

Type *SPIRVEmitIntrinsics::deduceElementTypeHelper(
    Value *I, std::unordered_set<Value *> &Visited, bool UnknownElemTypeI8,
    bool IgnoreKnownType) {
  // allow to pass nullptr as an argument
  if (!I)
    return nullptr;

  // maybe already known
  if (!IgnoreKnownType)
    if (Type *KnownTy = GR->findDeducedElementType(I))
      return KnownTy;

  // maybe a cycle
  if (!Visited.insert(I).second)
    return nullptr;

  // fallback value in case when we fail to deduce a type
  Type *Ty = nullptr;
  // look for known basic patterns of type inference
  if (auto *Ref = dyn_cast<AllocaInst>(I)) {
    maybeAssignPtrType(Ty, I, Ref->getAllocatedType(), UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<GetElementPtrInst>(I)) {
    Ty = getGEPType(Ref);
  } else if (auto *Ref = dyn_cast<LoadInst>(I)) {
    Value *Op = Ref->getPointerOperand();
    Type *KnownTy = GR->findDeducedElementType(Op);
    if (!KnownTy)
      KnownTy = Op->getType();
    if (Type *ElemTy = getPointeeType(KnownTy))
      maybeAssignPtrType(Ty, I, ElemTy, UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<GlobalValue>(I)) {
    Ty = deduceElementTypeByValueDeep(
        Ref->getValueType(),
        Ref->getNumOperands() > 0 ? Ref->getOperand(0) : nullptr, Visited,
        UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<AddrSpaceCastInst>(I)) {
    Type *RefTy = deduceElementTypeHelper(Ref->getPointerOperand(), Visited,
                                          UnknownElemTypeI8);
    maybeAssignPtrType(Ty, I, RefTy, UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<IntToPtrInst>(I)) {
    maybeAssignPtrType(Ty, I, Ref->getDestTy(), UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<BitCastInst>(I)) {
    if (Type *Src = Ref->getSrcTy(), *Dest = Ref->getDestTy();
        isPointerTy(Src) && isPointerTy(Dest))
      Ty = deduceElementTypeHelper(Ref->getOperand(0), Visited,
                                   UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<AtomicCmpXchgInst>(I)) {
    Value *Op = Ref->getNewValOperand();
    if (isPointerTy(Op->getType()))
      Ty = deduceElementTypeHelper(Op, Visited, UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<AtomicRMWInst>(I)) {
    Value *Op = Ref->getValOperand();
    if (isPointerTy(Op->getType()))
      Ty = deduceElementTypeHelper(Op, Visited, UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<PHINode>(I)) {
    Type *BestTy = nullptr;
    unsigned MaxN = 1;
    DenseMap<Type *, unsigned> PhiTys;
    for (int i = Ref->getNumIncomingValues() - 1; i >= 0; --i) {
      Ty = deduceElementTypeByUsersDeep(Ref->getIncomingValue(i), Visited,
                                        UnknownElemTypeI8);
      if (!Ty)
        continue;
      auto It = PhiTys.try_emplace(Ty, 1);
      if (!It.second) {
        ++It.first->second;
        if (It.first->second > MaxN) {
          MaxN = It.first->second;
          BestTy = Ty;
        }
      }
    }
    if (BestTy)
      Ty = BestTy;
  } else if (auto *Ref = dyn_cast<SelectInst>(I)) {
    for (Value *Op : {Ref->getTrueValue(), Ref->getFalseValue()}) {
      Ty = deduceElementTypeByUsersDeep(Op, Visited, UnknownElemTypeI8);
      if (Ty)
        break;
    }
  } else if (auto *CI = dyn_cast<CallInst>(I)) {
    static StringMap<unsigned> ResTypeByArg = {
        {"to_global", 0},
        {"to_local", 0},
        {"to_private", 0},
        {"__spirv_GenericCastToPtr_ToGlobal", 0},
        {"__spirv_GenericCastToPtr_ToLocal", 0},
        {"__spirv_GenericCastToPtr_ToPrivate", 0},
        {"__spirv_GenericCastToPtrExplicit_ToGlobal", 0},
        {"__spirv_GenericCastToPtrExplicit_ToLocal", 0},
        {"__spirv_GenericCastToPtrExplicit_ToPrivate", 0}};
    // TODO: maybe improve performance by caching demangled names

    auto *II = dyn_cast<IntrinsicInst>(CI);
    if (II && II->getIntrinsicID() == Intrinsic::spv_resource_getpointer) {
      auto *HandleType = cast<TargetExtType>(II->getOperand(0)->getType());
      if (HandleType->getTargetExtName() == "spirv.Image" ||
          HandleType->getTargetExtName() == "spirv.SignedImage") {
        for (User *U : II->users()) {
          Ty = cast<Instruction>(U)->getAccessType();
          if (Ty)
            break;
        }
      } else if (HandleType->getTargetExtName() == "spirv.VulkanBuffer") {
        // This call is supposed to index into an array
        Ty = HandleType->getTypeParameter(0);
        if (Ty->isArrayTy())
          Ty = Ty->getArrayElementType();
        else {
          assert(Ty && Ty->isStructTy());
          uint32_t Index = cast<ConstantInt>(II->getOperand(1))->getZExtValue();
          Ty = cast<StructType>(Ty)->getElementType(Index);
        }
      } else {
        llvm_unreachable("Unknown handle type for spv_resource_getpointer.");
      }
    } else if (II && II->getIntrinsicID() ==
                         Intrinsic::spv_generic_cast_to_ptr_explicit) {
      Ty = deduceElementTypeHelper(CI->getArgOperand(0), Visited,
                                   UnknownElemTypeI8);
    } else if (Function *CalledF = CI->getCalledFunction()) {
      std::string DemangledName =
          getOclOrSpirvBuiltinDemangledName(CalledF->getName());
      if (DemangledName.length() > 0)
        DemangledName = SPIRV::lookupBuiltinNameHelper(DemangledName);
      auto AsArgIt = ResTypeByArg.find(DemangledName);
      if (AsArgIt != ResTypeByArg.end())
        Ty = deduceElementTypeHelper(CI->getArgOperand(AsArgIt->second),
                                     Visited, UnknownElemTypeI8);
      else if (Type *KnownRetTy = GR->findDeducedElementType(CalledF))
        Ty = KnownRetTy;
    }
  }

  // remember the found relationship
  if (Ty && !IgnoreKnownType) {
    // specify nested types if needed, otherwise return unchanged
    GR->addDeducedElementType(I, normalizeType(Ty));
  }

  return Ty;
}

// Re-create the type of the value if it has untyped pointer fields (also
// nested). Returns the original value type if no correction of untyped
// pointer information is found or needed.
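// For example (an illustrative sketch), if an operand of a constant struct
// { ptr addrspace(1), i32 } is deduced to point to float, a new struct type
// is created and recorded whose first field carries the typed-pointer
// wrapper for float instead of the untyped pointer.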
Type *SPIRVEmitIntrinsics::deduceNestedTypeHelper(User *U,
                                                  bool UnknownElemTypeI8) {
  std::unordered_set<Value *> Visited;
  return deduceNestedTypeHelper(U, U->getType(), Visited, UnknownElemTypeI8);
}

Type *SPIRVEmitIntrinsics::deduceNestedTypeHelper(
    User *U, Type *OrigTy, std::unordered_set<Value *> &Visited,
    bool UnknownElemTypeI8) {
  if (!U)
    return OrigTy;

  // maybe already known
  if (Type *KnownTy = GR->findDeducedCompositeType(U))
    return KnownTy;

  // maybe a cycle
  if (!Visited.insert(U).second)
    return OrigTy;

  if (isa<StructType>(OrigTy)) {
    SmallVector<Type *> Tys;
    bool Change = false;
    for (unsigned i = 0; i < U->getNumOperands(); ++i) {
      Value *Op = U->getOperand(i);
      assert(Op && "Operands should not be null.");
      Type *OpTy = Op->getType();
      Type *Ty = OpTy;
      if (auto *PtrTy = dyn_cast<PointerType>(OpTy)) {
        if (Type *NestedTy =
                deduceElementTypeHelper(Op, Visited, UnknownElemTypeI8))
          Ty = getTypedPointerWrapper(NestedTy, PtrTy->getAddressSpace());
      } else {
        Ty = deduceNestedTypeHelper(dyn_cast<User>(Op), OpTy, Visited,
                                    UnknownElemTypeI8);
      }
      Tys.push_back(Ty);
      Change |= Ty != OpTy;
    }
    if (Change) {
      Type *NewTy = StructType::create(Tys);
      GR->addDeducedCompositeType(U, NewTy);
      return NewTy;
    }
  } else if (auto *ArrTy = dyn_cast<ArrayType>(OrigTy)) {
    if (Value *Op = U->getNumOperands() > 0 ? U->getOperand(0) : nullptr) {
      Type *OpTy = ArrTy->getElementType();
      Type *Ty = OpTy;
      if (auto *PtrTy = dyn_cast<PointerType>(OpTy)) {
        if (Type *NestedTy =
                deduceElementTypeHelper(Op, Visited, UnknownElemTypeI8))
          Ty = getTypedPointerWrapper(NestedTy, PtrTy->getAddressSpace());
      } else {
        Ty = deduceNestedTypeHelper(dyn_cast<User>(Op), OpTy, Visited,
                                    UnknownElemTypeI8);
      }
      if (Ty != OpTy) {
        Type *NewTy = ArrayType::get(Ty, ArrTy->getNumElements());
        GR->addDeducedCompositeType(U, NewTy);
        return NewTy;
      }
    }
  } else if (auto *VecTy = dyn_cast<VectorType>(OrigTy)) {
    if (Value *Op = U->getNumOperands() > 0 ? U->getOperand(0) : nullptr) {
      Type *OpTy = VecTy->getElementType();
      Type *Ty = OpTy;
      if (auto *PtrTy = dyn_cast<PointerType>(OpTy)) {
        if (Type *NestedTy =
                deduceElementTypeHelper(Op, Visited, UnknownElemTypeI8))
          Ty = getTypedPointerWrapper(NestedTy, PtrTy->getAddressSpace());
      } else {
        Ty = deduceNestedTypeHelper(dyn_cast<User>(Op), OpTy, Visited,
                                    UnknownElemTypeI8);
      }
      if (Ty != OpTy) {
        Type *NewTy = VectorType::get(Ty, VecTy->getElementCount());
        GR->addDeducedCompositeType(U, NewTy);
        return NewTy;
      }
    }
  }

  return OrigTy;
}

Type *SPIRVEmitIntrinsics::deduceElementType(Value *I, bool UnknownElemTypeI8) {
  if (Type *Ty = deduceElementTypeHelper(I, UnknownElemTypeI8))
    return Ty;
  if (!UnknownElemTypeI8)
    return nullptr;
  insertTodoType(I);
  return IntegerType::getInt8Ty(I->getContext());
}

static inline Type *getAtomicElemTy(SPIRVGlobalRegistry *GR, Instruction *I,
                                    Value *PointerOperand) {
  Type *PointeeTy = GR->findDeducedElementType(PointerOperand);
  if (PointeeTy && !isUntypedPointerTy(PointeeTy))
    return nullptr;
  auto *PtrTy = dyn_cast<PointerType>(I->getType());
  if (!PtrTy)
    return I->getType();
  if (Type *NestedTy = GR->findDeducedElementType(I))
    return getTypedPointerWrapper(NestedTy, PtrTy->getAddressSpace());
  return nullptr;
}

// Try to deduce element type for a call base. Returns false if this is an
// indirect function invocation, and true otherwise.
bool SPIRVEmitIntrinsics::deduceOperandElementTypeCalledFunction(
    CallInst *CI, SmallVector<std::pair<Value *, unsigned>> &Ops,
    Type *&KnownElemTy, bool &Incomplete) {
  Function *CalledF = CI->getCalledFunction();
  if (!CalledF)
    return false;
  std::string DemangledName =
      getOclOrSpirvBuiltinDemangledName(CalledF->getName());
  if (DemangledName.length() > 0 &&
      !StringRef(DemangledName).starts_with("llvm.")) {
    const SPIRVSubtarget &ST = TM->getSubtarget<SPIRVSubtarget>(*CalledF);
    auto [Grp, Opcode, ExtNo] = SPIRV::mapBuiltinToOpcode(
        DemangledName, ST.getPreferredInstructionSet());
    if (Opcode == SPIRV::OpGroupAsyncCopy) {
      for (unsigned i = 0, PtrCnt = 0; i < CI->arg_size() && PtrCnt < 2; ++i) {
        Value *Op = CI->getArgOperand(i);
        if (!isPointerTy(Op->getType()))
          continue;
        ++PtrCnt;
        if (Type *ElemTy = GR->findDeducedElementType(Op))
          KnownElemTy = ElemTy; // src will rewrite dest if both are defined
        Ops.push_back(std::make_pair(Op, i));
      }
    } else if (Grp == SPIRV::Atomic || Grp == SPIRV::AtomicFloating) {
      if (CI->arg_size() == 0)
        return true;
      Value *Op = CI->getArgOperand(0);
      if (!isPointerTy(Op->getType()))
        return true;
      switch (Opcode) {
      case SPIRV::OpAtomicFAddEXT:
      case SPIRV::OpAtomicFMinEXT:
      case SPIRV::OpAtomicFMaxEXT:
      case SPIRV::OpAtomicLoad:
      case SPIRV::OpAtomicCompareExchangeWeak:
      case SPIRV::OpAtomicCompareExchange:
      case SPIRV::OpAtomicExchange:
      case SPIRV::OpAtomicIAdd:
      case SPIRV::OpAtomicISub:
      case SPIRV::OpAtomicOr:
      case SPIRV::OpAtomicXor:
      case SPIRV::OpAtomicAnd:
      case SPIRV::OpAtomicUMin:
      case SPIRV::OpAtomicUMax:
      case SPIRV::OpAtomicSMin:
      case SPIRV::OpAtomicSMax: {
        KnownElemTy = isPointerTy(CI->getType()) ? getAtomicElemTy(GR, CI, Op)
                                                 : CI->getType();
        if (!KnownElemTy)
          return true;
        Incomplete = isTodoType(Op);
        Ops.push_back(std::make_pair(Op, 0));
      } break;
      case SPIRV::OpAtomicStore: {
        if (CI->arg_size() < 4)
          return true;
        Value *ValOp = CI->getArgOperand(3);
        KnownElemTy = isPointerTy(ValOp->getType())
                          ? getAtomicElemTy(GR, CI, Op)
                          : ValOp->getType();
        if (!KnownElemTy)
          return true;
        Incomplete = isTodoType(Op);
        Ops.push_back(std::make_pair(Op, 0));
      } break;
      }
    }
  }
  return true;
}

// Try to deduce element type for a function pointer.
void SPIRVEmitIntrinsics::deduceOperandElementTypeFunctionPointer(
    CallInst *CI, SmallVector<std::pair<Value *, unsigned>> &Ops,
    Type *&KnownElemTy, bool IsPostprocessing) {
  Value *Op = CI->getCalledOperand();
  if (!Op || !isPointerTy(Op->getType()))
    return;
  Ops.push_back(std::make_pair(Op, std::numeric_limits<unsigned>::max()));
  FunctionType *FTy = CI->getFunctionType();
  bool IsNewFTy = false, IsIncomplete = false;
  SmallVector<Type *, 4> ArgTys;
  for (Value *Arg : CI->args()) {
    Type *ArgTy = Arg->getType();
    if (ArgTy->isPointerTy()) {
      if (Type *ElemTy = GR->findDeducedElementType(Arg)) {
        IsNewFTy = true;
        ArgTy = getTypedPointerWrapper(ElemTy, getPointerAddressSpace(ArgTy));
        if (isTodoType(Arg))
          IsIncomplete = true;
      } else {
        IsIncomplete = true;
      }
    }
    ArgTys.push_back(ArgTy);
  }
  Type *RetTy = FTy->getReturnType();
  if (CI->getType()->isPointerTy()) {
    if (Type *ElemTy = GR->findDeducedElementType(CI)) {
      IsNewFTy = true;
      RetTy =
          getTypedPointerWrapper(ElemTy, getPointerAddressSpace(CI->getType()));
      if (isTodoType(CI))
        IsIncomplete = true;
    } else {
      IsIncomplete = true;
    }
  }
  if (!IsPostprocessing && IsIncomplete)
    insertTodoType(Op);
  KnownElemTy =
      IsNewFTy ? FunctionType::get(RetTy, ArgTys, FTy->isVarArg()) : FTy;
}

bool SPIRVEmitIntrinsics::deduceOperandElementTypeFunctionRet(
    Instruction *I, SmallPtrSet<Instruction *, 4> *IncompleteRets,
    const SmallPtrSet<Value *, 4> *AskOps, bool IsPostprocessing,
    Type *&KnownElemTy, Value *Op, Function *F) {
  KnownElemTy = GR->findDeducedElementType(F);
  if (KnownElemTy)
    return false;
  if (Type *OpElemTy = GR->findDeducedElementType(Op)) {
    OpElemTy = normalizeType(OpElemTy);
    GR->addDeducedElementType(F, OpElemTy);
    GR->addReturnType(
        F, TypedPointerType::get(OpElemTy,
                                 getPointerAddressSpace(F->getReturnType())));
    // non-recursive update of types in function uses
    DenseSet<std::pair<Value *, Value *>> VisitedSubst{std::make_pair(I, Op)};
    for (User *U : F->users()) {
      CallInst *CI = dyn_cast<CallInst>(U);
      if (!CI || CI->getCalledFunction() != F)
        continue;
      if (CallInst *AssignCI = GR->findAssignPtrTypeInstr(CI)) {
        if (Type *PrevElemTy = GR->findDeducedElementType(CI)) {
          GR->updateAssignType(AssignCI, CI,
                               getNormalizedPoisonValue(OpElemTy));
          propagateElemType(CI, PrevElemTy, VisitedSubst);
        }
      }
    }
    // Non-recursive update of types in the function's incomplete returns.
    // This may happen just once per function; the latch is the pair of
    // findDeducedElementType(F) / addDeducedElementType(F, ...).
    // With or without the latch it is a non-recursive call because
    // IncompleteRets is set to nullptr in this call.
    if (IncompleteRets)
      for (Instruction *IncompleteRetI : *IncompleteRets)
        deduceOperandElementType(IncompleteRetI, nullptr, AskOps,
                                 IsPostprocessing);
  } else if (IncompleteRets) {
    IncompleteRets->insert(I);
  }
  TypeValidated.insert(I);
  return true;
}

// If the Instruction has Pointer operands with unresolved types, this function
// tries to deduce them. If the Instruction has Pointer operands with known
// types which differ from expected, this function tries to insert a bitcast to
// resolve the issue.
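// For example (a simplified sketch): for `store i32 %v, ptr %p` where %p has
// no deduced element type yet, the function records i32 as the pointee type
// of %p and emits a spv_assign_ptr_type intrinsic for it; if %p already has a
// validated but different pointee type, a spv_ptrcast is inserted instead.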
void SPIRVEmitIntrinsics::deduceOperandElementType(
    Instruction *I, SmallPtrSet<Instruction *, 4> *IncompleteRets,
    const SmallPtrSet<Value *, 4> *AskOps, bool IsPostprocessing) {
  SmallVector<std::pair<Value *, unsigned>> Ops;
  Type *KnownElemTy = nullptr;
  bool Incomplete = false;
  // look for known basic patterns of type inference
  if (auto *Ref = dyn_cast<PHINode>(I)) {
    if (!isPointerTy(I->getType()) ||
        !(KnownElemTy = GR->findDeducedElementType(I)))
      return;
    Incomplete = isTodoType(I);
    for (unsigned i = 0; i < Ref->getNumIncomingValues(); i++) {
      Value *Op = Ref->getIncomingValue(i);
      if (isPointerTy(Op->getType()))
        Ops.push_back(std::make_pair(Op, i));
    }
  } else if (auto *Ref = dyn_cast<AddrSpaceCastInst>(I)) {
    KnownElemTy = GR->findDeducedElementType(I);
    if (!KnownElemTy)
      return;
    Incomplete = isTodoType(I);
    Ops.push_back(std::make_pair(Ref->getPointerOperand(), 0));
  } else if (auto *Ref = dyn_cast<BitCastInst>(I)) {
    if (!isPointerTy(I->getType()))
      return;
    KnownElemTy = GR->findDeducedElementType(I);
    if (!KnownElemTy)
      return;
    Incomplete = isTodoType(I);
    Ops.push_back(std::make_pair(Ref->getOperand(0), 0));
  } else if (auto *Ref = dyn_cast<GetElementPtrInst>(I)) {
    if (GR->findDeducedElementType(Ref->getPointerOperand()))
      return;
    KnownElemTy = Ref->getSourceElementType();
    Ops.push_back(std::make_pair(Ref->getPointerOperand(),
                                 GetElementPtrInst::getPointerOperandIndex()));
  } else if (auto *Ref = dyn_cast<LoadInst>(I)) {
    KnownElemTy = I->getType();
    if (isUntypedPointerTy(KnownElemTy))
      return;
    Type *PointeeTy = GR->findDeducedElementType(Ref->getPointerOperand());
    if (PointeeTy && !isUntypedPointerTy(PointeeTy))
      return;
    Ops.push_back(std::make_pair(Ref->getPointerOperand(),
                                 LoadInst::getPointerOperandIndex()));
  } else if (auto *Ref = dyn_cast<StoreInst>(I)) {
    if (!(KnownElemTy =
              reconstructType(Ref->getValueOperand(), false, IsPostprocessing)))
      return;
    Type *PointeeTy = GR->findDeducedElementType(Ref->getPointerOperand());
    if (PointeeTy && !isUntypedPointerTy(PointeeTy))
      return;
    Ops.push_back(std::make_pair(Ref->getPointerOperand(),
                                 StoreInst::getPointerOperandIndex()));
  } else if (auto *Ref = dyn_cast<AtomicCmpXchgInst>(I)) {
    KnownElemTy = isPointerTy(I->getType())
                      ? getAtomicElemTy(GR, I, Ref->getPointerOperand())
                      : I->getType();
    if (!KnownElemTy)
      return;
    Incomplete = isTodoType(Ref->getPointerOperand());
    Ops.push_back(std::make_pair(Ref->getPointerOperand(),
                                 AtomicCmpXchgInst::getPointerOperandIndex()));
  } else if (auto *Ref = dyn_cast<AtomicRMWInst>(I)) {
    KnownElemTy = isPointerTy(I->getType())
                      ? getAtomicElemTy(GR, I, Ref->getPointerOperand())
                      : I->getType();
    if (!KnownElemTy)
      return;
    Incomplete = isTodoType(Ref->getPointerOperand());
    Ops.push_back(std::make_pair(Ref->getPointerOperand(),
                                 AtomicRMWInst::getPointerOperandIndex()));
  } else if (auto *Ref = dyn_cast<SelectInst>(I)) {
    if (!isPointerTy(I->getType()) ||
        !(KnownElemTy = GR->findDeducedElementType(I)))
      return;
    Incomplete = isTodoType(I);
    for (unsigned i = 0; i < Ref->getNumOperands(); i++) {
      Value *Op = Ref->getOperand(i);
      if (isPointerTy(Op->getType()))
        Ops.push_back(std::make_pair(Op, i));
    }
  } else if (auto *Ref = dyn_cast<ReturnInst>(I)) {
    if (!isPointerTy(CurrF->getReturnType()))
      return;
    Value *Op = Ref->getReturnValue();
    if (!Op)
      return;
    if (deduceOperandElementTypeFunctionRet(I, IncompleteRets, AskOps,
                                            IsPostprocessing, KnownElemTy, Op,
                                            CurrF))
      return;
    Incomplete = isTodoType(CurrF);
    Ops.push_back(std::make_pair(Op, 0));
  } else if (auto *Ref = dyn_cast<ICmpInst>(I)) {
    if (!isPointerTy(Ref->getOperand(0)->getType()))
      return;
    Value *Op0 = Ref->getOperand(0);
    Value *Op1 = Ref->getOperand(1);
    bool Incomplete0 = isTodoType(Op0);
    bool Incomplete1 = isTodoType(Op1);
    Type *ElemTy1 = GR->findDeducedElementType(Op1);
    Type *ElemTy0 = (Incomplete0 && !Incomplete1 && ElemTy1)
                        ? nullptr
                        : GR->findDeducedElementType(Op0);
    if (ElemTy0) {
      KnownElemTy = ElemTy0;
      Incomplete = Incomplete0;
      Ops.push_back(std::make_pair(Op1, 1));
    } else if (ElemTy1) {
      KnownElemTy = ElemTy1;
      Incomplete = Incomplete1;
      Ops.push_back(std::make_pair(Op0, 0));
    }
  } else if (CallInst *CI = dyn_cast<CallInst>(I)) {
    if (!CI->isIndirectCall())
      deduceOperandElementTypeCalledFunction(CI, Ops, KnownElemTy, Incomplete);
    else if (HaveFunPtrs)
      deduceOperandElementTypeFunctionPointer(CI, Ops, KnownElemTy,
                                              IsPostprocessing);
  }

  // There is not enough info to deduce types, or everything is valid.
  if (!KnownElemTy || Ops.size() == 0)
    return;

  LLVMContext &Ctx = CurrF->getContext();
  IRBuilder<> B(Ctx);
  for (auto &OpIt : Ops) {
    Value *Op = OpIt.first;
    if (AskOps && !AskOps->contains(Op))
      continue;
    Type *AskTy = nullptr;
    CallInst *AskCI = nullptr;
    if (IsPostprocessing && AskOps) {
      AskTy = GR->findDeducedElementType(Op);
      AskCI = GR->findAssignPtrTypeInstr(Op);
      assert(AskTy && AskCI);
    }
    Type *Ty = AskTy ? AskTy : GR->findDeducedElementType(Op);
    if (Ty == KnownElemTy)
      continue;
    Value *OpTyVal = getNormalizedPoisonValue(KnownElemTy);
    Type *OpTy = Op->getType();
    if (Op->hasUseList() &&
        (!Ty || AskTy || isUntypedPointerTy(Ty) || isTodoType(Op))) {
      Type *PrevElemTy = GR->findDeducedElementType(Op);
      GR->addDeducedElementType(Op, normalizeType(KnownElemTy));
      // check if KnownElemTy is complete
      if (!Incomplete)
        eraseTodoType(Op);
      else if (!IsPostprocessing)
        insertTodoType(Op);
      // check if there is existing Intrinsic::spv_assign_ptr_type instruction
      CallInst *AssignCI = AskCI ? AskCI : GR->findAssignPtrTypeInstr(Op);
      if (AssignCI == nullptr) {
        Instruction *User = dyn_cast<Instruction>(Op->use_begin()->get());
        setInsertPointSkippingPhis(B, User ? User->getNextNode() : I);
        CallInst *CI =
            buildIntrWithMD(Intrinsic::spv_assign_ptr_type, {OpTy}, OpTyVal, Op,
                            {B.getInt32(getPointerAddressSpace(OpTy))}, B);
        GR->addAssignPtrTypeInstr(Op, CI);
      } else {
        GR->updateAssignType(AssignCI, Op, OpTyVal);
        DenseSet<std::pair<Value *, Value *>> VisitedSubst{
            std::make_pair(I, Op)};
        propagateElemTypeRec(Op, KnownElemTy, PrevElemTy, VisitedSubst);
      }
    } else {
      eraseTodoType(Op);
      CallInst *PtrCastI =
          buildSpvPtrcast(I->getParent()->getParent(), Op, KnownElemTy);
      if (OpIt.second == std::numeric_limits<unsigned>::max())
        dyn_cast<CallInst>(I)->setCalledOperand(PtrCastI);
      else
        I->setOperand(OpIt.second, PtrCastI);
    }
  }
  TypeValidated.insert(I);
}

void SPIRVEmitIntrinsics::replaceMemInstrUses(Instruction *Old,
                                              Instruction *New,
                                              IRBuilder<> &B) {
  while (!Old->user_empty()) {
    auto *U = Old->user_back();
    if (isAssignTypeInstr(U)) {
      B.SetInsertPoint(U);
      SmallVector<Value *, 2> Args = {New, U->getOperand(1)};
      CallInst *AssignCI =
          B.CreateIntrinsic(Intrinsic::spv_assign_type, {New->getType()}, Args);
      GR->addAssignPtrTypeInstr(New, AssignCI);
      U->eraseFromParent();
    } else if (isMemInstrToReplace(U) || isa<ReturnInst>(U) ||
               isa<CallInst>(U)) {
      U->replaceUsesOfWith(Old, New);
    } else {
      llvm_unreachable("illegal aggregate intrinsic user");
    }
  }
  New->copyMetadata(*Old);
  Old->eraseFromParent();
}

void SPIRVEmitIntrinsics::preprocessUndefs(IRBuilder<> &B) {
  std::queue<Instruction *> Worklist;
  for (auto &I : instructions(CurrF))
    Worklist.push(&I);

  while (!Worklist.empty()) {
    Instruction *I = Worklist.front();
    bool BPrepared = false;
    Worklist.pop();

    for (auto &Op : I->operands()) {
      auto *AggrUndef = dyn_cast<UndefValue>(Op);
      if (!AggrUndef || !Op->getType()->isAggregateType())
        continue;

      if (!BPrepared) {
        setInsertPointSkippingPhis(B, I);
        BPrepared = true;
      }
      auto *IntrUndef = B.CreateIntrinsic(Intrinsic::spv_undef, {});
      Worklist.push(IntrUndef);
      I->replaceUsesOfWith(Op, IntrUndef);
      AggrConsts[IntrUndef] = AggrUndef;
      AggrConstTypes[IntrUndef] = AggrUndef->getType();
    }
  }
}

void SPIRVEmitIntrinsics::preprocessCompositeConstants(IRBuilder<> &B) {
  std::queue<Instruction *> Worklist;
  for (auto &I : instructions(CurrF))
    Worklist.push(&I);

  while (!Worklist.empty()) {
    auto *I = Worklist.front();
    bool IsPhi = isa<PHINode>(I), BPrepared = false;
    assert(I);
    bool KeepInst = false;
    for (const auto &Op : I->operands()) {
      Constant *AggrConst = nullptr;
      Type *ResTy = nullptr;
      if (auto *COp = dyn_cast<ConstantVector>(Op)) {
        AggrConst = COp;
        ResTy = COp->getType();
      } else if (auto *COp = dyn_cast<ConstantArray>(Op)) {
        AggrConst = COp;
        ResTy = B.getInt32Ty();
      } else if (auto *COp = dyn_cast<ConstantStruct>(Op)) {
        AggrConst = COp;
        ResTy = B.getInt32Ty();
      } else if (auto *COp = dyn_cast<ConstantDataArray>(Op)) {
        AggrConst = COp;
        ResTy = B.getInt32Ty();
      } else if (auto *COp = dyn_cast<ConstantAggregateZero>(Op)) {
        AggrConst = COp;
        ResTy = Op->getType()->isVectorTy() ? COp->getType() : B.getInt32Ty();
      }
      if (AggrConst) {
        SmallVector<Value *> Args;
        if (auto *COp = dyn_cast<ConstantDataSequential>(Op))
          for (unsigned i = 0; i < COp->getNumElements(); ++i)
            Args.push_back(COp->getElementAsConstant(i));
        else
          llvm::append_range(Args, AggrConst->operands());
        if (!BPrepared) {
          IsPhi ? B.SetInsertPointPastAllocas(I->getParent()->getParent())
                : B.SetInsertPoint(I);
          BPrepared = true;
        }
        auto *CI =
            B.CreateIntrinsic(Intrinsic::spv_const_composite, {ResTy}, {Args});
        Worklist.push(CI);
        I->replaceUsesOfWith(Op, CI);
        KeepInst = true;
        AggrConsts[CI] = AggrConst;
        AggrConstTypes[CI] = deduceNestedTypeHelper(AggrConst, false);
      }
    }
    if (!KeepInst)
      Worklist.pop();
  }
}

static void createDecorationIntrinsic(Instruction *I, MDNode *Node,
                                      IRBuilder<> &B) {
  LLVMContext &Ctx = I->getContext();
  setInsertPointAfterDef(B, I);
  B.CreateIntrinsic(Intrinsic::spv_assign_decoration, {I->getType()},
                    {I, MetadataAsValue::get(Ctx, MDNode::get(Ctx, {Node}))});
}

static void createRoundingModeDecoration(Instruction *I,
                                         unsigned RoundingModeDeco,
                                         IRBuilder<> &B) {
  LLVMContext &Ctx = I->getContext();
  Type *Int32Ty = Type::getInt32Ty(Ctx);
  MDNode *RoundingModeNode = MDNode::get(
      Ctx,
      {ConstantAsMetadata::get(
           ConstantInt::get(Int32Ty, SPIRV::Decoration::FPRoundingMode)),
       ConstantAsMetadata::get(ConstantInt::get(Int32Ty, RoundingModeDeco))});
  createDecorationIntrinsic(I, RoundingModeNode, B);
}

static void createSaturatedConversionDecoration(Instruction *I,
                                                IRBuilder<> &B) {
  LLVMContext &Ctx = I->getContext();
  Type *Int32Ty = Type::getInt32Ty(Ctx);
  MDNode *SaturatedConversionNode =
      MDNode::get(Ctx, {ConstantAsMetadata::get(ConstantInt::get(
                           Int32Ty, SPIRV::Decoration::SaturatedConversion))});
  createDecorationIntrinsic(I, SaturatedConversionNode, B);
}

static void createSaturatedConversionDecorations(Instruction *I,
                                                 IRBuilder<> &B) {
  if (auto *CI = dyn_cast<CallInst>(I)) {
    if (Function *Fu = CI->getCalledFunction()) {
      if (Fu->isIntrinsic()) {
        const unsigned IntrinsicId = Fu->getIntrinsicID();
        switch (IntrinsicId) {
        case Intrinsic::fptosi_sat:
        case Intrinsic::fptoui_sat:
          createSaturatedConversionDecoration(CI, B);
          break;
        default:
          break;
        }
      }
    }
  }
}

Instruction *SPIRVEmitIntrinsics::visitCallInst(CallInst &Call) {
  if (!Call.isInlineAsm())
    return &Call;

  const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
  LLVMContext &Ctx = CurrF->getContext();

  Constant *TyC = UndefValue::get(IA->getFunctionType());
  MDString *ConstraintString = MDString::get(Ctx, IA->getConstraintString());
  SmallVector<Value *> Args = {
      buildMD(TyC),
      MetadataAsValue::get(Ctx, MDNode::get(Ctx, ConstraintString))};
  for (unsigned OpIdx = 0; OpIdx < Call.arg_size(); OpIdx++)
    Args.push_back(Call.getArgOperand(OpIdx));

  IRBuilder<> B(Call.getParent());
  B.SetInsertPoint(&Call);
  B.CreateIntrinsic(Intrinsic::spv_inline_asm, {Args});
  return &Call;
}

// Use a tip about rounding mode to create a decoration.
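// For example (an illustrative sketch), a constrained intrinsic like
//   call float @llvm.experimental.constrained.fadd.f32(
//       float %a, float %b, metadata !"round.towardzero", metadata ...)
// carries RoundingMode::TowardZero, which is converted below into an
// FPRoundingMode RTZ decoration record attached via spv_assign_decoration.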
void SPIRVEmitIntrinsics::useRoundingMode(ConstrainedFPIntrinsic *FPI,
                                          IRBuilder<> &B) {
  std::optional<RoundingMode> RM = FPI->getRoundingMode();
  if (!RM.has_value())
    return;
  unsigned RoundingModeDeco = std::numeric_limits<unsigned>::max();
  switch (RM.value()) {
  default:
    // ignore unknown rounding modes
    break;
  case RoundingMode::NearestTiesToEven:
    RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTE;
    break;
  case RoundingMode::TowardNegative:
    RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTN;
    break;
  case RoundingMode::TowardPositive:
    RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTP;
    break;
  case RoundingMode::TowardZero:
    RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTZ;
    break;
  case RoundingMode::Dynamic:
  case RoundingMode::NearestTiesToAway:
    // TODO: check if supported
    break;
  }
  if (RoundingModeDeco == std::numeric_limits<unsigned>::max())
    return;
  // Convert the tip about rounding mode into a decoration record.
  createRoundingModeDecoration(FPI, RoundingModeDeco, B);
}

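// For example (a simplified sketch), `switch i32 %sel, label %def [i32 1,
// label %bb1]` becomes a call to the spv_switch intrinsic taking %sel, the
// case values, and block addresses, followed by an artificial indirectbr
// listing %def and %bb1 as destinations to keep the CFG valid until the IR
// translation pass removes it.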
Instruction *SPIRVEmitIntrinsics::visitSwitchInst(SwitchInst &I) {
  BasicBlock *ParentBB = I.getParent();
  IRBuilder<> B(ParentBB);
  B.SetInsertPoint(&I);
  SmallVector<Value *, 4> Args;
  SmallVector<BasicBlock *> BBCases;
  for (auto &Op : I.operands()) {
    if (Op.get()->getType()->isSized()) {
      Args.push_back(Op);
    } else if (BasicBlock *BB = dyn_cast<BasicBlock>(Op.get())) {
      BBCases.push_back(BB);
      Args.push_back(BlockAddress::get(BB->getParent(), BB));
    } else {
      report_fatal_error("Unexpected switch operand");
    }
  }
  CallInst *NewI = B.CreateIntrinsic(Intrinsic::spv_switch,
                                     {I.getOperand(0)->getType()}, {Args});
  // remove the switch to avoid its unneeded and undesirable unwrapping into
  // branches and conditions
  replaceAllUsesWith(&I, NewI);
  I.eraseFromParent();
  // insert an artificial and temporary instruction to preserve a valid CFG;
  // it will be removed after the IR translation pass
  B.SetInsertPoint(ParentBB);
  IndirectBrInst *BrI = B.CreateIndirectBr(
      Constant::getNullValue(PointerType::getUnqual(ParentBB->getContext())),
      BBCases.size());
  for (BasicBlock *BBCase : BBCases)
    BrI->addDestination(BBCase);
  return BrI;
}

Instruction *SPIRVEmitIntrinsics::visitGetElementPtrInst(GetElementPtrInst &I) {
  if (I.getSourceElementType() == IntegerType::getInt8Ty(CurrF->getContext()) &&
      TM->getSubtargetImpl()->isLogicalSPIRV()) {
    Instruction *Result = buildLogicalAccessChainFromGEP(I);
    if (Result)
      return Result;
  }

  IRBuilder<> B(I.getParent());
  B.SetInsertPoint(&I);
  SmallVector<Type *, 2> Types = {I.getType(), I.getOperand(0)->getType()};
  SmallVector<Value *, 4> Args;
  Args.push_back(B.getInt1(I.isInBounds()));
  llvm::append_range(Args, I.operands());
  auto *NewI = B.CreateIntrinsic(Intrinsic::spv_gep, {Types}, {Args});
  replaceAllUsesWithAndErase(B, &I, NewI);
  return NewI;
}
1590
1591Instruction *SPIRVEmitIntrinsics::visitBitCastInst(BitCastInst &I) {
1592 IRBuilder<> B(I.getParent());
1593 B.SetInsertPoint(&I);
1594 Value *Source = I.getOperand(0);
1595
1596 // SPIR-V, contrary to LLVM 17+ IR, supports bitcasts between pointers of
1597 // varying element types. In case of IR coming from older versions of LLVM
1598 // such bitcasts do not provide sufficient information, should be just skipped
1599 // here, and handled in insertPtrCastOrAssignTypeInstr.
1600 if (isPointerTy(I.getType())) {
1601 replaceAllUsesWith(&I, Source);
1602 I.eraseFromParent();
1603 return nullptr;
1604 }
1605
1606 SmallVector<Type *, 2> Types = {I.getType(), Source->getType()};
1607 SmallVector<Value *> Args(I.op_begin(), I.op_end());
1608 auto *NewI = B.CreateIntrinsic(Intrinsic::spv_bitcast, {Types}, {Args});
1609 replaceAllUsesWithAndErase(B, &I, NewI);
1610 return NewI;
1611}
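// Example (schematic): "%v = bitcast <2 x i32> %x to i64" is preserved as a
// call to @llvm.spv.bitcast, whereas a pointer-to-pointer bitcast is erased
// here and the pointee type is recovered later by
// insertPtrCastOrAssignTypeInstr().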
1612
1613void SPIRVEmitIntrinsics::insertAssignPtrTypeTargetExt(
1614 TargetExtType *AssignedType, Value *V, IRBuilder<> &B) {
1615 Type *VTy = V->getType();
1616
1617 // A couple of sanity checks.
1618 assert((isPointerTy(VTy)) && "Expect a pointer type!");
1619 if (Type *ElemTy = getPointeeType(VTy))
1620 if (ElemTy != AssignedType)
1621 report_fatal_error("Unexpected pointer element type!");
1622
1623 CallInst *AssignCI = GR->findAssignPtrTypeInstr(V);
1624 if (!AssignCI) {
1625 GR->buildAssignType(B, AssignedType, V);
1626 return;
1627 }
1628
1629 Type *CurrentType =
1630 cast<ConstantAsMetadata>(
1631 cast<MetadataAsValue>(AssignCI->getOperand(1))->getMetadata())
1632 ->getType();
1633 if (CurrentType == AssignedType)
1634 return;
1635
1636 // Builtin types cannot be redeclared or casted.
1637 if (CurrentType->isTargetExtTy())
1638 report_fatal_error("Type mismatch " + CurrentType->getTargetExtName() +
1639 "/" + AssignedType->getTargetExtName() +
1640 " for value " + V->getName(),
1641 false);
1642
1643 // Our previous guess about the type seems to be wrong; update the
1644 // inferred type according to the new, more precise type information.
1645 GR->updateAssignType(AssignCI, V, getNormalizedPoisonValue(AssignedType));
1646}
1647
1648void SPIRVEmitIntrinsics::replacePointerOperandWithPtrCast(
1649 Instruction *I, Value *Pointer, Type *ExpectedElementType,
1650 unsigned OperandToReplace, IRBuilder<> &B) {
1651 TypeValidated.insert(I);
1652
1653 // Do not emit spv_ptrcast if Pointer's element type is ExpectedElementType
1654 Type *PointerElemTy = deduceElementTypeHelper(Pointer, false);
1655 if (PointerElemTy == ExpectedElementType ||
1656 isEquivalentTypes(PointerElemTy, ExpectedElementType))
1657 return;
1658
1659 setInsertPointSkippingPhis(B, I);
1660 Value *ExpectedElementVal = getNormalizedPoisonValue(ExpectedElementType);
1661 MetadataAsValue *VMD = buildMD(ExpectedElementVal);
1662 unsigned AddressSpace = getPointerAddressSpace(Pointer->getType());
1663 bool FirstPtrCastOrAssignPtrType = true;
1664
1665 // Do not emit new spv_ptrcast if equivalent one already exists or when
1666 // spv_assign_ptr_type already targets this pointer with the same element
1667 // type.
1668 if (Pointer->hasUseList()) {
1669 for (auto User : Pointer->users()) {
1670 auto *II = dyn_cast<IntrinsicInst>(User);
1671 if (!II ||
1672 (II->getIntrinsicID() != Intrinsic::spv_assign_ptr_type &&
1673 II->getIntrinsicID() != Intrinsic::spv_ptrcast) ||
1674 II->getOperand(0) != Pointer)
1675 continue;
1676
1677 // There is some spv_ptrcast/spv_assign_ptr_type already targeting this
1678 // pointer.
1679 FirstPtrCastOrAssignPtrType = false;
1680 if (II->getOperand(1) != VMD ||
1681 dyn_cast<ConstantInt>(II->getOperand(2))->getSExtValue() !=
1682 AddressSpace)
1683 continue;
1684
1685 // The spv_ptrcast/spv_assign_ptr_type targeting this pointer is of the
1686 // same element type and address space.
1687 if (II->getIntrinsicID() != Intrinsic::spv_ptrcast)
1688 return;
1689
1690 // This must be a spv_ptrcast; do not emit a new one if this one is in the
1691 // same BB as I. Otherwise, search for another spv_ptrcast/spv_assign_ptr_type.
1692 if (II->getParent() != I->getParent())
1693 continue;
1694
1695 I->setOperand(OperandToReplace, II);
1696 return;
1697 }
1698 }
1699
1700 if (isa<Instruction>(Pointer) || isa<Argument>(Pointer)) {
1701 if (FirstPtrCastOrAssignPtrType) {
1702 // If this would be the first spv_ptrcast, do not emit spv_ptrcast and
1703 // emit spv_assign_ptr_type instead.
1704 GR->buildAssignPtr(B, ExpectedElementType, Pointer);
1705 return;
1706 } else if (isTodoType(Pointer)) {
1707 eraseTodoType(Pointer);
1708 if (!isa<CallInst>(Pointer) && !isa<GetElementPtrInst>(Pointer)) {
1709 // If this wouldn't be the first spv_ptrcast but the existing type info is
1710 // incomplete, update the spv_assign_ptr_type arguments.
1711 if (CallInst *AssignCI = GR->findAssignPtrTypeInstr(Pointer)) {
1712 Type *PrevElemTy = GR->findDeducedElementType(Pointer);
1713 assert(PrevElemTy);
1714 DenseSet<std::pair<Value *, Value *>> VisitedSubst{
1715 std::make_pair(I, Pointer)};
1716 GR->updateAssignType(AssignCI, Pointer, ExpectedElementVal);
1717 propagateElemType(Pointer, PrevElemTy, VisitedSubst);
1718 } else {
1719 GR->buildAssignPtr(B, ExpectedElementType, Pointer);
1720 }
1721 return;
1722 }
1723 }
1724 }
1725
1726 // Emit spv_ptrcast
1727 SmallVector<Type *, 2> Types = {Pointer->getType(), Pointer->getType()};
1728 SmallVector<Value *, 2> Args = {Pointer, VMD, B.getInt32(AddressSpace)};
1729 auto *PtrCastI = B.CreateIntrinsic(Intrinsic::spv_ptrcast, {Types}, Args);
1730 I->setOperand(OperandToReplace, PtrCastI);
1731 // We need to set up a pointee type for the newly created spv_ptrcast.
1732 GR->buildAssignPtr(B, ExpectedElementType, PtrCastI);
1733}
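// Hypothetical scenario for illustration: if %p was first assigned a float
// pointee but instruction I accesses it as i32, the operand is redirected
// (schematically) to
//   %c = call ptr @llvm.spv.ptrcast(ptr %p, metadata i32 poison, i32 AS)
// and %c then receives its own spv_assign_ptr_type recording the i32
// pointee.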
1734
1735void SPIRVEmitIntrinsics::insertPtrCastOrAssignTypeInstr(Instruction *I,
1736 IRBuilder<> &B) {
1737 // Handle basic instructions:
1738 StoreInst *SI = dyn_cast<StoreInst>(I);
1739 if (IsKernelArgInt8(CurrF, SI)) {
1740 replacePointerOperandWithPtrCast(
1741 I, SI->getValueOperand(), IntegerType::getInt8Ty(CurrF->getContext()),
1742 0, B);
1743 }
1744 if (SI) {
1745 Value *Op = SI->getValueOperand();
1746 Value *Pointer = SI->getPointerOperand();
1747 Type *OpTy = Op->getType();
1748 if (auto *OpI = dyn_cast<Instruction>(Op))
1749 OpTy = restoreMutatedType(GR, OpI, OpTy);
1750 if (OpTy == Op->getType())
1751 OpTy = deduceElementTypeByValueDeep(OpTy, Op, false);
1752 replacePointerOperandWithPtrCast(I, Pointer, OpTy, 1, B);
1753 return;
1754 }
1755 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
1756 Value *Pointer = LI->getPointerOperand();
1757 Type *OpTy = LI->getType();
1758 if (auto *PtrTy = dyn_cast<PointerType>(OpTy)) {
1759 if (Type *ElemTy = GR->findDeducedElementType(LI)) {
1760 OpTy = getTypedPointerWrapper(ElemTy, PtrTy->getAddressSpace());
1761 } else {
1762 Type *NewOpTy = OpTy;
1763 OpTy = deduceElementTypeByValueDeep(OpTy, LI, false);
1764 if (OpTy == NewOpTy)
1765 insertTodoType(Pointer);
1766 }
1767 }
1768 replacePointerOperandWithPtrCast(I, Pointer, OpTy, 0, B);
1769 return;
1770 }
1771 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
1772 Value *Pointer = GEPI->getPointerOperand();
1773 Type *OpTy = nullptr;
1774
1775 // Knowing the accessed type is mandatory for logical SPIR-V. Sadly,
1776 // the GEP source element type should not be used for this purpose, and
1777 // the alternative type-scavenging method is not working.
1778 // Physical SPIR-V can work around this, but not logical, hence still
1779 // try to rely on the broken type scavenging for logical.
1780 bool IsRewrittenGEP =
1781 GEPI->getSourceElementType() == IntegerType::getInt8Ty(I->getContext());
1782 if (IsRewrittenGEP && TM->getSubtargetImpl()->isLogicalSPIRV()) {
1783 Value *Src = getPointerRoot(Pointer);
1784 OpTy = GR->findDeducedElementType(Src);
1785 }
1786
1787 // In all cases, fall back to the GEP type if type scavenging failed.
1788 if (!OpTy)
1789 OpTy = GEPI->getSourceElementType();
1790
1791 replacePointerOperandWithPtrCast(I, Pointer, OpTy, 0, B);
1792 if (isNestedPointer(OpTy))
1793 insertTodoType(Pointer);
1794 return;
1795 }
1796
1797 // TODO: review and merge with the existing logic:
1798 // Handle calls to builtins (non-intrinsics):
1799 CallInst *CI = dyn_cast<CallInst>(I);
1800 if (!CI || CI->isIndirectCall() || CI->isInlineAsm() ||
1801 !CI->getCalledFunction() || CI->getCalledFunction()->isIntrinsic())
1802 return;
1803
1804 // collect information about formal parameter types
1805 std::string DemangledName =
1806 getOclOrSpirvBuiltinDemangledName(CI->getCalledFunction()->getName());
1807 Function *CalledF = CI->getCalledFunction();
1808 SmallVector<Type *, 4> CalledArgTys;
1809 bool HaveTypes = false;
1810 for (unsigned OpIdx = 0; OpIdx < CalledF->arg_size(); ++OpIdx) {
1811 Argument *CalledArg = CalledF->getArg(OpIdx);
1812 Type *ArgType = CalledArg->getType();
1813 if (!isPointerTy(ArgType)) {
1814 CalledArgTys.push_back(nullptr);
1815 } else if (Type *ArgTypeElem = getPointeeType(ArgType)) {
1816 CalledArgTys.push_back(ArgTypeElem);
1817 HaveTypes = true;
1818 } else {
1819 Type *ElemTy = GR->findDeducedElementType(CalledArg);
1820 if (!ElemTy && hasPointeeTypeAttr(CalledArg))
1821 ElemTy = getPointeeTypeByAttr(CalledArg);
1822 if (!ElemTy) {
1823 ElemTy = getPointeeTypeByCallInst(DemangledName, CalledF, OpIdx);
1824 if (ElemTy) {
1825 GR->addDeducedElementType(CalledArg, normalizeType(ElemTy));
1826 } else {
1827 for (User *U : CalledArg->users()) {
1828 if (Instruction *Inst = dyn_cast<Instruction>(U)) {
1829 if ((ElemTy = deduceElementTypeHelper(Inst, false)) != nullptr)
1830 break;
1831 }
1832 }
1833 }
1834 }
1835 HaveTypes |= ElemTy != nullptr;
1836 CalledArgTys.push_back(ElemTy);
1837 }
1838 }
1839
1840 if (DemangledName.empty() && !HaveTypes)
1841 return;
1842
1843 for (unsigned OpIdx = 0; OpIdx < CI->arg_size(); OpIdx++) {
1844 Value *ArgOperand = CI->getArgOperand(OpIdx);
1845 if (!isPointerTy(ArgOperand->getType()))
1846 continue;
1847
1848 // Constants (nulls/undefs) are handled in insertAssignPtrTypeIntrs()
1849 if (!isa<Instruction>(ArgOperand) && !isa<Argument>(ArgOperand)) {
1850 // However, we may have assumptions about the formal argument's type and
1851 // may need to insert a ptr cast for the actual parameter of this
1852 // call.
1853 Argument *CalledArg = CalledF->getArg(OpIdx);
1854 if (!GR->findDeducedElementType(CalledArg))
1855 continue;
1856 }
1857
1858 Type *ExpectedType =
1859 OpIdx < CalledArgTys.size() ? CalledArgTys[OpIdx] : nullptr;
1860 if (!ExpectedType && !DemangledName.empty())
1861 ExpectedType = SPIRV::parseBuiltinCallArgumentBaseType(
1862 DemangledName, OpIdx, I->getContext());
1863 if (!ExpectedType || ExpectedType->isVoidTy())
1864 continue;
1865
1866 if (ExpectedType->isTargetExtTy() &&
1867 !isTypedPointerWrapper(cast<TargetExtType>(ExpectedType)))
1868 insertAssignPtrTypeTargetExt(cast<TargetExtType>(ExpectedType),
1869 ArgOperand, B);
1870 else
1871 replacePointerOperandWithPtrCast(CI, ArgOperand, ExpectedType, OpIdx, B);
1872 }
1873}
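// As a concrete instance (hedged; the exact builtin coverage is defined by
// the demangler tables): for a call to an OpenCL builtin like
//   atomic_init(volatile generic atomic_int *obj, int value)
// parseBuiltinCallArgumentBaseType() can report i32 as the expected pointee
// of the first argument, and the loop above emits a ptrcast or assign for
// the actual parameter whenever its deduced element type disagrees.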
1874
1875Instruction *SPIRVEmitIntrinsics::visitInsertElementInst(InsertElementInst &I) {
1876 // If it's a <1 x Type> vector type, don't modify it. It's not a legal vector
1877 // type in LLT and IRTranslator will replace it by the scalar.
1878 if (isVector1(I.getType()))
1879 return &I;
1880
1881 SmallVector<Type *, 4> Types = {I.getType(), I.getOperand(0)->getType(),
1882 I.getOperand(1)->getType(),
1883 I.getOperand(2)->getType()};
1884 IRBuilder<> B(I.getParent());
1885 B.SetInsertPoint(&I);
1886 SmallVector<Value *> Args(I.op_begin(), I.op_end());
1887 auto *NewI = B.CreateIntrinsic(Intrinsic::spv_insertelt, {Types}, {Args});
1888 replaceAllUsesWithAndErase(B, &I, NewI);
1889 return NewI;
1890}
1891
1892Instruction *
1893SPIRVEmitIntrinsics::visitExtractElementInst(ExtractElementInst &I) {
1894 // If it's a <1 x Type> vector type, don't modify it. It's not a legal vector
1895 // type in LLT and IRTranslator will replace it by the scalar.
1896 if (isVector1(I.getVectorOperandType()))
1897 return &I;
1898
1899 IRBuilder<> B(I.getParent());
1900 B.SetInsertPoint(&I);
1901 SmallVector<Type *, 3> Types = {I.getType(), I.getVectorOperandType(),
1902 I.getIndexOperand()->getType()};
1903 SmallVector<Value *, 2> Args = {I.getVectorOperand(), I.getIndexOperand()};
1904 auto *NewI = B.CreateIntrinsic(Intrinsic::spv_extractelt, {Types}, {Args});
1905 replaceAllUsesWithAndErase(B, &I, NewI);
1906 return NewI;
1907}
1908
1909Instruction *SPIRVEmitIntrinsics::visitInsertValueInst(InsertValueInst &I) {
1910 IRBuilder<> B(I.getParent());
1911 B.SetInsertPoint(&I);
1912 SmallVector<Type *, 1> Types = {I.getInsertedValueOperand()->getType()};
1913 SmallVector<Value *> Args;
1914 Value *AggregateOp = I.getAggregateOperand();
1915 if (isa<UndefValue>(AggregateOp))
1916 Args.push_back(UndefValue::get(B.getInt32Ty()));
1917 else
1918 Args.push_back(AggregateOp);
1919 Args.push_back(I.getInsertedValueOperand());
1920 for (auto &Op : I.indices())
1921 Args.push_back(B.getInt32(Op));
1922 Instruction *NewI =
1923 B.CreateIntrinsic(Intrinsic::spv_insertv, {Types}, {Args});
1924 replaceMemInstrUses(&I, NewI, B);
1925 return NewI;
1926}
1927
1928Instruction *SPIRVEmitIntrinsics::visitExtractValueInst(ExtractValueInst &I) {
1929 if (I.getAggregateOperand()->getType()->isAggregateType())
1930 return &I;
1931 IRBuilder<> B(I.getParent());
1932 B.SetInsertPoint(&I);
1933 SmallVector<Value *> Args(I.operands());
1934 for (auto &Op : I.indices())
1935 Args.push_back(B.getInt32(Op));
1936 auto *NewI =
1937 B.CreateIntrinsic(Intrinsic::spv_extractv, {I.getType()}, {Args});
1938 replaceAllUsesWithAndErase(B, &I, NewI);
1939 return NewI;
1940}
1941
1942Instruction *SPIRVEmitIntrinsics::visitLoadInst(LoadInst &I) {
1943 if (!I.getType()->isAggregateType())
1944 return &I;
1945 IRBuilder<> B(I.getParent());
1946 B.SetInsertPoint(&I);
1947 TrackConstants = false;
1948 const auto *TLI = TM->getSubtargetImpl()->getTargetLowering();
1949 MachineMemOperand::Flags Flags =
1950 TLI->getLoadMemOperandFlags(I, CurrF->getDataLayout());
1951 auto *NewI =
1952 B.CreateIntrinsic(Intrinsic::spv_load, {I.getOperand(0)->getType()},
1953 {I.getPointerOperand(), B.getInt16(Flags),
1954 B.getInt8(I.getAlign().value())});
1955 replaceMemInstrUses(&I, NewI, B);
1956 return NewI;
1957}
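// Schematically (illustrative only), an aggregate load
//   %v = load %struct.S, ptr %p, align 8
// becomes
//   %v = call %struct.S @llvm.spv.load(ptr %p, i16 Flags, i8 8)
// with the memory-operand flags and the alignment folded into immediates.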
1958
1959Instruction *SPIRVEmitIntrinsics::visitStoreInst(StoreInst &I) {
1960 if (!AggrStores.contains(&I))
1961 return &I;
1962 IRBuilder<> B(I.getParent());
1963 B.SetInsertPoint(&I);
1964 TrackConstants = false;
1965 const auto *TLI = TM->getSubtargetImpl()->getTargetLowering();
1966 MachineMemOperand::Flags Flags =
1967 TLI->getStoreMemOperandFlags(I, CurrF->getDataLayout());
1968 auto *PtrOp = I.getPointerOperand();
1969 auto *NewI = B.CreateIntrinsic(
1970 Intrinsic::spv_store, {I.getValueOperand()->getType(), PtrOp->getType()},
1971 {I.getValueOperand(), PtrOp, B.getInt16(Flags),
1972 B.getInt8(I.getAlign().value())});
1973 NewI->copyMetadata(I);
1974 I.eraseFromParent();
1975 return NewI;
1976}
1977
1978Instruction *SPIRVEmitIntrinsics::visitAllocaInst(AllocaInst &I) {
1979 Value *ArraySize = nullptr;
1980 if (I.isArrayAllocation()) {
1981 const SPIRVSubtarget *STI = TM->getSubtargetImpl(*I.getFunction());
1982 if (!STI->canUseExtension(
1983 SPIRV::Extension::SPV_INTEL_variable_length_array))
1984 report_fatal_error(
1985 "array allocation: this instruction requires the following "
1986 "SPIR-V extension: SPV_INTEL_variable_length_array",
1987 false);
1988 ArraySize = I.getArraySize();
1989 }
1990 IRBuilder<> B(I.getParent());
1991 B.SetInsertPoint(&I);
1992 TrackConstants = false;
1993 Type *PtrTy = I.getType();
1994 auto *NewI =
1995 ArraySize
1996 ? B.CreateIntrinsic(Intrinsic::spv_alloca_array,
1997 {PtrTy, ArraySize->getType()},
1998 {ArraySize, B.getInt8(I.getAlign().value())})
1999 : B.CreateIntrinsic(Intrinsic::spv_alloca, {PtrTy},
2000 {B.getInt8(I.getAlign().value())});
2001 replaceAllUsesWithAndErase(B, &I, NewI);
2002 return NewI;
2003}
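// For example (schematic IR): "%a = alloca i32, i32 %n, align 4" lowers to
//   %a = call ptr @llvm.spv.alloca.array(i32 %n, i8 4)
// and requires SPV_INTEL_variable_length_array, while a fixed-size
// "%a = alloca i32, align 4" becomes a plain @llvm.spv.alloca call carrying
// only the alignment immediate.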
2004
2005Instruction *SPIRVEmitIntrinsics::visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
2006 assert(I.getType()->isAggregateType() && "Aggregate result is expected");
2007 IRBuilder<> B(I.getParent());
2008 B.SetInsertPoint(&I);
2009 SmallVector<Value *> Args(I.operands());
2010 Args.push_back(B.getInt32(
2011 static_cast<uint32_t>(getMemScope(I.getContext(), I.getSyncScopeID()))));
2012 Args.push_back(B.getInt32(
2013 static_cast<uint32_t>(getMemSemantics(I.getSuccessOrdering()))));
2014 Args.push_back(B.getInt32(
2015 static_cast<uint32_t>(getMemSemantics(I.getFailureOrdering()))));
2016 auto *NewI = B.CreateIntrinsic(Intrinsic::spv_cmpxchg,
2017 {I.getPointerOperand()->getType()}, {Args});
2018 replaceMemInstrUses(&I, NewI, B);
2019 return NewI;
2020}
2021
2022Instruction *SPIRVEmitIntrinsics::visitUnreachableInst(UnreachableInst &I) {
2023 IRBuilder<> B(I.getParent());
2024 B.SetInsertPoint(&I);
2025 B.CreateIntrinsic(Intrinsic::spv_unreachable, {});
2026 return &I;
2027}
2028
2029void SPIRVEmitIntrinsics::processGlobalValue(GlobalVariable &GV,
2030 IRBuilder<> &B) {
2031 // Skip special artificial variables.
2032 static const StringSet<> ArtificialGlobals{"llvm.global.annotations",
2033 "llvm.compiler.used"};
2034
2035 if (ArtificialGlobals.contains(GV.getName()))
2036 return;
2037
2038 Constant *Init = nullptr;
2039 if (hasInitializer(&GV)) {
2040 // Deduce element type and store results in Global Registry.
2041 // Result is ignored, because TypedPointerType is not supported
2042 // by llvm IR general logic.
2043 deduceElementTypeHelper(&GV, false);
2044 Init = GV.getInitializer();
2045 Type *Ty = isAggrConstForceInt32(Init) ? B.getInt32Ty() : Init->getType();
2046 Constant *Const = isAggrConstForceInt32(Init) ? B.getInt32(1) : Init;
2047 auto *InitInst = B.CreateIntrinsic(Intrinsic::spv_init_global,
2048 {GV.getType(), Ty}, {&GV, Const});
2049 InitInst->setArgOperand(1, Init);
2050 }
2051 if (!Init && GV.use_empty())
2052 B.CreateIntrinsic(Intrinsic::spv_unref_global, GV.getType(), &GV);
2053}
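// Illustrative effect (schematic IR): for "@g = global i32 42" this emits
//   call void @llvm.spv.init.global(ptr @g, i32 42)
// while an uninitialized and otherwise unused global instead gets
//   call void @llvm.spv.unref.global(ptr @g)
// so that neither is lost before IR translation.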
2054
2055// Return true if we can't decide on the pointee type now and will get back
2056// to the question later. Return false if spv_assign_ptr_type is not needed
2057// or can be inserted immediately.
2058bool SPIRVEmitIntrinsics::insertAssignPtrTypeIntrs(Instruction *I,
2059 IRBuilder<> &B,
2060 bool UnknownElemTypeI8) {
2061 reportFatalOnTokenType(I);
2062 if (!isPointerTy(I->getType()) || !requireAssignType(I))
2063 return false;
2064
2065 setInsertPointAfterDef(B, I);
2066 if (Type *ElemTy = deduceElementType(I, UnknownElemTypeI8)) {
2067 GR->buildAssignPtr(B, ElemTy, I);
2068 return false;
2069 }
2070 return true;
2071}
2072
2073void SPIRVEmitIntrinsics::insertAssignTypeIntrs(Instruction *I,
2074 IRBuilder<> &B) {
2075 // TODO: extend the list of functions with known result types
2076 static StringMap<unsigned> ResTypeWellKnown = {
2077 {"async_work_group_copy", WellKnownTypes::Event},
2078 {"async_work_group_strided_copy", WellKnownTypes::Event},
2079 {"__spirv_GroupAsyncCopy", WellKnownTypes::Event}};
2080
2081 reportFatalOnTokenType(I);
2082
2083 bool IsKnown = false;
2084 if (auto *CI = dyn_cast<CallInst>(I)) {
2085 if (!CI->isIndirectCall() && !CI->isInlineAsm() &&
2086 CI->getCalledFunction() && !CI->getCalledFunction()->isIntrinsic()) {
2087 Function *CalledF = CI->getCalledFunction();
2088 std::string DemangledName =
2089 getOclOrSpirvBuiltinDemangledName(CalledF->getName());
2090 FPDecorationId DecorationId = FPDecorationId::NONE;
2091 if (DemangledName.length() > 0)
2092 DemangledName =
2093 SPIRV::lookupBuiltinNameHelper(DemangledName, &DecorationId);
2094 auto ResIt = ResTypeWellKnown.find(DemangledName);
2095 if (ResIt != ResTypeWellKnown.end()) {
2096 IsKnown = true;
2097 setInsertPointAfterDef(B, I);
2098 switch (ResIt->second) {
2099 case WellKnownTypes::Event:
2100 GR->buildAssignType(
2101 B, TargetExtType::get(I->getContext(), "spirv.Event"), I);
2102 break;
2103 }
2104 }
2105 // Check whether a floating-point rounding mode or saturation hint is present.
2106 switch (DecorationId) {
2107 default:
2108 break;
2109 case FPDecorationId::SAT:
2110 createSaturatedConversionDecoration(CI, B);
2111 break;
2112 case FPDecorationId::RTE:
2113 createRoundingModeDecoration(
2114 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTE, B);
2115 break;
2116 case FPDecorationId::RTZ:
2117 createRoundingModeDecoration(
2118 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTZ, B);
2119 break;
2120 case FPDecorationId::RTP:
2121 createRoundingModeDecoration(
2122 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTP, B);
2123 break;
2124 case FPDecorationId::RTN:
2125 createRoundingModeDecoration(
2126 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTN, B);
2127 break;
2128 }
2129 }
2130 }
2131
2132 Type *Ty = I->getType();
2133 if (!IsKnown && !Ty->isVoidTy() && !isPointerTy(Ty) && requireAssignType(I)) {
2134 setInsertPointAfterDef(B, I);
2135 Type *TypeToAssign = Ty;
2136 if (auto *II = dyn_cast<IntrinsicInst>(I)) {
2137 if (II->getIntrinsicID() == Intrinsic::spv_const_composite ||
2138 II->getIntrinsicID() == Intrinsic::spv_undef) {
2139 auto It = AggrConstTypes.find(II);
2140 if (It == AggrConstTypes.end())
2141 report_fatal_error("Unknown composite intrinsic type");
2142 TypeToAssign = It->second;
2143 }
2144 }
2145 TypeToAssign = restoreMutatedType(GR, I, TypeToAssign);
2146 GR->buildAssignType(B, TypeToAssign, I);
2147 }
2148 for (const auto &Op : I->operands()) {
2149 if (isa<ConstantPointerNull>(Op) || isa<UndefValue>(Op) ||
2150 // Check GetElementPtrConstantExpr case.
2151 (isa<ConstantExpr>(Op) &&
2152 (isa<GEPOperator>(Op) ||
2153 (cast<ConstantExpr>(Op)->getOpcode() == CastInst::IntToPtr)))) {
2154 setInsertPointSkippingPhis(B, I);
2155 Type *OpTy = Op->getType();
2156 if (isa<UndefValue>(Op) && OpTy->isAggregateType()) {
2157 CallInst *AssignCI =
2158 buildIntrWithMD(Intrinsic::spv_assign_type, {B.getInt32Ty()}, Op,
2159 UndefValue::get(B.getInt32Ty()), {}, B);
2160 GR->addAssignPtrTypeInstr(Op, AssignCI);
2161 } else if (!isa<Instruction>(Op)) {
2162 Type *OpTy = Op->getType();
2163 Type *OpTyElem = getPointeeType(OpTy);
2164 if (OpTyElem) {
2165 GR->buildAssignPtr(B, OpTyElem, Op);
2166 } else if (isPointerTy(OpTy)) {
2167 Type *ElemTy = GR->findDeducedElementType(Op);
2168 GR->buildAssignPtr(B, ElemTy ? ElemTy : deduceElementType(Op, true),
2169 Op);
2170 } else {
2171 Value *OpTyVal = Op;
2172 if (OpTy->isTargetExtTy()) {
2173 // We need to do this in order to be consistent with how target ext
2174 // types are handled in `processInstrAfterVisit`
2175 OpTyVal = getNormalizedPoisonValue(OpTy);
2176 }
2177 CallInst *AssignCI =
2178 buildIntrWithMD(Intrinsic::spv_assign_type, {OpTy},
2179 getNormalizedPoisonValue(OpTy), OpTyVal, {}, B);
2180 GR->addAssignPtrTypeInstr(OpTyVal, AssignCI);
2181 }
2182 }
2183 }
2184 }
2185}
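// As one concrete case (schematic, mangling omitted): the result of a call
// to __spirv_GroupAsyncCopy is known to be an event, so the code above
// emits roughly
//   call void @llvm.spv.assign.type(<ty> %call,
//                                   metadata target("spirv.Event") poison)
// instead of re-deducing the result type from its uses.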
2186
2187bool SPIRVEmitIntrinsics::shouldTryToAddMemAliasingDecoration(
2188 Instruction *Inst) {
2189 const SPIRVSubtarget *STI = TM->getSubtargetImpl(*Inst->getFunction());
2190 if (!STI->canUseExtension(SPIRV::Extension::SPV_INTEL_memory_access_aliasing))
2191 return false;
2192 // Add aliasing decorations to internal load and store intrinsics
2193 // and atomic instructions, skipping atomic store as it won't have ID to
2194 // attach the decoration.
2195 CallInst *CI = dyn_cast<CallInst>(Inst);
2196 if (!CI)
2197 return false;
2198 if (Function *Fun = CI->getCalledFunction()) {
2199 if (Fun->isIntrinsic()) {
2200 switch (Fun->getIntrinsicID()) {
2201 case Intrinsic::spv_load:
2202 case Intrinsic::spv_store:
2203 return true;
2204 default:
2205 return false;
2206 }
2207 }
2208 std::string Name = getOclOrSpirvBuiltinDemangledName(Fun->getName());
2209 const std::string Prefix = "__spirv_Atomic";
2210 const bool IsAtomic = Name.find(Prefix) == 0;
2211
2212 if (!Fun->getReturnType()->isVoidTy() && IsAtomic)
2213 return true;
2214 }
2215 return false;
2216}
2217
2218void SPIRVEmitIntrinsics::insertSpirvDecorations(Instruction *I,
2219 IRBuilder<> &B) {
2220 if (MDNode *MD = I->getMetadata("spirv.Decorations")) {
2221 setInsertPointAfterDef(B, I);
2222 B.CreateIntrinsic(Intrinsic::spv_assign_decoration, {I->getType()},
2223 {I, MetadataAsValue::get(I->getContext(), MD)});
2224 }
2225 // Lower alias.scope/noalias metadata
2226 {
2227 auto processMemAliasingDecoration = [&](unsigned Kind) {
2228 if (MDNode *AliasListMD = I->getMetadata(Kind)) {
2229 if (shouldTryToAddMemAliasingDecoration(I)) {
2230 uint32_t Dec = Kind == LLVMContext::MD_alias_scope
2231 ? SPIRV::Decoration::AliasScopeINTEL
2232 : SPIRV::Decoration::NoAliasINTEL;
2233 SmallVector<Value *, 3> Args = {
2234 I, ConstantInt::get(B.getInt32Ty(), Dec),
2235 MetadataAsValue::get(I->getContext(), AliasListMD)};
2236 setInsertPointAfterDef(B, I);
2237 B.CreateIntrinsic(Intrinsic::spv_assign_aliasing_decoration,
2238 {I->getType()}, {Args});
2239 }
2240 }
2241 };
2242 processMemAliasingDecoration(LLVMContext::MD_alias_scope);
2243 processMemAliasingDecoration(LLVMContext::MD_noalias);
2244 }
2245 // MD_fpmath
2246 if (MDNode *MD = I->getMetadata(LLVMContext::MD_fpmath)) {
2247 const SPIRVSubtarget *STI = TM->getSubtargetImpl(*I->getFunction());
2248 bool AllowFPMaxError =
2249 STI->canUseExtension(SPIRV::Extension::SPV_INTEL_fp_max_error);
2250 if (!AllowFPMaxError)
2251 return;
2252
2253 setInsertPointAfterDef(B, I);
2254 B.CreateIntrinsic(Intrinsic::spv_assign_fpmaxerror_decoration,
2255 {I->getType()},
2256 {I, MetadataAsValue::get(I->getContext(), MD)});
2257 }
2258}
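// Example metadata shape (illustrative; decoration ids follow the SPIR-V
// spec, e.g. 19 = Restrict): an instruction annotated with
//   !spirv.Decorations !0   where   !0 = !{!1}   and   !1 = !{i32 19}
// is forwarded through @llvm.spv.assign.decoration so the decoration
// survives instruction selection.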
2259
2260static SPIRV::FPFastMathDefaultInfoVector &getOrCreateFPFastMathDefaultInfoVec(
2261 const Module &M,
2262 DenseMap<Function *, SPIRV::FPFastMathDefaultInfoVector>
2263 &FPFastMathDefaultInfoMap,
2264 Function *F) {
2265 auto it = FPFastMathDefaultInfoMap.find(F);
2266 if (it != FPFastMathDefaultInfoMap.end())
2267 return it->second;
2268
2269 // If the map does not contain the entry, create a new one. Initialize it to
2270 // contain all 3 elements sorted by bit width of target type: {half, float,
2271 // double}.
2272 SPIRV::FPFastMathDefaultInfoVector FPFastMathDefaultInfoVec;
2273 FPFastMathDefaultInfoVec.emplace_back(Type::getHalfTy(M.getContext()),
2274 SPIRV::FPFastMathMode::None);
2275 FPFastMathDefaultInfoVec.emplace_back(Type::getFloatTy(M.getContext()),
2276 SPIRV::FPFastMathMode::None);
2277 FPFastMathDefaultInfoVec.emplace_back(Type::getDoubleTy(M.getContext()),
2278 SPIRV::FPFastMathMode::None);
2279 return FPFastMathDefaultInfoMap[F] = std::move(FPFastMathDefaultInfoVec);
2280}
2281
2282static SPIRV::FPFastMathDefaultInfo &getFPFastMathDefaultInfo(
2283 SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec,
2284 const Type *Ty) {
2285 size_t BitWidth = Ty->getScalarSizeInBits();
2286 int Index =
2287 SPIRV::FPFastMathDefaultInfoVector::computeFPFastMathDefaultInfoVecIndex(
2288 BitWidth);
2289 assert(Index >= 0 && Index < 3 &&
2290 "Expected FPFastMathDefaultInfo for half, float, or double");
2291 assert(FPFastMathDefaultInfoVec.size() == 3 &&
2292 "Expected FPFastMathDefaultInfoVec to have exactly 3 elements");
2293 return FPFastMathDefaultInfoVec[Index];
2294}
2295
2296void SPIRVEmitIntrinsics::insertConstantsForFPFastMathDefault(Module &M) {
2297 const SPIRVSubtarget *ST = TM->getSubtargetImpl();
2298 if (!ST->canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2))
2299 return;
2300
2301 // Store the FPFastMathDefaultInfo in the FPFastMathDefaultInfoMap.
2302 // We need the entry point (function) as the key, and the target
2303 // type and flags as the value.
2304 // We also need to check ContractionOff and SignedZeroInfNanPreserve
2305 // execution modes, as they are now deprecated and must be replaced
2306 // with FPFastMathDefaultInfo.
2307 auto Node = M.getNamedMetadata("spirv.ExecutionMode");
2308 if (!Node) {
2309 if (!M.getNamedMetadata("opencl.enable.FP_CONTRACT")) {
2310 // This requires emitting ContractionOff. However, because
2311 // ContractionOff is now deprecated, we need to replace it with
2312 // FPFastMathDefaultInfo with FP Fast Math Mode bitmask set to all 0.
2313 // We need to create the constant for that.
2314
2315 // Create constant instruction with the bitmask flags.
2316 Constant *InitValue =
2317 ConstantInt::get(Type::getInt32Ty(M.getContext()), 0);
2318 // TODO: Reuse constant if there is one already with the required
2319 // value.
2320 [[maybe_unused]] GlobalVariable *GV =
2321 new GlobalVariable(M, // Module
2322 Type::getInt32Ty(M.getContext()), // Type
2323 true, // isConstant
2324 GlobalValue::InternalLinkage, // Linkage
2325 InitValue // Initializer
2326 );
2327 }
2328 return;
2329 }
2330
2331 // The table maps function pointers to their default FP fast math info. It
2332 // can be assumed that the SmallVector is sorted by the bit width of the
2333 // type. The first element is the smallest bit width, and the last element
2334 // is the largest bit width, therefore, we will have {half, float, double}
2335 // in the order of their bit widths.
2336 DenseMap<Function *, SPIRV::FPFastMathDefaultInfoVector>
2337 FPFastMathDefaultInfoMap;
2338
2339 for (unsigned i = 0; i < Node->getNumOperands(); i++) {
2340 MDNode *MDN = cast<MDNode>(Node->getOperand(i));
2341 assert(MDN->getNumOperands() >= 2 && "Expected at least 2 operands");
2342 Function *F = cast<Function>(
2343 cast<ConstantAsMetadata>(MDN->getOperand(0))->getValue());
2344 const auto EM =
2345 cast<ConstantInt>(
2346 cast<ConstantAsMetadata>(MDN->getOperand(1))->getValue())
2347 ->getZExtValue();
2348 if (EM == SPIRV::ExecutionMode::FPFastMathDefault) {
2349 assert(MDN->getNumOperands() == 4 &&
2350 "Expected 4 operands for FPFastMathDefault");
2351 const Type *T = cast<ValueAsMetadata>(MDN->getOperand(2))->getType();
2352 unsigned Flags =
2353 cast<ConstantInt>(
2354 cast<ConstantAsMetadata>(MDN->getOperand(3))->getValue())
2355 ->getZExtValue();
2356 SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
2357 getOrCreateFPFastMathDefaultInfoVec(M, FPFastMathDefaultInfoMap, F);
2358 SPIRV::FPFastMathDefaultInfo &Info =
2359 getFPFastMathDefaultInfo(FPFastMathDefaultInfoVec, T);
2360 Info.FastMathFlags = Flags;
2361 Info.FPFastMathDefault = true;
2362 } else if (EM == SPIRV::ExecutionMode::ContractionOff) {
2363 assert(MDN->getNumOperands() == 2 &&
2364 "Expected no operands for ContractionOff");
2365
2366 // We need to save this info for every possible FP type, i.e. {half,
2367 // float, double, fp128}.
2368 SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
2369 getOrCreateFPFastMathDefaultInfoVec(M, FPFastMathDefaultInfoMap, F);
2370 for (SPIRV::FPFastMathDefaultInfo &Info : FPFastMathDefaultInfoVec) {
2371 Info.ContractionOff = true;
2372 }
2373 } else if (EM == SPIRV::ExecutionMode::SignedZeroInfNanPreserve) {
2374 assert(MDN->getNumOperands() == 3 &&
2375 "Expected 1 operand for SignedZeroInfNanPreserve");
2376 unsigned TargetWidth =
2377 cast<ConstantInt>(
2378 cast<ConstantAsMetadata>(MDN->getOperand(2))->getValue())
2379 ->getZExtValue();
2380 // We need to save this info only for the FP type with TargetWidth.
2381 SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
2382 getOrCreateFPFastMathDefaultInfoVec(M, FPFastMathDefaultInfoMap, F);
2383 int Index = SPIRV::FPFastMathDefaultInfoVector::
2384 computeFPFastMathDefaultInfoVecIndex(TargetWidth);
2385 assert(Index >= 0 && Index < 3 &&
2386 "Expected FPFastMathDefaultInfo for half, float, or double");
2387 assert(FPFastMathDefaultInfoVec.size() == 3 &&
2388 "Expected FPFastMathDefaultInfoVec to have exactly 3 elements");
2389 FPFastMathDefaultInfoVec[Index].SignedZeroInfNanPreserve = true;
2390 }
2391 }
2392
2393 std::unordered_map<unsigned, GlobalVariable *> GlobalVars;
2394 for (auto &[Func, FPFastMathDefaultInfoVec] : FPFastMathDefaultInfoMap) {
2395 if (FPFastMathDefaultInfoVec.empty())
2396 continue;
2397
2398 for (const SPIRV::FPFastMathDefaultInfo &Info : FPFastMathDefaultInfoVec) {
2399 assert(Info.Ty && "Expected target type for FPFastMathDefaultInfo");
2400 // Skip if none of the execution modes was used.
2401 unsigned Flags = Info.FastMathFlags;
2402 if (Flags == SPIRV::FPFastMathMode::None && !Info.ContractionOff &&
2403 !Info.SignedZeroInfNanPreserve && !Info.FPFastMathDefault)
2404 continue;
2405
2406 // Check if flags are compatible.
2407 if (Info.ContractionOff && (Flags & SPIRV::FPFastMathMode::AllowContract))
2408 report_fatal_error("Conflicting FPFastMathFlags: ContractionOff "
2409 "and AllowContract");
2410
2411 if (Info.SignedZeroInfNanPreserve &&
2412 !(Flags &
2413 (SPIRV::FPFastMathMode::NotNaN | SPIRV::FPFastMathMode::NotInf |
2414 SPIRV::FPFastMathMode::NSZ))) {
2415 if (Info.FPFastMathDefault)
2416 report_fatal_error("Conflicting FPFastMathFlags: "
2417 "SignedZeroInfNanPreserve but at least one of "
2418 "NotNaN/NotInf/NSZ is enabled.");
2419 }
2420
2421 if ((Flags & SPIRV::FPFastMathMode::AllowTransform) &&
2422 !((Flags & SPIRV::FPFastMathMode::AllowReassoc) &&
2423 (Flags & SPIRV::FPFastMathMode::AllowContract))) {
2424 report_fatal_error("Conflicting FPFastMathFlags: "
2425 "AllowTransform requires AllowReassoc and "
2426 "AllowContract to be set.");
2427 }
2428
2429 auto it = GlobalVars.find(Flags);
2430 GlobalVariable *GV = nullptr;
2431 if (it != GlobalVars.end()) {
2432 // Reuse existing global variable.
2433 GV = it->second;
2434 } else {
2435 // Create constant instruction with the bitmask flags.
2436 Constant *InitValue =
2437 ConstantInt::get(Type::getInt32Ty(M.getContext()), Flags);
2438 // TODO: Reuse constant if there is one already with the required
2439 // value.
2440 GV = new GlobalVariable(M, // Module
2441 Type::getInt32Ty(M.getContext()), // Type
2442 true, // isConstant
2443 GlobalValue::InternalLinkage, // Linkage
2444 InitValue // Initializer
2445 );
2446 GlobalVars[Flags] = GV;
2447 }
2448 }
2449 }
2450}
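// The named metadata consumed above has roughly this shape (hedged example;
// the i32 codes are SPIR-V execution-mode and fast-math-mode values):
//   !spirv.ExecutionMode = !{!0}
//   !0 = !{ptr @kernel, i32 <FPFastMathDefault>, float poison, i32 <flags>}
// i.e. entry point, execution mode, a value of the target FP type, and the
// bitmask this function materializes as an i32 constant global.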
2451
2452void SPIRVEmitIntrinsics::processInstrAfterVisit(Instruction *I,
2453 IRBuilder<> &B) {
2454 auto *II = dyn_cast<IntrinsicInst>(I);
2455 bool IsConstComposite =
2456 II && II->getIntrinsicID() == Intrinsic::spv_const_composite;
2457 if (IsConstComposite && TrackConstants) {
2458 setInsertPointAfterDef(B, I);
2459 auto t = AggrConsts.find(I);
2460 assert(t != AggrConsts.end());
2461 auto *NewOp =
2462 buildIntrWithMD(Intrinsic::spv_track_constant,
2463 {II->getType(), II->getType()}, t->second, I, {}, B);
2464 replaceAllUsesWith(I, NewOp, false);
2465 NewOp->setArgOperand(0, I);
2466 }
2467 bool IsPhi = isa<PHINode>(I), BPrepared = false;
2468 for (const auto &Op : I->operands()) {
2469 if (isa<PHINode>(I) || isa<SwitchInst>(I) ||
2470 !(isa<ConstantData>(Op) || isa<ConstantExpr>(Op)))
2471 continue;
2472 unsigned OpNo = Op.getOperandNo();
2473 if (II && ((II->getIntrinsicID() == Intrinsic::spv_gep && OpNo == 0) ||
2474 (II->paramHasAttr(OpNo, Attribute::ImmArg))))
2475 continue;
2476
2477 if (!BPrepared) {
2478 IsPhi ? B.SetInsertPointPastAllocas(I->getParent()->getParent())
2479 : B.SetInsertPoint(I);
2480 BPrepared = true;
2481 }
2482 Type *OpTy = Op->getType();
2483 Type *OpElemTy = GR->findDeducedElementType(Op);
2484 Value *NewOp = Op;
2485 if (OpTy->isTargetExtTy()) {
2486 // Since this value is replaced by poison, we need to do the same in
2487 // `insertAssignTypeIntrs`.
2488 Value *OpTyVal = getNormalizedPoisonValue(OpTy);
2489 NewOp = buildIntrWithMD(Intrinsic::spv_track_constant,
2490 {OpTy, OpTyVal->getType()}, Op, OpTyVal, {}, B);
2491 }
2492 if (!IsConstComposite && isPointerTy(OpTy) && OpElemTy != nullptr &&
2493 OpElemTy != IntegerType::getInt8Ty(I->getContext())) {
2494 SmallVector<Type *, 2> Types = {OpTy, OpTy};
2495 SmallVector<Value *, 2> Args = {
2496 NewOp, buildMD(getNormalizedPoisonValue(OpElemTy)),
2497 B.getInt32(getPointerAddressSpace(OpTy))};
2498 CallInst *PtrCasted =
2499 B.CreateIntrinsic(Intrinsic::spv_ptrcast, {Types}, Args);
2500 GR->buildAssignPtr(B, OpElemTy, PtrCasted);
2501 NewOp = PtrCasted;
2502 }
2503 if (NewOp != Op)
2504 I->setOperand(OpNo, NewOp);
2505 }
2506 if (Named.insert(I).second)
2507 emitAssignName(I, B);
2508}
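// For a tracked constant this produces, schematically (argument shapes
// approximate),
//   %t = call i32 @llvm.spv.track.constant(metadata i32 42, i32 42)
// so the original LLVM constant can still be identified, and deduplicated,
// after IRTranslation has rewritten everything else.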
2509
2510Type *SPIRVEmitIntrinsics::deduceFunParamElementType(Function *F,
2511 unsigned OpIdx) {
2512 std::unordered_set<Function *> FVisited;
2513 return deduceFunParamElementType(F, OpIdx, FVisited);
2514}
2515
2516Type *SPIRVEmitIntrinsics::deduceFunParamElementType(
2517 Function *F, unsigned OpIdx, std::unordered_set<Function *> &FVisited) {
2518 // maybe a cycle
2519 if (!FVisited.insert(F).second)
2520 return nullptr;
2521
2522 std::unordered_set<Value *> Visited;
2523 SmallVector<std::pair<Function *, unsigned>> Lookup;
2524 // search in function's call sites
2525 for (User *U : F->users()) {
2526 CallInst *CI = dyn_cast<CallInst>(U);
2527 if (!CI || OpIdx >= CI->arg_size())
2528 continue;
2529 Value *OpArg = CI->getArgOperand(OpIdx);
2530 if (!isPointerTy(OpArg->getType()))
2531 continue;
2532 // maybe we already know operand's element type
2533 if (Type *KnownTy = GR->findDeducedElementType(OpArg))
2534 return KnownTy;
2535 // try to deduce from the operand itself
2536 Visited.clear();
2537 if (Type *Ty = deduceElementTypeHelper(OpArg, Visited, false))
2538 return Ty;
2539 // search in actual parameter's users
2540 for (User *OpU : OpArg->users()) {
2541 Instruction *Inst = dyn_cast<Instruction>(OpU);
2542 if (!Inst || Inst == CI)
2543 continue;
2544 Visited.clear();
2545 if (Type *Ty = deduceElementTypeHelper(Inst, Visited, false))
2546 return Ty;
2547 }
2548 // check if it's a formal parameter of the outer function
2549 if (!CI->getParent() || !CI->getParent()->getParent())
2550 continue;
2551 Function *OuterF = CI->getParent()->getParent();
2552 if (FVisited.find(OuterF) != FVisited.end())
2553 continue;
2554 for (unsigned i = 0; i < OuterF->arg_size(); ++i) {
2555 if (OuterF->getArg(i) == OpArg) {
2556 Lookup.push_back(std::make_pair(OuterF, i));
2557 break;
2558 }
2559 }
2560 }
2561
2562 // search in function parameters
2563 for (auto &Pair : Lookup) {
2564 if (Type *Ty = deduceFunParamElementType(Pair.first, Pair.second, FVisited))
2565 return Ty;
2566 }
2567
2568 return nullptr;
2569}
2570
2571void SPIRVEmitIntrinsics::processParamTypesByFunHeader(Function *F,
2572 IRBuilder<> &B) {
2573 B.SetInsertPointPastAllocas(F);
2574 for (unsigned OpIdx = 0; OpIdx < F->arg_size(); ++OpIdx) {
2575 Argument *Arg = F->getArg(OpIdx);
2576 if (!isUntypedPointerTy(Arg->getType()))
2577 continue;
2578 Type *ElemTy = GR->findDeducedElementType(Arg);
2579 if (ElemTy)
2580 continue;
2581 if (hasPointeeTypeAttr(Arg) &&
2582 (ElemTy = getPointeeTypeByAttr(Arg)) != nullptr) {
2583 GR->buildAssignPtr(B, ElemTy, Arg);
2584 continue;
2585 }
2586 // search in function's call sites
2587 for (User *U : F->users()) {
2588 CallInst *CI = dyn_cast<CallInst>(U);
2589 if (!CI || OpIdx >= CI->arg_size())
2590 continue;
2591 Value *OpArg = CI->getArgOperand(OpIdx);
2592 if (!isPointerTy(OpArg->getType()))
2593 continue;
2594 // maybe we already know operand's element type
2595 if ((ElemTy = GR->findDeducedElementType(OpArg)) != nullptr)
2596 break;
2597 }
2598 if (ElemTy) {
2599 GR->buildAssignPtr(B, ElemTy, Arg);
2600 continue;
2601 }
2602 if (HaveFunPtrs) {
2603 for (User *U : Arg->users()) {
2604 CallInst *CI = dyn_cast<CallInst>(U);
2605 if (CI && !isa<IntrinsicInst>(CI) && CI->isIndirectCall() &&
2606 CI->getCalledOperand() == Arg &&
2607 CI->getParent()->getParent() == CurrF) {
2608 SmallVector<std::pair<Value *, unsigned>> Ops;
2609 deduceOperandElementTypeFunctionPointer(CI, Ops, ElemTy, false);
2610 if (ElemTy) {
2611 GR->buildAssignPtr(B, ElemTy, Arg);
2612 break;
2613 }
2614 }
2615 }
2616 }
2617 }
2618}
2619
2620void SPIRVEmitIntrinsics::processParamTypes(Function *F, IRBuilder<> &B) {
2621 B.SetInsertPointPastAllocas(F);
2622 for (unsigned OpIdx = 0; OpIdx < F->arg_size(); ++OpIdx) {
2623 Argument *Arg = F->getArg(OpIdx);
2624 if (!isUntypedPointerTy(Arg->getType()))
2625 continue;
2626 Type *ElemTy = GR->findDeducedElementType(Arg);
2627 if (!ElemTy && (ElemTy = deduceFunParamElementType(F, OpIdx)) != nullptr) {
2628 if (CallInst *AssignCI = GR->findAssignPtrTypeInstr(Arg)) {
2629 DenseSet<std::pair<Value *, Value *>> VisitedSubst;
2630 GR->updateAssignType(AssignCI, Arg, getNormalizedPoisonValue(ElemTy));
2631 propagateElemType(Arg, IntegerType::getInt8Ty(F->getContext()),
2632 VisitedSubst);
2633 } else {
2634 GR->buildAssignPtr(B, ElemTy, Arg);
2635 }
2636 }
2637 }
2638}
2639
2640static FunctionType *getFunctionPointerElemType(Function *F,
2641 SPIRVGlobalRegistry *GR) {
2642 FunctionType *FTy = F->getFunctionType();
2643 bool IsNewFTy = false;
2644 SmallVector<Type *, 4> ArgTys;
2645 for (Argument &Arg : F->args()) {
2646 Type *ArgTy = Arg.getType();
2647 if (ArgTy->isPointerTy())
2648 if (Type *ElemTy = GR->findDeducedElementType(&Arg)) {
2649 IsNewFTy = true;
2650 ArgTy = getTypedPointerWrapper(ElemTy, getPointerAddressSpace(ArgTy));
2651 }
2652 ArgTys.push_back(ArgTy);
2653 }
2654 return IsNewFTy
2655 ? FunctionType::get(FTy->getReturnType(), ArgTys, FTy->isVarArg())
2656 : FTy;
2657}
2658
2659bool SPIRVEmitIntrinsics::processFunctionPointers(Module &M) {
2660 SmallVector<Function *> Worklist;
2661 for (auto &F : M) {
2662 if (F.isIntrinsic())
2663 continue;
2664 if (F.isDeclaration()) {
2665 for (User *U : F.users()) {
2666 CallInst *CI = dyn_cast<CallInst>(U);
2667 if (!CI || CI->getCalledFunction() != &F) {
2668 Worklist.push_back(&F);
2669 break;
2670 }
2671 }
2672 } else {
2673 if (F.user_empty())
2674 continue;
2675 Type *FPElemTy = GR->findDeducedElementType(&F);
2676 if (!FPElemTy)
2677 FPElemTy = getFunctionPointerElemType(&F, GR);
2678 for (User *U : F.users()) {
2679 IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
2680 if (!II || II->arg_size() != 3 || II->getOperand(0) != &F)
2681 continue;
2682 if (II->getIntrinsicID() == Intrinsic::spv_assign_ptr_type ||
2683 II->getIntrinsicID() == Intrinsic::spv_ptrcast) {
2684 GR->updateAssignType(II, &F, getNormalizedPoisonValue(FPElemTy));
2685 break;
2686 }
2687 }
2688 }
2689 }
2690 if (Worklist.empty())
2691 return false;
2692
2693 std::string ServiceFunName = SPIRV_BACKEND_SERVICE_FUN_NAME;
2694 if (!getVacantFunctionName(M, ServiceFunName))
2695 report_fatal_error(
2696 "cannot allocate a name for the internal service function");
2697 LLVMContext &Ctx = M.getContext();
2698 Function *SF =
2699 Function::Create(FunctionType::get(Type::getVoidTy(Ctx), {}, false),
2700 GlobalValue::PrivateLinkage, ServiceFunName, M);
2701 SF->addFnAttr(SPIRV_BACKEND_SERVICE_FUN_NAME, "");
2702 BasicBlock *BB = BasicBlock::Create(Ctx, "entry", SF);
2703 IRBuilder<> IRB(BB);
2704
2705 for (Function *F : Worklist) {
2706 SmallVector<Value *> Args;
2707 for (const auto &Arg : F->args())
2708 Args.push_back(getNormalizedPoisonValue(Arg.getType()));
2709 IRB.CreateCall(F, Args);
2710 }
2711 IRB.CreateRetVoid();
2712
2713 return true;
2714}
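// The generated service function looks roughly like this (illustrative;
// the actual name is produced from SPIRV_BACKEND_SERVICE_FUN_NAME):
//   define private void @<service-fun>() {
//   entry:
//     call void @some.decl(i32 poison, ptr poison)
//     ret void
//   }
// giving declarations that are only referenced indirectly a concrete call
// site so they survive until SPIR-V emission.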
2715
2716// Apply types parsed from demangled function declarations.
2717void SPIRVEmitIntrinsics::applyDemangledPtrArgTypes(IRBuilder<> &B) {
2718 DenseMap<Function *, CallInst *> Ptrcasts;
2719 for (auto It : FDeclPtrTys) {
2720 Function *F = It.first;
2721 for (auto *U : F->users()) {
2722 CallInst *CI = dyn_cast<CallInst>(U);
2723 if (!CI || CI->getCalledFunction() != F)
2724 continue;
2725 unsigned Sz = CI->arg_size();
2726 for (auto [Idx, ElemTy] : It.second) {
2727 if (Idx >= Sz)
2728 continue;
2729 Value *Param = CI->getArgOperand(Idx);
2730 if (GR->findDeducedElementType(Param) || isa<GlobalValue>(Param))
2731 continue;
2732 if (Argument *Arg = dyn_cast<Argument>(Param)) {
2733 if (!hasPointeeTypeAttr(Arg)) {
2734 B.SetInsertPointPastAllocas(Arg->getParent());
2735 B.SetCurrentDebugLocation(DebugLoc());
2736 GR->buildAssignPtr(B, ElemTy, Arg);
2737 }
2738 } else if (isa<GetElementPtrInst>(Param)) {
2739 replaceUsesOfWithSpvPtrcast(Param, normalizeType(ElemTy), CI,
2740 Ptrcasts);
2741 } else if (isa<Instruction>(Param)) {
2742 GR->addDeducedElementType(Param, normalizeType(ElemTy));
2743 // insertAssignTypeIntrs() will complete buildAssignPtr()
2744 } else {
2745 B.SetInsertPoint(CI->getParent()
2746 ->getParent()
2747 ->getEntryBlock()
2748 .getFirstNonPHIOrDbgOrAlloca());
2749 GR->buildAssignPtr(B, ElemTy, Param);
2750 }
2751 CallInst *Ref = dyn_cast<CallInst>(Param);
2752 if (!Ref)
2753 continue;
2754 Function *RefF = Ref->getCalledFunction();
2755 if (!RefF || !isPointerTy(RefF->getReturnType()) ||
2756 GR->findDeducedElementType(RefF))
2757 continue;
2758 ElemTy = normalizeType(ElemTy);
2759 GR->addDeducedElementType(RefF, ElemTy);
2760 GR->addReturnType(
2761 RefF, TypedPointerType::get(
2762 ElemTy, getPointerAddressSpace(RefF->getReturnType())));
2763 }
2764 }
2765 }
2766}
2767
2768GetElementPtrInst *
2769SPIRVEmitIntrinsics::simplifyZeroLengthArrayGepInst(GetElementPtrInst *GEP) {
2770 // getelementptr [0 x T], P, 0 (zero), I -> getelementptr T, P, I.
2771 // If type is 0-length array and first index is 0 (zero), drop both the
2772 // 0-length array type and the first index. This is a common pattern in
2773 // the IR, e.g. when using a zero-length array as a placeholder for a
2774 // flexible array member or another unbounded array.
2775 assert(GEP && "GEP is null");
2776 Type *SrcTy = GEP->getSourceElementType();
2777 SmallVector<Value *, 8> Indices(GEP->indices());
2778 ArrayType *ArrTy = dyn_cast<ArrayType>(SrcTy);
2779 if (ArrTy && ArrTy->getNumElements() == 0 &&
2780 Indices.size() > 1 && PatternMatch::match(Indices[0], PatternMatch::m_Zero())) {
2781 IRBuilder<> Builder(GEP);
2782 Indices.erase(Indices.begin());
2783 SrcTy = ArrTy->getElementType();
2784 Value *NewGEP = Builder.CreateGEP(SrcTy, GEP->getPointerOperand(), Indices,
2785 "", GEP->getNoWrapFlags());
2786 assert(llvm::isa<GetElementPtrInst>(NewGEP) && "NewGEP should be a GEP");
2787 return cast<GetElementPtrInst>(NewGEP);
2788 }
2789 return nullptr;
2790}
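// Worked example (schematic IR): the flexible-array pattern
//   %q = getelementptr [0 x i32], ptr %p, i64 0, i64 %i
// is rebuilt as
//   %q = getelementptr i32, ptr %p, i64 %i
// with the original no-wrap flags preserved.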
2791
2792bool SPIRVEmitIntrinsics::runOnFunction(Function &Func) {
2793 if (Func.isDeclaration())
2794 return false;
2795
2796 const SPIRVSubtarget &ST = TM->getSubtarget<SPIRVSubtarget>(Func);
2797 GR = ST.getSPIRVGlobalRegistry();
2798
2799 if (!CurrF)
2800 HaveFunPtrs =
2801 ST.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers);
2802
2803 CurrF = &Func;
2804 IRBuilder<> B(Func.getContext());
2805 AggrConsts.clear();
2806 AggrConstTypes.clear();
2807 AggrStores.clear();
2808
2809 // Fix GEP result types ahead of inference, and simplify if possible.
2810 // Data structure for dead instructions that were simplified and replaced.
2811 SmallPtrSet<Instruction *, 4> DeadInsts;
2812 for (auto &I : instructions(Func)) {
2813 GetElementPtrInst *Ref = dyn_cast<GetElementPtrInst>(&I);
2814 if (!Ref || GR->findDeducedElementType(Ref))
2815 continue;
2816
2817 GetElementPtrInst *NewGEP = simplifyZeroLengthArrayGepInst(Ref);
2818 if (NewGEP) {
2819 Ref->replaceAllUsesWith(NewGEP);
2820 DeadInsts.insert(Ref);
2821 Ref = NewGEP;
2822 }
2823 if (Type *GepTy = getGEPType(Ref))
2824 GR->addDeducedElementType(Ref, normalizeType(GepTy));
2825 }
2826 // Remove dead instructions that were simplified and replaced.
2827 for (auto *I : DeadInsts) {
2828 assert(I->use_empty() && "Dead instruction should not have any uses left");
2829 I->eraseFromParent();
2830 }
2831
2832 processParamTypesByFunHeader(CurrF, B);
2833
2834 // StoreInst's operand type can be changed during the next
2835 // transformations, so we need to store it in the set. Also store already
2836 // transformed types.
2837 for (auto &I : instructions(Func)) {
2838 StoreInst *SI = dyn_cast<StoreInst>(&I);
2839 if (!SI)
2840 continue;
2841 Type *ElTy = SI->getValueOperand()->getType();
2842 if (ElTy->isAggregateType() || ElTy->isVectorTy())
2843 AggrStores.insert(&I);
2844 }
2845
2846 B.SetInsertPoint(&Func.getEntryBlock(), Func.getEntryBlock().begin());
2847 for (auto &GV : Func.getParent()->globals())
2848 processGlobalValue(GV, B);
2849
2850 preprocessUndefs(B);
2851 preprocessCompositeConstants(B);
2852 SmallVector<Instruction *> Worklist(
2853 llvm::make_pointer_range(instructions(Func)));
2854
2855 applyDemangledPtrArgTypes(B);
2856
2857 // Pass forward: use operand to deduce instructions result.
2858 for (auto &I : Worklist) {
2859 // Don't emit intrinsics for convergence intrinsics.
2860 if (isConvergenceIntrinsic(I))
2861 continue;
2862
2863 bool Postpone = insertAssignPtrTypeIntrs(I, B, false);
2864 // if Postpone is true, we can't decide on pointee type yet
2865 insertAssignTypeIntrs(I, B);
2866 insertPtrCastOrAssignTypeInstr(I, B);
2867 insertSpirvDecorations(I, B);
2868 // if instruction requires a pointee type set, let's check if we know it
2869 // already, and force it to be i8 if not
2870 if (Postpone && !GR->findAssignPtrTypeInstr(I))
2871 insertAssignPtrTypeIntrs(I, B, true);
2872
2873 if (auto *FPI = dyn_cast<ConstrainedFPIntrinsic>(I))
2874 useRoundingMode(FPI, B);
2875 }
2876
2877 // Pass backward: use instructions results to specify/update/cast operands
2878 // where needed.
2879 SmallPtrSet<Instruction *, 4> IncompleteRets;
2880 for (auto &I : llvm::reverse(instructions(Func)))
2881 deduceOperandElementType(&I, &IncompleteRets);
2882
2883 // Pass forward for PHIs only: their operands do not necessarily precede
2884 // the instruction in the order given by `instructions(Func)`.
2885 for (BasicBlock &BB : Func)
2886 for (PHINode &Phi : BB.phis())
2887 if (isPointerTy(Phi.getType()))
2888 deduceOperandElementType(&Phi, nullptr);
2889
2890 for (auto *I : Worklist) {
2891 TrackConstants = true;
2892 if (!I->getType()->isVoidTy() || isa<StoreInst>(I))
2893 setInsertPointAfterDef(B, I);
2894 // Visitors return either the original/newly created instruction for
2895 // further processing, nullptr otherwise.
2896 I = visit(*I);
2897 if (!I)
2898 continue;
2899
2900 // Don't emit intrinsics for convergence operations.
2901 if (isConvergenceIntrinsic(I))
2902 continue;
2903
2905 processInstrAfterVisit(I, B);
2906 }
2907
2908 return true;
2909}
2910
2911// Try to deduce a better element type for untyped pointers.
2912bool SPIRVEmitIntrinsics::postprocessTypes(Module &M) {
2913 if (!GR || TodoTypeSz == 0)
2914 return false;
2915
2916 unsigned SzTodo = TodoTypeSz;
2917 DenseMap<Value *, SmallPtrSet<Value *, 4>> ToProcess;
2918 for (auto [Op, Enabled] : TodoType) {
2919 // TODO: add isa<CallInst>(Op) to continue
2920 if (!Enabled || isa<GetElementPtrInst>(Op))
2921 continue;
2922 CallInst *AssignCI = GR->findAssignPtrTypeInstr(Op);
2923 Type *KnownTy = GR->findDeducedElementType(Op);
2924 if (!KnownTy || !AssignCI)
2925 continue;
2926 assert(Op == AssignCI->getArgOperand(0));
2927 // Try to improve the type deduced after all Functions are processed.
2928 if (auto *CI = dyn_cast<Instruction>(Op)) {
2929 CurrF = CI->getParent()->getParent();
2930 std::unordered_set<Value *> Visited;
2931 if (Type *ElemTy = deduceElementTypeHelper(Op, Visited, false, true)) {
2932 if (ElemTy != KnownTy) {
2933 DenseSet<std::pair<Value *, Value *>> VisitedSubst;
2934 propagateElemType(CI, ElemTy, VisitedSubst);
2935 eraseTodoType(Op);
2936 continue;
2937 }
2938 }
2939 }
2940
2941 if (Op->hasUseList()) {
2942 for (User *U : Op->users()) {
2943 Instruction *Inst = dyn_cast<Instruction>(U);
2944 if (Inst && !isa<IntrinsicInst>(Inst))
2945 ToProcess[Inst].insert(Op);
2946 }
2947 }
2948 }
2949 if (TodoTypeSz == 0)
2950 return true;
2951
2952 for (auto &F : M) {
2953 CurrF = &F;
2954 SmallPtrSet<Instruction *, 4> IncompleteRets;
2955 for (auto &I : llvm::reverse(instructions(F))) {
2956 auto It = ToProcess.find(&I);
2957 if (It == ToProcess.end())
2958 continue;
2959 It->second.remove_if([this](Value *V) { return !isTodoType(V); });
2960 if (It->second.size() == 0)
2961 continue;
2962 deduceOperandElementType(&I, &IncompleteRets, &It->second, true);
2963 if (TodoTypeSz == 0)
2964 return true;
2965 }
2966 }
2967
2968 return SzTodo > TodoTypeSz;
2969}
2970
2971// Parse and store argument types of function declarations where needed.
2972void SPIRVEmitIntrinsics::parseFunDeclarations(Module &M) {
2973 for (auto &F : M) {
2974 if (!F.isDeclaration() || F.isIntrinsic())
2975 continue;
2976 // get the demangled name
2977 std::string DemangledName = getOclOrSpirvBuiltinDemangledName(F.getName());
2978 if (DemangledName.empty())
2979 continue;
2980 // allow only OpGroupAsyncCopy use case at the moment
2981 const SPIRVSubtarget &ST = TM->getSubtarget<SPIRVSubtarget>(F);
2982 auto [Grp, Opcode, ExtNo] = SPIRV::mapBuiltinToOpcode(
2983 DemangledName, ST.getPreferredInstructionSet());
2984 if (Opcode != SPIRV::OpGroupAsyncCopy)
2985 continue;
2986 // find pointer arguments
2987 SmallVector<unsigned> Idxs;
2988 for (unsigned OpIdx = 0; OpIdx < F.arg_size(); ++OpIdx) {
2989 Argument *Arg = F.getArg(OpIdx);
2990 if (isPointerTy(Arg->getType()) && !hasPointeeTypeAttr(Arg))
2991 Idxs.push_back(OpIdx);
2992 }
2993 if (!Idxs.size())
2994 continue;
2995 // parse function arguments
2996 LLVMContext &Ctx = F.getContext();
2997 SmallVector<StringRef, 10> TypeStrs;
2998 SPIRV::parseBuiltinTypeStr(TypeStrs, DemangledName, Ctx);
2999 if (!TypeStrs.size())
3000 continue;
3001 // find type info for pointer arguments
3002 for (unsigned Idx : Idxs) {
3003 if (Idx >= TypeStrs.size())
3004 continue;
3005 if (Type *ElemTy =
3006 SPIRV::parseBuiltinCallArgumentType(TypeStrs[Idx].trim(), Ctx))
3007 if (TypedPointerType::isValidElementType(ElemTy) &&
3008 !ElemTy->isTargetExtTy())
3009 FDeclPtrTys[&F].push_back(std::make_pair(Idx, ElemTy));
3010 }
3011 }
3012}
3013
3014bool SPIRVEmitIntrinsics::runOnModule(Module &M) {
3015 bool Changed = false;
3016
3017 parseFunDeclarations(M);
3018 insertConstantsForFPFastMathDefault(M);
3019
3020 TodoType.clear();
3021 for (auto &F : M)
3022 Changed |= runOnFunction(F);
3023
3024 // Specify function parameters after all functions were processed.
3025 for (auto &F : M) {
3026 // check if function parameter types are set
3027 CurrF = &F;
3028 if (!F.isDeclaration() && !F.isIntrinsic()) {
3029 IRBuilder<> B(F.getContext());
3030 processParamTypes(&F, B);
3031 }
3032 }
3033
3034 CanTodoType = false;
3035 Changed |= postprocessTypes(M);
3036
3037 if (HaveFunPtrs)
3038 Changed |= processFunctionPointers(M);
3039
3040 return Changed;
3041}
3042
3043ModulePass *llvm::createSPIRVEmitIntrinsicsPass(SPIRVTargetMachine *TM) {
3044 return new SPIRVEmitIntrinsics(TM);
3045}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
aarch64 promote const
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
always inline
Expand Atomic instructions
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
Definition CSEInfo.cpp:27
static void replaceAllUsesWith(Value *Old, Value *New, SmallPtrSet< BasicBlock *, 32 > &FreshBBs, bool IsHuge)
Replace all old uses with new ones, and push the updated BBs into FreshBBs.
This file defines the DenseSet and SmallDenseSet classes.
static bool runOnFunction(Function &F, bool PostInlining)
Hexagon Common GEP
iv Induction Variable Users
Definition IVUsers.cpp:48
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Machine Check Debug Module
#define T
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
Function * Fun
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
static unsigned getNumElements(Type *Ty)
static bool isMemInstrToReplace(Instruction *I)
static bool isAggrConstForceInt32(const Value *V)
static SPIRV::FPFastMathDefaultInfoVector & getOrCreateFPFastMathDefaultInfoVec(const Module &M, DenseMap< Function *, SPIRV::FPFastMathDefaultInfoVector > &FPFastMathDefaultInfoMap, Function *F)
static Type * getAtomicElemTy(SPIRVGlobalRegistry *GR, Instruction *I, Value *PointerOperand)
static void reportFatalOnTokenType(const Instruction *I)
static void setInsertPointAfterDef(IRBuilder<> &B, Instruction *I)
static void emitAssignName(Instruction *I, IRBuilder<> &B)
static Type * getPointeeTypeByCallInst(StringRef DemangledName, Function *CalledF, unsigned OpIdx)
static void createRoundingModeDecoration(Instruction *I, unsigned RoundingModeDeco, IRBuilder<> &B)
static void createDecorationIntrinsic(Instruction *I, MDNode *Node, IRBuilder<> &B)
static SPIRV::FPFastMathDefaultInfo & getFPFastMathDefaultInfo(SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec, const Type *Ty)
static bool IsKernelArgInt8(Function *F, StoreInst *SI)
static void addSaturatedDecorationToIntrinsic(Instruction *I, IRBuilder<> &B)
static void setInsertPointSkippingPhis(IRBuilder<> &B, Instruction *I)
static FunctionType * getFunctionPointerElemType(Function *F, SPIRVGlobalRegistry *GR)
static void createSaturatedConversionDecoration(Instruction *I, IRBuilder<> &B)
static Type * restoreMutatedType(SPIRVGlobalRegistry *GR, Instruction *I, Type *Ty)
static bool requireAssignType(Instruction *I)
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
static void insertSpirvDecorations(MachineFunction &MF, SPIRVGlobalRegistry *GR, MachineIRBuilder MIB)
#define SPIRV_BACKEND_SERVICE_FUN_NAME
Definition SPIRVUtils.h:503
StringSet - A set-like wrapper for the StringMap.
DEMANGLE_NAMESPACE_BEGIN bool starts_with(std::string_view self, char C) noexcept
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Definition VPlanSLP.cpp:247
static int Lookup(ArrayRef< TableEntry > Table, unsigned Opcode)
This class represents an incoming formal argument to a Function.
Definition Argument.h:32
const Function * getParent() const
Definition Argument.h:44
static unsigned getPointerOperandIndex()
static unsigned getPointerOperandIndex()
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:528
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition BasicBlock.h:206
LLVM_ABI LLVMContext & getContext() const
Get the context in which this basic block lives.
static LLVM_ABI BlockAddress * get(Function *F, BasicBlock *BB)
Return a BlockAddress for the specified function and basic block.
bool isInlineAsm() const
Check if this call is an inline asm statement.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
LLVM_ABI bool isIndirectCall() const
Return true if the callsite is an indirect call.
Value * getCalledOperand() const
Value * getArgOperand(unsigned i) const
FunctionType * getFunctionType() const
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
static ConstantAsMetadata * get(Constant *C)
Definition Metadata.h:536
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:163
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI std::optional< RoundingMode > getRoundingMode() const
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
Definition DenseMap.h:256
iterator end()
Definition DenseMap.h:81
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:241
static LLVM_ABI FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
void addFnAttr(Attribute::AttrKind Kind)
Add function attributes to this function.
Definition Function.cpp:640
static Function * Create(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Definition Function.h:166
const DataLayout & getDataLayout() const
Get the data layout of the module this function belongs to.
Definition Function.cpp:363
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition Function.h:244
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
Definition Function.h:249
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition Function.cpp:359
size_t arg_size() const
Definition Function.h:899
Type * getReturnType() const
Returns the type of the ret val.
Definition Function.h:214
Argument * getArg(unsigned i) const
Definition Function.h:884
static LLVM_ABI Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
static unsigned getPointerOperandIndex()
PointerType * getType() const
Global values are always pointers.
@ PrivateLinkage
Like Internal, but omit from symbol table.
Definition GlobalValue.h:61
@ InternalLinkage
Rename collisions when linking (static functions).
Definition GlobalValue.h:60
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
This provides a uniform API for creating instructions and inserting them into a basic block: either at the end of a BasicBlock, or at a specific iterator location in a block.
Definition IRBuilder.h:2788
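A minimal sketch of the insertion-point model (emitAdd is a hypothetical helper):

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

// Constructing the builder from a block positions it at the block's end;
// constructing it from an instruction would insert before that instruction.
static Value *emitAdd(BasicBlock *BB, Value *L, Value *R) {
  IRBuilder<> B(BB);
  return B.CreateAdd(L, R, "sum");
}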
LLVM_ABI void addDestination(BasicBlock *Dest)
Add a destination.
Base class for instruction visitors.
Definition InstVisitor.h:78
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other instructions.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
LLVM_ABI void copyMetadata(const Instruction &SrcInst, ArrayRef< unsigned > WL=ArrayRef< unsigned >())
Copy metadata from SrcInst to this instruction.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
static unsigned getPointerOperandIndex()
Metadata node.
Definition Metadata.h:1078
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1442
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition Metadata.h:1569
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1448
static LLVM_ABI MDString * get(LLVMContext &Context, StringRef Str)
Definition Metadata.cpp:608
Flags
Flag values. These may be OR'd together.
static LLVM_ABI MetadataAsValue * get(LLVMContext &Context, Metadata *MD)
Definition Metadata.cpp:104
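These three pieces compose whenever a string has to travel as a call operand; a sketch with made-up names:

#include "llvm/ADT/StringRef.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
using namespace llvm;

// MDString -> MDTuple -> MetadataAsValue turns plain metadata into a Value
// that can be passed as an intrinsic call argument.
static MetadataAsValue *wrapName(LLVMContext &Ctx, StringRef Name) {
  MDString *S = MDString::get(Ctx, Name);
  MDTuple *T = MDTuple::get(Ctx, {S});
  return MetadataAsValue::get(Ctx, T);
}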
Metadata * getMetadata() const
Definition Metadata.h:201
ModulePass class - This class is used to implement unstructured interprocedural optimizations and analyses.
Definition Pass.h:255
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
void addAssignPtrTypeInstr(Value *Val, CallInst *AssignPtrTyCI)
void buildAssignPtr(IRBuilder<> &B, Type *ElemTy, Value *Arg)
Type * findDeducedCompositeType(const Value *Val)
void replaceAllUsesWith(Value *Old, Value *New, bool DeleteOld=true)
void addDeducedElementType(Value *Val, Type *Ty)
void addReturnType(const Function *ArgF, TypedPointerType *DerivedTy)
Type * findMutated(const Value *Val)
void addDeducedCompositeType(Value *Val, Type *Ty)
void buildAssignType(IRBuilder<> &B, Type *Ty, Value *Arg)
Type * findDeducedElementType(const Value *Val)
void updateAssignType(CallInst *AssignCI, Value *Arg, Value *OfType)
CallInst * findAssignPtrTypeInstr(const Value *Val)
const SPIRVTargetLowering * getTargetLowering() const override
bool isLogicalSPIRV() const
bool canUseExtension(SPIRV::Extension::Extension E) const
const SPIRVSubtarget * getSubtargetImpl() const
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
static unsigned getPointerOperandIndex()
iterator end()
Definition StringMap.h:224
iterator find(StringRef Key)
Definition StringMap.h:237
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Definition StringRef.h:55
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:261
bool contains(StringRef key) const
Check if the set contains the given key.
Definition StringSet.h:60
static LLVM_ABI StructType * create(LLVMContext &Context, StringRef Name)
This creates an identified struct.
Definition Type.cpp:619
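A sketch of the identified-struct flow (the name and body are illustrative):

#include "llvm/IR/DerivedTypes.h"
using namespace llvm;

// An identified struct starts opaque; setBody fills it in afterwards,
// which is what permits self-referential types.
static StructType *makePair(LLVMContext &Ctx) {
  StructType *ST = StructType::create(Ctx, "pair");
  ST->setBody({Type::getInt32Ty(Ctx), Type::getFloatTy(Ctx)});
  return ST;
}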
static LLVM_ABI TargetExtType * get(LLVMContext &Context, StringRef Name, ArrayRef< Type * > Types={}, ArrayRef< unsigned > Ints={})
Return a target extension type having the specified name and optional type and integer parameters.
Definition Type.cpp:907
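A sketch with made-up parameters; "example.handle" is not a real SPIR-V type name (real ones such as "spirv.Image" have a fixed parameter layout defined by the backend):

#include "llvm/IR/DerivedTypes.h"
using namespace llvm;

// One type parameter and one integer parameter, both illustrative.
static TargetExtType *makeHandleTy(LLVMContext &Ctx) {
  return TargetExtType::get(Ctx, "example.handle",
                            {Type::getInt32Ty(Ctx)}, {/*Flags=*/0});
}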
const STC & getSubtarget(const Function &F) const
This method returns a pointer to the specified type of TargetSubtargetInfo.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition Type.h:264
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:296
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:267
Type * getArrayElementType() const
Definition Type.h:408
LLVM_ABI StringRef getTargetExtName() const
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
Definition Type.cpp:294
bool isStructTy() const
True if this is an instance of StructType.
Definition Type.h:261
bool isTargetExtTy() const
Return true if this is a target extension type.
Definition Type.h:203
bool isAggregateType() const
Return true if the type is an aggregate type.
Definition Type.h:304
static LLVM_ABI Type * getDoubleTy(LLVMContext &C)
Definition Type.cpp:285
static LLVM_ABI Type * getFloatTy(LLVMContext &C)
Definition Type.cpp:284
static LLVM_ABI Type * getHalfTy(LLVMContext &C)
Definition Type.cpp:282
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:139
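The aggregate predicates above relate as follows (a sketch; the helper is hypothetical):

#include "llvm/IR/Type.h"
using namespace llvm;

// isAggregateType() covers exactly structs and arrays; vectors and
// pointers are first-class values, not aggregates.
static bool isStructOrArray(Type *Ty) {
  return Ty->isStructTy() || Ty->isArrayTy(); // same as Ty->isAggregateType()
}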
static LLVM_ABI bool isValidElementType(Type *ElemTy)
Return true if the specified type is valid as an element type.
static LLVM_ABI TypedPointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
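A sketch: a typed-pointer carrier for i32 in address space 1, the kind of wrapper used to retain pointee types that opaque pointers erase (the helper name is made up):

#include "llvm/IR/Type.h"
#include "llvm/IR/TypedPointerType.h"
using namespace llvm;

// TypedPointerType carries an (element type, address space) pair for
// bookkeeping rather than appearing as an ordinary IR pointer type.
static TypedPointerType *i32PtrAS1(LLVMContext &Ctx) {
  return TypedPointerType::get(Type::getInt32Ty(Ctx), /*AddressSpace=*/1);
}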
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
op_range operands()
Definition User.h:292
void setOperand(unsigned i, Value *Val)
Definition User.h:237
LLVM_ABI bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Definition User.cpp:24
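The operand accessors above compose into the usual single-user rewrite loop; a sketch:

#include "llvm/IR/User.h"
using namespace llvm;

// Rewrites matching operands of one user only; contrast with
// Value::replaceAllUsesWith, which rewrites every user of From.
static void redirect(User &U, Value *From, Value *To) {
  for (unsigned I = 0, E = U.getNumOperands(); I != E; ++I)
    if (U.getOperand(I) == From)
      U.setOperand(I, To); // equivalent to U.replaceUsesOfWith(From, To)
}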
Value * getOperand(unsigned i) const
Definition User.h:232
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
Definition Value.cpp:390
iterator_range< user_iterator > users()
Definition Value.h:426
bool use_empty() const
Definition Value.h:346
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
bool user_empty() const
Definition Value.h:389
std::pair< iterator, bool > insert(const ValueT &V)
Definition DenseSet.h:202
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
Definition DenseSet.h:175
const ParentTy * getParent() const
Definition ilist_node.h:34
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
@ SPIR_KERNEL
Used for SPIR kernel functions.
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
bool match(Val *V, const Pattern &P)
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
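A minimal sketch of the matcher combinators (isAddOfZero is hypothetical):

#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace llvm::PatternMatch;

// Matches "add %x, 0" in scalar or vector form; m_Value(X) binds the
// non-constant operand on success.
static bool isAddOfZero(Value *V) {
  Value *X;
  return match(V, m_Add(m_Value(X), m_Zero()));
}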
DenseSetImpl< ValueT, DenseMap< ValueT, DenseSetEmpty, ValueInfoT, DenseSetPair< ValueT > >, ValueInfoT > DenseSet
Definition DenseSet.h:264
ElementType
The element type of an SRV or UAV resource.
Definition DXILABI.h:60
@ User
could "use" a pointer
NodeAddr< PhiNode * > Phi
Definition RDFGraph.h:390
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
NodeAddr< FuncNode * > Func
Definition RDFGraph.h:393
friend class Instruction
Iterator for Instructions in a BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
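For example (a sketch):

#include "llvm/ADT/STLExtras.h"
#include <vector>

// Skips the first element without copying the container.
static int sumTail(const std::vector<int> &V) {
  int S = 0;
  for (int X : llvm::drop_begin(V))
    S += X;
  return S;
}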
bool getVacantFunctionName(Module &M, std::string &Name)
@ Offset
Definition DWP.cpp:532
bool isTypedPointerWrapper(const TargetExtType *ExtTy)
Definition SPIRVUtils.h:381
ModulePass * createSPIRVEmitIntrinsicsPass(SPIRVTargetMachine *TM)
unsigned getPointerAddressSpace(const Type *T)
Definition SPIRVUtils.h:345
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
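The classic null-checked downcast, sketched on a store (the helper name is made up):

#include "llvm/IR/Instructions.h"
#include "llvm/Support/Casting.h"
using namespace llvm;

// dyn_cast returns nullptr on a type mismatch, so the cast doubles as the
// type test.
static Value *pointerOperandOrNull(Value *V) {
  if (auto *SI = dyn_cast<StoreInst>(V))
    return SI->getPointerOperand();
  return nullptr;
}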
CallInst * buildIntrWithMD(Intrinsic::ID IntrID, ArrayRef< Type * > Types, Value *Arg, Value *Arg2, ArrayRef< Constant * > Imms, IRBuilder<> &B)
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2136
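For example (a sketch):

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"

// Appends the whole range in one call instead of an explicit loop.
static void collect(llvm::SmallVector<int> &Out, llvm::ArrayRef<int> In) {
  llvm::append_range(Out, In);
}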
FPDecorationId
Definition SPIRVUtils.h:527
bool isNestedPointer(const Type *Ty)
MetadataAsValue * buildMD(Value *Arg)
Definition SPIRVUtils.h:491
std::string getOclOrSpirvBuiltinDemangledName(StringRef Name)
auto reverse(ContainerTy &&C)
Definition STLExtras.h:406
Type * getTypedPointerWrapper(Type *ElemTy, unsigned AS)
Definition SPIRVUtils.h:376
bool isVector1(Type *Ty)
Definition SPIRVUtils.h:469
bool isPointerTy(const Type *T)
Definition SPIRVUtils.h:339
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector<T, 0>).
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
Definition Casting.h:547
SPIRV::Scope::Scope getMemScope(LLVMContext &Ctx, SyncScope::ID Id)
@ Ref
The access may reference the value stored in memory.
Definition ModRef.h:32
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
DWARFExpression::Operation Op
Type * getPointeeTypeByAttr(Argument *Arg)
Definition SPIRVUtils.h:358
bool hasPointeeTypeAttr(Argument *Arg)
Definition SPIRVUtils.h:353
constexpr unsigned BitWidth
bool isEquivalentTypes(Type *Ty1, Type *Ty2)
Definition SPIRVUtils.h:431
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
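cast<X> asserts instead of returning null, so it pairs with isa<X> when failure is possible; a sketch:

#include "llvm/IR/Instructions.h"
#include "llvm/Support/Casting.h"
using namespace llvm;

// Use isa<> to guard and cast<> once the type is known; prefer dyn_cast<>
// when both steps collapse into one.
static unsigned numArgsIfCall(Value *V) {
  if (!isa<CallInst>(V))
    return 0;
  return cast<CallInst>(V)->arg_size();
}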
iterator_range< pointer_iterator< WrappedIteratorT > > make_pointer_range(RangeT &&Range)
Definition iterator.h:363
bool hasInitializer(const GlobalVariable *GV)
Definition SPIRVUtils.h:324
Type * normalizeType(Type *Ty)
Definition SPIRVUtils.h:477
@ Enabled
Convert any .debug_str_offsets tables to DWARF64 if needed.
Definition DWP.h:27
bool isSpvIntrinsic(const MachineInstr &MI, Intrinsic::ID IntrinsicID)
Type * getPointeeType(const Type *Ty)
Definition SPIRVUtils.h:408
PoisonValue * getNormalizedPoisonValue(Type *Ty)
Definition SPIRVUtils.h:487
bool isUntypedPointerTy(const Type *T)
Definition SPIRVUtils.h:334
SPIRV::MemorySemantics::MemorySemantics getMemSemantics(AtomicOrdering Ord)
static size_t computeFPFastMathDefaultInfoVecIndex(size_t BitWidth)
Definition SPIRVUtils.h:146