//===-- SPIRVEmitIntrinsics.cpp - emit SPIRV intrinsics ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The pass emits SPIRV intrinsics keeping essential high-level information for
// the translation of LLVM IR to SPIR-V.
//
//===----------------------------------------------------------------------===//

#include "SPIRV.h"
#include "SPIRVBuiltins.h"
#include "SPIRVSubtarget.h"
#include "SPIRVTargetMachine.h"
#include "SPIRVUtils.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicsSPIRV.h"
#include "llvm/IR/TypedPointerType.h"

#include <cassert>
#include <queue>
#include <unordered_set>

// This pass performs the following transformation on LLVM IR level required
// for the following translation to SPIR-V:
// - replaces direct usages of aggregate constants with target-specific
//   intrinsics;
// - replaces aggregate-related instructions (extract/insert, ld/st, etc.)
//   with target-specific intrinsics;
// - emits intrinsics for the global variable initializers since IRTranslator
//   doesn't handle them and it's not very convenient to translate them
//   ourselves;
// - emits intrinsics to keep track of the string names assigned to the values;
// - emits intrinsics to keep track of constants (this is necessary to have an
//   LLVM IR constant after the IRTranslation is completed) for their further
//   deduplication;
// - emits intrinsics to keep track of original LLVM types of the values
//   to be able to emit proper SPIR-V types eventually.
//
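// As an illustrative sketch (not a verbatim test case): an aggregate constant
// operand such as
//   store [2 x i32] [i32 1, i32 2], ptr %p
// is conceptually rewritten so that the constant is materialized by
//   %c = call i32 @llvm.spv.const.composite(i32 1, i32 2)
// (arrays are forced to an i32 overload, see isAggrConstForceInt32 below),
// while the store itself is replaced with a spv_store intrinsic call.
//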
// TODO: consider removing spv.track.constant in favor of spv.assign.type.

using namespace llvm;

namespace llvm::SPIRV {
#define GET_BuiltinGroup_DECL
#include "SPIRVGenTables.inc"
} // namespace llvm::SPIRV

namespace {

class SPIRVEmitIntrinsics
    : public ModulePass,
      public InstVisitor<SPIRVEmitIntrinsics, Instruction *> {
  SPIRVTargetMachine *TM = nullptr;
  SPIRVGlobalRegistry *GR = nullptr;
  Function *CurrF = nullptr;
  bool TrackConstants = true;
  bool HaveFunPtrs = false;
  DenseMap<Instruction *, Constant *> AggrConsts;
  DenseMap<Instruction *, Type *> AggrConstTypes;
  DenseSet<Instruction *> AggrStores;
  std::unordered_set<Value *> Named;

  // map of function declarations to <pointer arg index => element type>
  DenseMap<Function *, SmallVector<std::pair<unsigned, Type *>>> FDeclPtrTys;

  // a register of Instructions that don't have a complete type definition
  bool CanTodoType = true;
  unsigned TodoTypeSz = 0;
  DenseMap<Value *, bool> TodoType;
  void insertTodoType(Value *Op) {
    // TODO: add isa<CallInst>(Op) to no-insert
    if (CanTodoType && !isa<GetElementPtrInst>(Op)) {
      auto It = TodoType.try_emplace(Op, true);
      if (It.second)
        ++TodoTypeSz;
    }
  }
  void eraseTodoType(Value *Op) {
    auto It = TodoType.find(Op);
    if (It != TodoType.end() && It->second) {
      It->second = false;
      --TodoTypeSz;
    }
  }
  bool isTodoType(Value *Op) {
    if (!CanTodoType)
      return false;
    auto It = TodoType.find(Op);
    return It != TodoType.end() && It->second;
  }
  // a register of Instructions that were visited by deduceOperandElementType()
  // to validate operand types with an instruction
  std::unordered_set<Instruction *> TypeValidated;

  // well known result types of builtins
  enum WellKnownTypes { Event };

  // deduce element type of untyped pointers
  Type *deduceElementType(Value *I, bool UnknownElemTypeI8);
  Type *deduceElementTypeHelper(Value *I, bool UnknownElemTypeI8);
  Type *deduceElementTypeHelper(Value *I, std::unordered_set<Value *> &Visited,
                                bool UnknownElemTypeI8,
                                bool IgnoreKnownType = false);
  Type *deduceElementTypeByValueDeep(Type *ValueTy, Value *Operand,
                                     bool UnknownElemTypeI8);
  Type *deduceElementTypeByValueDeep(Type *ValueTy, Value *Operand,
                                     std::unordered_set<Value *> &Visited,
                                     bool UnknownElemTypeI8);
  Type *deduceElementTypeByUsersDeep(Value *Op,
                                     std::unordered_set<Value *> &Visited,
                                     bool UnknownElemTypeI8);
  void maybeAssignPtrType(Type *&Ty, Value *I, Type *RefTy,
                          bool UnknownElemTypeI8);

  // deduce nested types of composites
  Type *deduceNestedTypeHelper(User *U, bool UnknownElemTypeI8);
  Type *deduceNestedTypeHelper(User *U, Type *Ty,
                               std::unordered_set<Value *> &Visited,
                               bool UnknownElemTypeI8);

  // deduce Types of operands of the Instruction if possible
  void deduceOperandElementType(Instruction *I,
                                SmallPtrSet<Instruction *, 4> *IncompleteRets,
                                const SmallPtrSet<Value *, 4> *AskOps = nullptr,
                                bool IsPostprocessing = false);

  void preprocessCompositeConstants(IRBuilder<> &B);
  void preprocessUndefs(IRBuilder<> &B);

  Type *reconstructType(Value *Op, bool UnknownElemTypeI8,
                        bool IsPostprocessing);

  void replaceMemInstrUses(Instruction *Old, Instruction *New, IRBuilder<> &B);
  void processInstrAfterVisit(Instruction *I, IRBuilder<> &B);
  bool insertAssignPtrTypeIntrs(Instruction *I, IRBuilder<> &B,
                                bool UnknownElemTypeI8);
  void insertAssignTypeIntrs(Instruction *I, IRBuilder<> &B);
  void insertAssignPtrTypeTargetExt(TargetExtType *AssignedType, Value *V,
                                    IRBuilder<> &B);
  void replacePointerOperandWithPtrCast(Instruction *I, Value *Pointer,
                                        Type *ExpectedElementType,
                                        unsigned OperandToReplace,
                                        IRBuilder<> &B);
  void insertPtrCastOrAssignTypeInstr(Instruction *I, IRBuilder<> &B);
  bool shouldTryToAddMemAliasingDecoration(Instruction *Inst);
  void insertSpirvDecorations(Instruction *I, IRBuilder<> &B);
  void insertConstantsForFPFastMathDefault(Module &M);
  void processGlobalValue(GlobalVariable &GV, IRBuilder<> &B);
  void processParamTypes(Function *F, IRBuilder<> &B);
  void processParamTypesByFunHeader(Function *F, IRBuilder<> &B);
  Type *deduceFunParamElementType(Function *F, unsigned OpIdx);
  Type *deduceFunParamElementType(Function *F, unsigned OpIdx,
                                  std::unordered_set<Function *> &FVisited);

  bool deduceOperandElementTypeCalledFunction(
      CallInst *CI, SmallVector<std::pair<Value *, unsigned>> &Ops,
      Type *&KnownElemTy, bool &Incomplete);
  void deduceOperandElementTypeFunctionPointer(
      CallInst *CI, SmallVector<std::pair<Value *, unsigned>> &Ops,
      Type *&KnownElemTy, bool IsPostprocessing);
  bool deduceOperandElementTypeFunctionRet(
      Instruction *I, SmallPtrSet<Instruction *, 4> *IncompleteRets,
      const SmallPtrSet<Value *, 4> *AskOps, bool IsPostprocessing,
      Type *&KnownElemTy, Value *Op, Function *F);

  CallInst *buildSpvPtrcast(Function *F, Value *Op, Type *ElemTy);
  void replaceUsesOfWithSpvPtrcast(Value *Op, Type *ElemTy, Instruction *I,
                                   DenseMap<Function *, CallInst *> Ptrcasts);
  void propagateElemType(Value *Op, Type *ElemTy,
                         DenseSet<std::pair<Value *, Value *>> &VisitedSubst);
  void
  propagateElemTypeRec(Value *Op, Type *PtrElemTy, Type *CastElemTy,
                       DenseSet<std::pair<Value *, Value *>> &VisitedSubst);
  void propagateElemTypeRec(Value *Op, Type *PtrElemTy, Type *CastElemTy,
                            DenseSet<std::pair<Value *, Value *>> &VisitedSubst,
                            std::unordered_set<Value *> &Visited,
                            DenseMap<Function *, CallInst *> Ptrcasts);

  void replaceAllUsesWith(Value *Src, Value *Dest, bool DeleteOld = true);
  void replaceAllUsesWithAndErase(IRBuilder<> &B, Instruction *Src,
                                  Instruction *Dest, bool DeleteOld = true);

  void applyDemangledPtrArgTypes(IRBuilder<> &B);

  GetElementPtrInst *simplifyZeroLengthArrayGepInst(GetElementPtrInst *GEP);

  bool runOnFunction(Function &F);
  bool postprocessTypes(Module &M);
  bool processFunctionPointers(Module &M);
  void parseFunDeclarations(Module &M);

  void useRoundingMode(ConstrainedFPIntrinsic *FPI, IRBuilder<> &B);

  // Tries to walk the type accessed by the given GEP instruction.
  // For each nested type access, one of the 2 callbacks is called:
  // - OnLiteralIndexing when the index is a known constant value.
  //   Parameters:
  //     PointedType: the pointed type resulting of this indexing.
  //     Index: index of the element in the parent type.
  //       If the parent type is an array, this is the index in the array.
  //       If the parent type is a struct, this is the field index.
  // - OnDynamicIndexing when the index is a non-constant value.
  //   This callback is only called when indexing into an array.
  //   Parameters:
  //     ElementType: the type of the elements stored in the parent array.
  //     Offset: the Value* containing the byte offset into the array.
  // Returns true if an error occurred during the walk, false otherwise.
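  //
  // For instance (an illustrative sketch): with a source pointer deduced to
  // point to { [4 x i32], float } and
  //   %q = getelementptr i8, ptr %p, i64 12
  // the walk resolves byte offset 12 to struct field 0 (the [4 x i32] array),
  // invoking OnLiteralIndexing([4 x i32], 0), and then to array element 3
  // with no remaining offset, invoking OnLiteralIndexing(i32, 3).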
  bool walkLogicalAccessChain(
      GetElementPtrInst &GEP,
      const std::function<void(Type *PointedType, uint64_t Index)>
          &OnLiteralIndexing,
      const std::function<void(Type *ElementType, Value *Offset)>
          &OnDynamicIndexing);

  // Returns the type accessed using the given GEP instruction by relying
  // on the GEP type.
  // FIXME: GEP types are not supposed to be used to retrieve the pointed
  // type. This must be fixed.
  Type *getGEPType(GetElementPtrInst *GEP);

  // Returns the type accessed using the given GEP instruction by walking
  // the source type using the GEP indices.
  // FIXME: without help from the frontend, this method can neither reliably
  // retrieve the stored type nor robustly determine the depth of the type
  // we are accessing.
  Type *getGEPTypeLogical(GetElementPtrInst *GEP);

  Instruction *buildLogicalAccessChainFromGEP(GetElementPtrInst &GEP);

public:
  static char ID;
  SPIRVEmitIntrinsics(SPIRVTargetMachine *TM = nullptr)
      : ModulePass(ID), TM(TM) {}
  Instruction *visitInstruction(Instruction &I) { return &I; }
  Instruction *visitSwitchInst(SwitchInst &I);
  Instruction *visitGetElementPtrInst(GetElementPtrInst &I);
  Instruction *visitBitCastInst(BitCastInst &I);
  Instruction *visitInsertElementInst(InsertElementInst &I);
  Instruction *visitExtractElementInst(ExtractElementInst &I);
  Instruction *visitInsertValueInst(InsertValueInst &I);
  Instruction *visitExtractValueInst(ExtractValueInst &I);
  Instruction *visitLoadInst(LoadInst &I);
  Instruction *visitStoreInst(StoreInst &I);
  Instruction *visitAllocaInst(AllocaInst &I);
  Instruction *visitAtomicCmpXchgInst(AtomicCmpXchgInst &I);
  Instruction *visitUnreachableInst(UnreachableInst &I);
  Instruction *visitCallInst(CallInst &I);

  StringRef getPassName() const override { return "SPIRV emit intrinsics"; }

  bool runOnModule(Module &M) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    ModulePass::getAnalysisUsage(AU);
  }
};

bool isConvergenceIntrinsic(const Instruction *I) {
  const auto *II = dyn_cast<IntrinsicInst>(I);
  if (!II)
    return false;

  return II->getIntrinsicID() == Intrinsic::experimental_convergence_entry ||
         II->getIntrinsicID() == Intrinsic::experimental_convergence_loop ||
         II->getIntrinsicID() == Intrinsic::experimental_convergence_anchor;
}

bool expectIgnoredInIRTranslation(const Instruction *I) {
  const auto *II = dyn_cast<IntrinsicInst>(I);
  if (!II)
    return false;
  switch (II->getIntrinsicID()) {
  case Intrinsic::invariant_start:
  case Intrinsic::spv_resource_handlefrombinding:
  case Intrinsic::spv_resource_getpointer:
    return true;
  default:
    return false;
  }
}

// Returns the source pointer from `I` ignoring intermediate ptrcast.
Value *getPointerRoot(Value *I) {
  if (auto *II = dyn_cast<IntrinsicInst>(I)) {
    if (II->getIntrinsicID() == Intrinsic::spv_ptrcast) {
      Value *V = II->getArgOperand(0);
      return getPointerRoot(V);
    }
  }
  return I;
}

} // namespace

char SPIRVEmitIntrinsics::ID = 0;

INITIALIZE_PASS(SPIRVEmitIntrinsics, "emit-intrinsics", "SPIRV emit intrinsics",
                false, false)

static inline bool isAssignTypeInstr(const Instruction *I) {
  return isa<IntrinsicInst>(I) &&
         cast<IntrinsicInst>(I)->getIntrinsicID() == Intrinsic::spv_assign_type;
}

static bool isMemInstrToReplace(Instruction *I) {
  return isa<StoreInst>(I) || isa<LoadInst>(I) || isa<InsertValueInst>(I) ||
         isa<ExtractValueInst>(I) || isa<AtomicCmpXchgInst>(I);
}

static bool isAggrConstForceInt32(const Value *V) {
  return isa<ConstantArray>(V) || isa<ConstantStruct>(V) ||
         isa<ConstantDataArray>(V) ||
         (isa<ConstantAggregateZero>(V) && !V->getType()->isVectorTy());
}

static void setInsertPointSkippingPhis(IRBuilder<> &B, Instruction *I) {
  if (isa<PHINode>(I))
    B.SetInsertPoint(I->getParent()->getFirstNonPHIOrDbgOrAlloca());
  else
    B.SetInsertPoint(I);
}

static void setInsertPointAfterDef(IRBuilder<> &B, Instruction *I) {
  B.SetCurrentDebugLocation(I->getDebugLoc());
  if (I->getType()->isVoidTy())
    B.SetInsertPoint(I->getNextNode());
  else
    B.SetInsertPoint(*I->getInsertionPointAfterDef());
}

static bool requireAssignType(Instruction *I) {
  if (const auto *Intr = dyn_cast<IntrinsicInst>(I)) {
    switch (Intr->getIntrinsicID()) {
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
      return false;
    }
  }
  return true;
}

static inline void reportFatalOnTokenType(const Instruction *I) {
  if (I->getType()->isTokenTy())
    report_fatal_error("A token is encountered but SPIR-V without extensions "
                       "does not support token type",
                       false);
}

static void emitAssignName(Instruction *I, IRBuilder<> &B) {
  if (!I->hasName() || I->getType()->isAggregateType() ||
      expectIgnoredInIRTranslation(I))
    return;
  reportFatalOnTokenType(I);
  setInsertPointAfterDef(B, I);
  LLVMContext &Ctx = I->getContext();
  std::vector<Value *> Args = {
      I, MetadataAsValue::get(
             Ctx, MDNode::get(Ctx, MDString::get(Ctx, I->getName())))};
  B.CreateIntrinsic(Intrinsic::spv_assign_name, {I->getType()}, Args);
}

void SPIRVEmitIntrinsics::replaceAllUsesWith(Value *Src, Value *Dest,
                                             bool DeleteOld) {
  GR->replaceAllUsesWith(Src, Dest, DeleteOld);
  // Update incomplete type records if any
  if (isTodoType(Src)) {
    if (DeleteOld)
      eraseTodoType(Src);
    insertTodoType(Dest);
  }
}

void SPIRVEmitIntrinsics::replaceAllUsesWithAndErase(IRBuilder<> &B,
                                                     Instruction *Src,
                                                     Instruction *Dest,
                                                     bool DeleteOld) {
  replaceAllUsesWith(Src, Dest, DeleteOld);
  std::string Name = Src->hasName() ? Src->getName().str() : "";
  Src->eraseFromParent();
  if (!Name.empty()) {
    Dest->setName(Name);
    if (Named.insert(Dest).second)
      emitAssignName(Dest, B);
  }
}

static bool IsKernelArgInt8(Function *F, StoreInst *SI) {
  return SI && F->getCallingConv() == CallingConv::SPIR_KERNEL &&
         isPointerTy(SI->getValueOperand()->getType()) &&
         isa<Argument>(SI->getValueOperand());
}

// Maybe restore original function return type.
static inline Type *restoreMutatedType(SPIRVGlobalRegistry *GR, Instruction *I,
                                       Type *Ty) {
  CallInst *CI = dyn_cast<CallInst>(I);
  if (!CI || CI->isIndirectCall() || CI->isInlineAsm() ||
      !CI->getCalledFunction() || CI->getCalledFunction()->isIntrinsic())
    return Ty;
  if (Type *OriginalTy = GR->findMutated(CI->getCalledFunction()))
    return OriginalTy;
  return Ty;
}

// Reconstruct type with nested element types according to deduced type info.
// Return nullptr if no detailed type info is available.
Type *SPIRVEmitIntrinsics::reconstructType(Value *Op, bool UnknownElemTypeI8,
                                           bool IsPostprocessing) {
  Type *Ty = Op->getType();
  if (auto *OpI = dyn_cast<Instruction>(Op))
    Ty = restoreMutatedType(GR, OpI, Ty);
  if (!isUntypedPointerTy(Ty))
    return Ty;
  // try to find the pointee type
  if (Type *NestedTy = GR->findDeducedElementType(Op))
    return getTypedPointerWrapper(NestedTy, getPointerAddressSpace(Ty));
  // not a pointer according to the type info (e.g., Event object)
  CallInst *CI = GR->findAssignPtrTypeInstr(Op);
  if (CI) {
    MetadataAsValue *MD = cast<MetadataAsValue>(CI->getArgOperand(1));
    return cast<ConstantAsMetadata>(MD->getMetadata())->getType();
  }
  if (UnknownElemTypeI8) {
    if (!IsPostprocessing)
      insertTodoType(Op);
    return getTypedPointerWrapper(IntegerType::getInt8Ty(Op->getContext()),
                                  getPointerAddressSpace(Ty));
  }
  return nullptr;
}

CallInst *SPIRVEmitIntrinsics::buildSpvPtrcast(Function *F, Value *Op,
                                               Type *ElemTy) {
  IRBuilder<> B(Op->getContext());
  if (auto *OpI = dyn_cast<Instruction>(Op)) {
    // spv_ptrcast's argument Op denotes an instruction that generates
    // a value, and we may use getInsertionPointAfterDef()
    setInsertPointAfterDef(B, OpI);
  } else if (auto *OpA = dyn_cast<Argument>(Op)) {
    B.SetInsertPointPastAllocas(OpA->getParent());
    B.SetCurrentDebugLocation(DebugLoc());
  } else {
    B.SetInsertPoint(F->getEntryBlock().getFirstNonPHIOrDbgOrAlloca());
  }
  Type *OpTy = Op->getType();
  SmallVector<Type *, 2> Types = {OpTy, OpTy};
  SmallVector<Value *, 2> Args = {Op, buildMD(getNormalizedPoisonValue(ElemTy)),
                                  B.getInt32(getPointerAddressSpace(OpTy))};
  CallInst *PtrCasted =
      B.CreateIntrinsic(Intrinsic::spv_ptrcast, {Types}, Args);
  GR->buildAssignPtr(B, ElemTy, PtrCasted);
  return PtrCasted;
}

void SPIRVEmitIntrinsics::replaceUsesOfWithSpvPtrcast(
    Value *Op, Type *ElemTy, Instruction *I,
    DenseMap<Function *, CallInst *> Ptrcasts) {
  Function *F = I->getParent()->getParent();
  CallInst *PtrCastedI = nullptr;
  auto It = Ptrcasts.find(F);
  if (It == Ptrcasts.end()) {
    PtrCastedI = buildSpvPtrcast(F, Op, ElemTy);
    Ptrcasts[F] = PtrCastedI;
  } else {
    PtrCastedI = It->second;
  }
  I->replaceUsesOfWith(Op, PtrCastedI);
}

void SPIRVEmitIntrinsics::propagateElemType(
    Value *Op, Type *ElemTy,
    DenseSet<std::pair<Value *, Value *>> &VisitedSubst) {
  DenseMap<Function *, CallInst *> Ptrcasts;
  SmallVector<User *> Users(Op->users());
  for (auto *U : Users) {
    if (!isa<Instruction>(U) || isSpvIntrinsic(U))
      continue;
    if (!VisitedSubst.insert(std::make_pair(U, Op)).second)
      continue;
    Instruction *UI = cast<Instruction>(U);
    // If the instruction was validated already, we need to keep it valid by
    // keeping current Op type.
    if (isa<GetElementPtrInst>(UI) ||
        TypeValidated.find(UI) != TypeValidated.end())
      replaceUsesOfWithSpvPtrcast(Op, ElemTy, UI, Ptrcasts);
  }
}

void SPIRVEmitIntrinsics::propagateElemTypeRec(
    Value *Op, Type *PtrElemTy, Type *CastElemTy,
    DenseSet<std::pair<Value *, Value *>> &VisitedSubst) {
  std::unordered_set<Value *> Visited;
  DenseMap<Function *, CallInst *> Ptrcasts;
  propagateElemTypeRec(Op, PtrElemTy, CastElemTy, VisitedSubst, Visited,
                       std::move(Ptrcasts));
}

void SPIRVEmitIntrinsics::propagateElemTypeRec(
    Value *Op, Type *PtrElemTy, Type *CastElemTy,
    DenseSet<std::pair<Value *, Value *>> &VisitedSubst,
    std::unordered_set<Value *> &Visited,
    DenseMap<Function *, CallInst *> Ptrcasts) {
  if (!Visited.insert(Op).second)
    return;
  SmallVector<User *> Users(Op->users());
  for (auto *U : Users) {
    if (!isa<Instruction>(U) || isSpvIntrinsic(U))
      continue;
    if (!VisitedSubst.insert(std::make_pair(U, Op)).second)
      continue;
    Instruction *UI = cast<Instruction>(U);
    // If the instruction was validated already, we need to keep it valid by
    // keeping current Op type.
    if (isa<GetElementPtrInst>(UI) ||
        TypeValidated.find(UI) != TypeValidated.end())
      replaceUsesOfWithSpvPtrcast(Op, CastElemTy, UI, Ptrcasts);
  }
}

// Sets the element pointer type to the given value of ValueTy and tries to
// specify this type further (recursively) by the Operand value, if needed.
Type *
SPIRVEmitIntrinsics::deduceElementTypeByValueDeep(Type *ValueTy, Value *Operand,
                                                  bool UnknownElemTypeI8) {
  std::unordered_set<Value *> Visited;
  return deduceElementTypeByValueDeep(ValueTy, Operand, Visited,
                                      UnknownElemTypeI8);
}

Type *SPIRVEmitIntrinsics::deduceElementTypeByValueDeep(
    Type *ValueTy, Value *Operand, std::unordered_set<Value *> &Visited,
    bool UnknownElemTypeI8) {
  Type *Ty = ValueTy;
  if (Operand) {
    if (auto *PtrTy = dyn_cast<PointerType>(Ty)) {
      if (Type *NestedTy =
              deduceElementTypeHelper(Operand, Visited, UnknownElemTypeI8))
        Ty = getTypedPointerWrapper(NestedTy, PtrTy->getAddressSpace());
    } else {
      Ty = deduceNestedTypeHelper(dyn_cast<User>(Operand), Ty, Visited,
                                  UnknownElemTypeI8);
    }
  }
  return Ty;
}

// Traverse User instructions to deduce an element pointer type of the operand.
Type *SPIRVEmitIntrinsics::deduceElementTypeByUsersDeep(
    Value *Op, std::unordered_set<Value *> &Visited, bool UnknownElemTypeI8) {
  if (!Op || !isPointerTy(Op->getType()) || isa<ConstantPointerNull>(Op) ||
      isa<UndefValue>(Op))
    return nullptr;

  if (auto ElemTy = getPointeeType(Op->getType()))
    return ElemTy;

  // maybe we already know operand's element type
  if (Type *KnownTy = GR->findDeducedElementType(Op))
    return KnownTy;

  for (User *OpU : Op->users()) {
    if (Instruction *Inst = dyn_cast<Instruction>(OpU)) {
      if (Type *Ty = deduceElementTypeHelper(Inst, Visited, UnknownElemTypeI8))
        return Ty;
    }
  }
  return nullptr;
}

// Implements what we know in advance about intrinsics and builtin calls
// TODO: consider feasibility of this particular case to be generalized by
// encoding knowledge about intrinsics and builtin calls by corresponding
// specification rules
static Type *getPointeeTypeByCallInst(StringRef DemangledName,
                                      Function *CalledF, unsigned OpIdx) {
  if ((DemangledName.starts_with("__spirv_ocl_printf(") ||
       DemangledName.starts_with("printf(")) &&
      OpIdx == 0)
    return IntegerType::getInt8Ty(CalledF->getContext());
  return nullptr;
}

// Deduce and return a successfully deduced Type of the Instruction,
// or nullptr otherwise.
Type *SPIRVEmitIntrinsics::deduceElementTypeHelper(Value *I,
                                                   bool UnknownElemTypeI8) {
  std::unordered_set<Value *> Visited;
  return deduceElementTypeHelper(I, Visited, UnknownElemTypeI8);
}

void SPIRVEmitIntrinsics::maybeAssignPtrType(Type *&Ty, Value *Op, Type *RefTy,
                                             bool UnknownElemTypeI8) {
  if (isUntypedPointerTy(RefTy)) {
    if (!UnknownElemTypeI8)
      return;
    insertTodoType(Op);
  }
  Ty = RefTy;
}

bool SPIRVEmitIntrinsics::walkLogicalAccessChain(
    GetElementPtrInst &GEP,
    const std::function<void(Type *, uint64_t)> &OnLiteralIndexing,
    const std::function<void(Type *, Value *)> &OnDynamicIndexing) {
  // We only rewrite i8* GEPs. Others should be left as-is.
  // A valid i8* GEP must always have a single index.
  assert(GEP.getSourceElementType() ==
         IntegerType::getInt8Ty(CurrF->getContext()));
  assert(GEP.getNumIndices() == 1);

  auto &DL = CurrF->getDataLayout();
  Value *Src = getPointerRoot(GEP.getPointerOperand());
  Type *CurType = deduceElementType(Src, true);

  Value *Operand = *GEP.idx_begin();
  ConstantInt *CI = dyn_cast<ConstantInt>(Operand);
  if (!CI) {
    ArrayType *AT = dyn_cast<ArrayType>(CurType);
    // Operand is not constant. Either we have an array and accept it, or we
    // give up.
    if (AT)
      OnDynamicIndexing(AT->getElementType(), Operand);
    return AT == nullptr;
  }

  assert(CI);
  uint64_t Offset = CI->getZExtValue();

  do {
    if (ArrayType *AT = dyn_cast<ArrayType>(CurType)) {
      uint32_t EltTypeSize = DL.getTypeSizeInBits(AT->getElementType()) / 8;
      assert(Offset < AT->getNumElements() * EltTypeSize);
      uint64_t Index = Offset / EltTypeSize;
      Offset = Offset - (Index * EltTypeSize);
      CurType = AT->getElementType();
      OnLiteralIndexing(CurType, Index);
    } else if (StructType *ST = dyn_cast<StructType>(CurType)) {
      uint32_t StructSize = DL.getTypeSizeInBits(ST) / 8;
      assert(Offset < StructSize);
      (void)StructSize;
      const auto &STL = DL.getStructLayout(ST);
      unsigned Element = STL->getElementContainingOffset(Offset);
      Offset -= STL->getElementOffset(Element);
      CurType = ST->getElementType(Element);
      OnLiteralIndexing(CurType, Element);
    } else {
      // Vector type indexing should not use GEP.
      // So if we have an index left, something is wrong. Giving up.
      return true;
    }
  } while (Offset > 0);

  return false;
}

Instruction *
SPIRVEmitIntrinsics::buildLogicalAccessChainFromGEP(GetElementPtrInst &GEP) {
  auto &DL = CurrF->getDataLayout();
  IRBuilder<> B(GEP.getParent());
  B.SetInsertPoint(&GEP);

  std::vector<Value *> Indices;
  Indices.push_back(ConstantInt::get(
      IntegerType::getInt32Ty(CurrF->getContext()), 0, /* Signed= */ false));
  walkLogicalAccessChain(
      GEP,
      [&Indices, &B](Type *EltType, uint64_t Index) {
        Indices.push_back(
            ConstantInt::get(B.getInt64Ty(), Index, /* Signed= */ false));
      },
      [&Indices, &B, &DL](Type *EltType, Value *Offset) {
        uint32_t EltTypeSize = DL.getTypeSizeInBits(EltType) / 8;
        Value *Index = B.CreateUDiv(
            Offset, ConstantInt::get(Offset->getType(), EltTypeSize,
                                     /* Signed= */ false));
        Indices.push_back(Index);
      });

  SmallVector<Type *, 2> Types = {GEP.getType(), GEP.getOperand(0)->getType()};
  SmallVector<Value *, 4> Args;
  Args.push_back(B.getInt1(GEP.isInBounds()));
  Args.push_back(GEP.getOperand(0));
  llvm::append_range(Args, Indices);
  auto *NewI = B.CreateIntrinsic(Intrinsic::spv_gep, {Types}, {Args});
  replaceAllUsesWithAndErase(B, &GEP, NewI);
  return NewI;
}
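
// Illustrative sketch of the rewrite above: if %p is deduced to point to
// { [4 x i32], float }, then
//   %q = getelementptr inbounds i8, ptr %p, i64 4
// becomes, conceptually (intrinsic name mangling omitted),
//   %q = call ptr @llvm.spv.gep(i1 true, ptr %p, i32 0, i64 0, i64 1)
// i.e. a leading i32 0, then struct field 0, then array element 1.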

Type *SPIRVEmitIntrinsics::getGEPTypeLogical(GetElementPtrInst *GEP) {

  Type *CurType = GEP->getResultElementType();

  bool Interrupted = walkLogicalAccessChain(
      *GEP, [&CurType](Type *EltType, uint64_t Index) { CurType = EltType; },
      [&CurType](Type *EltType, Value *Index) { CurType = EltType; });

  return Interrupted ? GEP->getResultElementType() : CurType;
}

Type *SPIRVEmitIntrinsics::getGEPType(GetElementPtrInst *Ref) {
  if (Ref->getSourceElementType() ==
          IntegerType::getInt8Ty(CurrF->getContext()) &&
      TM->getSubtarget<SPIRVSubtarget>(*CurrF).isLogicalSPIRV()) {
    return getGEPTypeLogical(Ref);
  }

  Type *Ty = nullptr;
  // TODO: not sure if GetElementPtrInst::getTypeAtIndex() does anything
  // useful here
  if (isNestedPointer(Ref->getSourceElementType())) {
    Ty = Ref->getSourceElementType();
    for (Use &U : drop_begin(Ref->indices()))
      Ty = GetElementPtrInst::getTypeAtIndex(Ty, U.get());
  } else {
    Ty = Ref->getResultElementType();
  }
  return Ty;
}

Type *SPIRVEmitIntrinsics::deduceElementTypeHelper(
    Value *I, std::unordered_set<Value *> &Visited, bool UnknownElemTypeI8,
    bool IgnoreKnownType) {
  // allow to pass nullptr as an argument
  if (!I)
    return nullptr;

  // maybe already known
  if (!IgnoreKnownType)
    if (Type *KnownTy = GR->findDeducedElementType(I))
      return KnownTy;

  // maybe a cycle
  if (!Visited.insert(I).second)
    return nullptr;

  // fallback value in case when we fail to deduce a type
  Type *Ty = nullptr;
  // look for known basic patterns of type inference
  if (auto *Ref = dyn_cast<AllocaInst>(I)) {
    maybeAssignPtrType(Ty, I, Ref->getAllocatedType(), UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<GetElementPtrInst>(I)) {
    Ty = getGEPType(Ref);
  } else if (auto *Ref = dyn_cast<LoadInst>(I)) {
    Value *Op = Ref->getPointerOperand();
    Type *KnownTy = GR->findDeducedElementType(Op);
    if (!KnownTy)
      KnownTy = Op->getType();
    if (Type *ElemTy = getPointeeType(KnownTy))
      maybeAssignPtrType(Ty, I, ElemTy, UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<GlobalValue>(I)) {
    Ty = deduceElementTypeByValueDeep(
        Ref->getValueType(),
        Ref->getNumOperands() > 0 ? Ref->getOperand(0) : nullptr, Visited,
        UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<AddrSpaceCastInst>(I)) {
    Type *RefTy = deduceElementTypeHelper(Ref->getPointerOperand(), Visited,
                                          UnknownElemTypeI8);
    maybeAssignPtrType(Ty, I, RefTy, UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<BitCastInst>(I)) {
    if (Type *Src = Ref->getSrcTy(), *Dest = Ref->getDestTy();
        isPointerTy(Src) && isPointerTy(Dest))
      Ty = deduceElementTypeHelper(Ref->getOperand(0), Visited,
                                   UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<AtomicCmpXchgInst>(I)) {
    Value *Op = Ref->getNewValOperand();
    if (isPointerTy(Op->getType()))
      Ty = deduceElementTypeHelper(Op, Visited, UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<AtomicRMWInst>(I)) {
    Value *Op = Ref->getValOperand();
    if (isPointerTy(Op->getType()))
      Ty = deduceElementTypeHelper(Op, Visited, UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<PHINode>(I)) {
    Type *BestTy = nullptr;
    unsigned MaxN = 1;
    DenseMap<Type *, unsigned> PhiTys;
    for (int i = Ref->getNumIncomingValues() - 1; i >= 0; --i) {
      Ty = deduceElementTypeByUsersDeep(Ref->getIncomingValue(i), Visited,
                                        UnknownElemTypeI8);
      if (!Ty)
        continue;
      auto It = PhiTys.try_emplace(Ty, 1);
      if (!It.second) {
        ++It.first->second;
        if (It.first->second > MaxN) {
          MaxN = It.first->second;
          BestTy = Ty;
        }
      }
    }
    if (BestTy)
      Ty = BestTy;
  } else if (auto *Ref = dyn_cast<SelectInst>(I)) {
    for (Value *Op : {Ref->getTrueValue(), Ref->getFalseValue()}) {
      Ty = deduceElementTypeByUsersDeep(Op, Visited, UnknownElemTypeI8);
      if (Ty)
        break;
    }
  } else if (auto *CI = dyn_cast<CallInst>(I)) {
    static StringMap<unsigned> ResTypeByArg = {
        {"to_global", 0},
        {"to_local", 0},
        {"to_private", 0},
        {"__spirv_GenericCastToPtr_ToGlobal", 0},
        {"__spirv_GenericCastToPtr_ToLocal", 0},
        {"__spirv_GenericCastToPtr_ToPrivate", 0},
        {"__spirv_GenericCastToPtrExplicit_ToGlobal", 0},
        {"__spirv_GenericCastToPtrExplicit_ToLocal", 0},
        {"__spirv_GenericCastToPtrExplicit_ToPrivate", 0}};
    // TODO: maybe improve performance by caching demangled names

    auto *II = dyn_cast<IntrinsicInst>(CI);
    if (II && II->getIntrinsicID() == Intrinsic::spv_resource_getpointer) {
      auto *HandleType = cast<TargetExtType>(II->getOperand(0)->getType());
      if (HandleType->getTargetExtName() == "spirv.Image" ||
          HandleType->getTargetExtName() == "spirv.SignedImage") {
        for (User *U : II->users()) {
          Ty = cast<Instruction>(U)->getAccessType();
          if (Ty)
            break;
        }
      } else if (HandleType->getTargetExtName() == "spirv.VulkanBuffer") {
        // This call is supposed to index into an array
        Ty = HandleType->getTypeParameter(0);
        if (Ty->isArrayTy())
          Ty = Ty->getArrayElementType();
        else {
          TargetExtType *BufferTy = cast<TargetExtType>(Ty);
          assert(BufferTy->getTargetExtName() == "spirv.Layout");
          Ty = BufferTy->getTypeParameter(0);
          assert(Ty && Ty->isStructTy());
          uint32_t Index = cast<ConstantInt>(II->getOperand(1))->getZExtValue();
          Ty = cast<StructType>(Ty)->getElementType(Index);
        }
      } else {
        llvm_unreachable("Unknown handle type for spv_resource_getpointer.");
      }
    } else if (II && II->getIntrinsicID() ==
                         Intrinsic::spv_generic_cast_to_ptr_explicit) {
      Ty = deduceElementTypeHelper(CI->getArgOperand(0), Visited,
                                   UnknownElemTypeI8);
    } else if (Function *CalledF = CI->getCalledFunction()) {
      std::string DemangledName =
          getOclOrSpirvBuiltinDemangledName(CalledF->getName());
      if (DemangledName.length() > 0)
        DemangledName = SPIRV::lookupBuiltinNameHelper(DemangledName);
      auto AsArgIt = ResTypeByArg.find(DemangledName);
      if (AsArgIt != ResTypeByArg.end())
        Ty = deduceElementTypeHelper(CI->getArgOperand(AsArgIt->second),
                                     Visited, UnknownElemTypeI8);
      else if (Type *KnownRetTy = GR->findDeducedElementType(CalledF))
        Ty = KnownRetTy;
    }
  }

  // remember the found relationship
  if (Ty && !IgnoreKnownType) {
    // specify nested types if needed, otherwise return unchanged
    GR->addDeducedElementType(I, normalizeType(Ty));
  }

  return Ty;
}

// Re-create a type of the value if it has untyped pointer fields, also nested.
// Return the original value type if no corrections of untyped pointer
// information are found or needed.
Type *SPIRVEmitIntrinsics::deduceNestedTypeHelper(User *U,
                                                  bool UnknownElemTypeI8) {
  std::unordered_set<Value *> Visited;
  return deduceNestedTypeHelper(U, U->getType(), Visited, UnknownElemTypeI8);
}

Type *SPIRVEmitIntrinsics::deduceNestedTypeHelper(
    User *U, Type *OrigTy, std::unordered_set<Value *> &Visited,
    bool UnknownElemTypeI8) {
  if (!U)
    return OrigTy;

  // maybe already known
  if (Type *KnownTy = GR->findDeducedCompositeType(U))
    return KnownTy;

  // maybe a cycle
  if (!Visited.insert(U).second)
    return OrigTy;

  if (isa<StructType>(OrigTy)) {
    SmallVector<Type *> Tys;
    bool Change = false;
    for (unsigned i = 0; i < U->getNumOperands(); ++i) {
      Value *Op = U->getOperand(i);
      assert(Op && "Operands should not be null.");
      Type *OpTy = Op->getType();
      Type *Ty = OpTy;
      if (auto *PtrTy = dyn_cast<PointerType>(OpTy)) {
        if (Type *NestedTy =
                deduceElementTypeHelper(Op, Visited, UnknownElemTypeI8))
          Ty = getTypedPointerWrapper(NestedTy, PtrTy->getAddressSpace());
      } else {
        Ty = deduceNestedTypeHelper(dyn_cast<User>(Op), OpTy, Visited,
                                    UnknownElemTypeI8);
      }
      Tys.push_back(Ty);
      Change |= Ty != OpTy;
    }
    if (Change) {
      Type *NewTy = StructType::create(Tys);
      GR->addDeducedCompositeType(U, NewTy);
      return NewTy;
    }
  } else if (auto *ArrTy = dyn_cast<ArrayType>(OrigTy)) {
    if (Value *Op = U->getNumOperands() > 0 ? U->getOperand(0) : nullptr) {
      Type *OpTy = ArrTy->getElementType();
      Type *Ty = OpTy;
      if (auto *PtrTy = dyn_cast<PointerType>(OpTy)) {
        if (Type *NestedTy =
                deduceElementTypeHelper(Op, Visited, UnknownElemTypeI8))
          Ty = getTypedPointerWrapper(NestedTy, PtrTy->getAddressSpace());
      } else {
        Ty = deduceNestedTypeHelper(dyn_cast<User>(Op), OpTy, Visited,
                                    UnknownElemTypeI8);
      }
      if (Ty != OpTy) {
        Type *NewTy = ArrayType::get(Ty, ArrTy->getNumElements());
        GR->addDeducedCompositeType(U, NewTy);
        return NewTy;
      }
    }
  } else if (auto *VecTy = dyn_cast<VectorType>(OrigTy)) {
    if (Value *Op = U->getNumOperands() > 0 ? U->getOperand(0) : nullptr) {
      Type *OpTy = VecTy->getElementType();
      Type *Ty = OpTy;
      if (auto *PtrTy = dyn_cast<PointerType>(OpTy)) {
        if (Type *NestedTy =
                deduceElementTypeHelper(Op, Visited, UnknownElemTypeI8))
          Ty = getTypedPointerWrapper(NestedTy, PtrTy->getAddressSpace());
      } else {
        Ty = deduceNestedTypeHelper(dyn_cast<User>(Op), OpTy, Visited,
                                    UnknownElemTypeI8);
      }
      if (Ty != OpTy) {
        Type *NewTy = VectorType::get(Ty, VecTy->getElementCount());
        GR->addDeducedCompositeType(U, NewTy);
        return NewTy;
      }
    }
  }

  return OrigTy;
}

Type *SPIRVEmitIntrinsics::deduceElementType(Value *I, bool UnknownElemTypeI8) {
  if (Type *Ty = deduceElementTypeHelper(I, UnknownElemTypeI8))
    return Ty;
  if (!UnknownElemTypeI8)
    return nullptr;
  insertTodoType(I);
  return IntegerType::getInt8Ty(I->getContext());
}

static inline Type *getAtomicElemTy(SPIRVGlobalRegistry *GR, Instruction *I,
                                    Value *PointerOperand) {
  Type *PointeeTy = GR->findDeducedElementType(PointerOperand);
  if (PointeeTy && !isUntypedPointerTy(PointeeTy))
    return nullptr;
  auto *PtrTy = dyn_cast<PointerType>(I->getType());
  if (!PtrTy)
    return I->getType();
  if (Type *NestedTy = GR->findDeducedElementType(I))
    return getTypedPointerWrapper(NestedTy, PtrTy->getAddressSpace());
  return nullptr;
}

// Try to deduce element type for a call base. Returns false if this is an
// indirect function invocation, and true otherwise.
bool SPIRVEmitIntrinsics::deduceOperandElementTypeCalledFunction(
    CallInst *CI, SmallVector<std::pair<Value *, unsigned>> &Ops,
    Type *&KnownElemTy, bool &Incomplete) {
  Function *CalledF = CI->getCalledFunction();
  if (!CalledF)
    return false;
  std::string DemangledName =
      getOclOrSpirvBuiltinDemangledName(CalledF->getName());
  if (DemangledName.length() > 0 &&
      !StringRef(DemangledName).starts_with("llvm.")) {
    const SPIRVSubtarget &ST = TM->getSubtarget<SPIRVSubtarget>(*CalledF);
    auto [Grp, Opcode, ExtNo] = SPIRV::mapBuiltinToOpcode(
        DemangledName, ST.getPreferredInstructionSet());
    if (Opcode == SPIRV::OpGroupAsyncCopy) {
      for (unsigned i = 0, PtrCnt = 0; i < CI->arg_size() && PtrCnt < 2; ++i) {
        Value *Op = CI->getArgOperand(i);
        if (!isPointerTy(Op->getType()))
          continue;
        ++PtrCnt;
        if (Type *ElemTy = GR->findDeducedElementType(Op))
          KnownElemTy = ElemTy; // src will rewrite dest if both are defined
        Ops.push_back(std::make_pair(Op, i));
      }
    } else if (Grp == SPIRV::Atomic || Grp == SPIRV::AtomicFloating) {
      if (CI->arg_size() == 0)
        return true;
      Value *Op = CI->getArgOperand(0);
      if (!isPointerTy(Op->getType()))
        return true;
      switch (Opcode) {
      case SPIRV::OpAtomicFAddEXT:
      case SPIRV::OpAtomicFMinEXT:
      case SPIRV::OpAtomicFMaxEXT:
      case SPIRV::OpAtomicLoad:
      case SPIRV::OpAtomicCompareExchangeWeak:
      case SPIRV::OpAtomicCompareExchange:
      case SPIRV::OpAtomicExchange:
      case SPIRV::OpAtomicIAdd:
      case SPIRV::OpAtomicISub:
      case SPIRV::OpAtomicOr:
      case SPIRV::OpAtomicXor:
      case SPIRV::OpAtomicAnd:
      case SPIRV::OpAtomicUMin:
      case SPIRV::OpAtomicUMax:
      case SPIRV::OpAtomicSMin:
      case SPIRV::OpAtomicSMax: {
        KnownElemTy = isPointerTy(CI->getType()) ? getAtomicElemTy(GR, CI, Op)
                                                 : CI->getType();
        if (!KnownElemTy)
          return true;
        Incomplete = isTodoType(Op);
        Ops.push_back(std::make_pair(Op, 0));
      } break;
      case SPIRV::OpAtomicStore: {
        if (CI->arg_size() < 4)
          return true;
        Value *ValOp = CI->getArgOperand(3);
        KnownElemTy = isPointerTy(ValOp->getType())
                          ? getAtomicElemTy(GR, CI, Op)
                          : ValOp->getType();
        if (!KnownElemTy)
          return true;
        Incomplete = isTodoType(Op);
        Ops.push_back(std::make_pair(Op, 0));
      } break;
      }
    }
  }
  return true;
}

// Try to deduce element type for a function pointer.
void SPIRVEmitIntrinsics::deduceOperandElementTypeFunctionPointer(
    CallInst *CI, SmallVector<std::pair<Value *, unsigned>> &Ops,
    Type *&KnownElemTy, bool IsPostprocessing) {
  Value *Op = CI->getCalledOperand();
  if (!Op || !isPointerTy(Op->getType()))
    return;
  Ops.push_back(std::make_pair(Op, std::numeric_limits<unsigned>::max()));
  FunctionType *FTy = CI->getFunctionType();
  bool IsNewFTy = false, IsIncomplete = false;
  SmallVector<Type *, 4> ArgTys;
  for (Value *Arg : CI->args()) {
    Type *ArgTy = Arg->getType();
    if (ArgTy->isPointerTy()) {
      if (Type *ElemTy = GR->findDeducedElementType(Arg)) {
        IsNewFTy = true;
        ArgTy = getTypedPointerWrapper(ElemTy, getPointerAddressSpace(ArgTy));
        if (isTodoType(Arg))
          IsIncomplete = true;
      } else {
        IsIncomplete = true;
      }
    }
    ArgTys.push_back(ArgTy);
  }
  Type *RetTy = FTy->getReturnType();
  if (CI->getType()->isPointerTy()) {
    if (Type *ElemTy = GR->findDeducedElementType(CI)) {
      IsNewFTy = true;
      RetTy =
          getTypedPointerWrapper(ElemTy, getPointerAddressSpace(CI->getType()));
      if (isTodoType(CI))
        IsIncomplete = true;
    } else {
      IsIncomplete = true;
    }
  }
  if (!IsPostprocessing && IsIncomplete)
    insertTodoType(Op);
  KnownElemTy =
      IsNewFTy ? FunctionType::get(RetTy, ArgTys, FTy->isVarArg()) : FTy;
}

bool SPIRVEmitIntrinsics::deduceOperandElementTypeFunctionRet(
    Instruction *I, SmallPtrSet<Instruction *, 4> *IncompleteRets,
    const SmallPtrSet<Value *, 4> *AskOps, bool IsPostprocessing,
    Type *&KnownElemTy, Value *Op, Function *F) {
  KnownElemTy = GR->findDeducedElementType(F);
  if (KnownElemTy)
    return false;
  if (Type *OpElemTy = GR->findDeducedElementType(Op)) {
    OpElemTy = normalizeType(OpElemTy);
    GR->addDeducedElementType(F, OpElemTy);
    GR->addReturnType(
        F, TypedPointerType::get(OpElemTy,
                                 getPointerAddressSpace(F->getReturnType())));
    // non-recursive update of types in function uses
    DenseSet<std::pair<Value *, Value *>> VisitedSubst{std::make_pair(I, Op)};
    for (User *U : F->users()) {
      CallInst *CI = dyn_cast<CallInst>(U);
      if (!CI || CI->getCalledFunction() != F)
        continue;
      if (CallInst *AssignCI = GR->findAssignPtrTypeInstr(CI)) {
        if (Type *PrevElemTy = GR->findDeducedElementType(CI)) {
          GR->updateAssignType(AssignCI, CI,
                               getNormalizedPoisonValue(OpElemTy));
          propagateElemType(CI, PrevElemTy, VisitedSubst);
        }
      }
    }
    // Non-recursive update of types in the function's incomplete returns.
    // This may happen at most once per function; the latch is the pair of
    // findDeducedElementType(F) / addDeducedElementType(F, ...).
    // With or without the latch it is a non-recursive call due to
    // IncompleteRets set to nullptr in this call.
    if (IncompleteRets)
      for (Instruction *IncompleteRetI : *IncompleteRets)
        deduceOperandElementType(IncompleteRetI, nullptr, AskOps,
                                 IsPostprocessing);
  } else if (IncompleteRets) {
    IncompleteRets->insert(I);
  }
  TypeValidated.insert(I);
  return true;
}

// If the Instruction has Pointer operands with unresolved types, this function
// tries to deduce them. If the Instruction has Pointer operands with known
// types which differ from expected, this function tries to insert a bitcast to
// resolve the issue.
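// For example (a sketch, not lifted from a test): given
//   %v = load i32, ptr %p
// with no element type yet recorded for %p, i32 is deduced for %p and an
// @llvm.spv.assign.ptr.type call is emitted for it; if %p already carries a
// conflicting known element type, an @llvm.spv.ptrcast call is inserted for
// the operand instead.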
void SPIRVEmitIntrinsics::deduceOperandElementType(
    Instruction *I, SmallPtrSet<Instruction *, 4> *IncompleteRets,
    const SmallPtrSet<Value *, 4> *AskOps, bool IsPostprocessing) {
  SmallVector<std::pair<Value *, unsigned>> Ops;
  Type *KnownElemTy = nullptr;
  bool Incomplete = false;
  // look for known basic patterns of type inference
  if (auto *Ref = dyn_cast<PHINode>(I)) {
    if (!isPointerTy(I->getType()) ||
        !(KnownElemTy = GR->findDeducedElementType(I)))
      return;
    Incomplete = isTodoType(I);
    for (unsigned i = 0; i < Ref->getNumIncomingValues(); i++) {
      Value *Op = Ref->getIncomingValue(i);
      if (isPointerTy(Op->getType()))
        Ops.push_back(std::make_pair(Op, i));
    }
  } else if (auto *Ref = dyn_cast<AddrSpaceCastInst>(I)) {
    KnownElemTy = GR->findDeducedElementType(I);
    if (!KnownElemTy)
      return;
    Incomplete = isTodoType(I);
    Ops.push_back(std::make_pair(Ref->getPointerOperand(), 0));
  } else if (auto *Ref = dyn_cast<BitCastInst>(I)) {
    if (!isPointerTy(I->getType()))
      return;
    KnownElemTy = GR->findDeducedElementType(I);
    if (!KnownElemTy)
      return;
    Incomplete = isTodoType(I);
    Ops.push_back(std::make_pair(Ref->getOperand(0), 0));
  } else if (auto *Ref = dyn_cast<GetElementPtrInst>(I)) {
    if (GR->findDeducedElementType(Ref->getPointerOperand()))
      return;
    KnownElemTy = Ref->getSourceElementType();
    Ops.push_back(std::make_pair(Ref->getPointerOperand(),
                                 GetElementPtrInst::getPointerOperandIndex()));
  } else if (auto *Ref = dyn_cast<LoadInst>(I)) {
    KnownElemTy = I->getType();
    if (isUntypedPointerTy(KnownElemTy))
      return;
    Type *PointeeTy = GR->findDeducedElementType(Ref->getPointerOperand());
    if (PointeeTy && !isUntypedPointerTy(PointeeTy))
      return;
    Ops.push_back(std::make_pair(Ref->getPointerOperand(),
                                 LoadInst::getPointerOperandIndex()));
  } else if (auto *Ref = dyn_cast<StoreInst>(I)) {
    if (!(KnownElemTy =
              reconstructType(Ref->getValueOperand(), false, IsPostprocessing)))
      return;
    Type *PointeeTy = GR->findDeducedElementType(Ref->getPointerOperand());
    if (PointeeTy && !isUntypedPointerTy(PointeeTy))
      return;
    Ops.push_back(std::make_pair(Ref->getPointerOperand(),
                                 StoreInst::getPointerOperandIndex()));
  } else if (auto *Ref = dyn_cast<AtomicCmpXchgInst>(I)) {
    KnownElemTy = isPointerTy(I->getType())
                      ? getAtomicElemTy(GR, I, Ref->getPointerOperand())
                      : I->getType();
    if (!KnownElemTy)
      return;
    Incomplete = isTodoType(Ref->getPointerOperand());
    Ops.push_back(std::make_pair(Ref->getPointerOperand(),
                                 AtomicCmpXchgInst::getPointerOperandIndex()));
  } else if (auto *Ref = dyn_cast<AtomicRMWInst>(I)) {
    KnownElemTy = isPointerTy(I->getType())
                      ? getAtomicElemTy(GR, I, Ref->getPointerOperand())
                      : I->getType();
    if (!KnownElemTy)
      return;
    Incomplete = isTodoType(Ref->getPointerOperand());
    Ops.push_back(std::make_pair(Ref->getPointerOperand(),
                                 AtomicRMWInst::getPointerOperandIndex()));
  } else if (auto *Ref = dyn_cast<SelectInst>(I)) {
    if (!isPointerTy(I->getType()) ||
        !(KnownElemTy = GR->findDeducedElementType(I)))
      return;
    Incomplete = isTodoType(I);
    for (unsigned i = 0; i < Ref->getNumOperands(); i++) {
      Value *Op = Ref->getOperand(i);
      if (isPointerTy(Op->getType()))
        Ops.push_back(std::make_pair(Op, i));
    }
  } else if (auto *Ref = dyn_cast<ReturnInst>(I)) {
    if (!isPointerTy(CurrF->getReturnType()))
      return;
    Value *Op = Ref->getReturnValue();
    if (!Op)
      return;
    if (deduceOperandElementTypeFunctionRet(I, IncompleteRets, AskOps,
                                            IsPostprocessing, KnownElemTy, Op,
                                            CurrF))
      return;
    Incomplete = isTodoType(CurrF);
    Ops.push_back(std::make_pair(Op, 0));
  } else if (auto *Ref = dyn_cast<ICmpInst>(I)) {
    if (!isPointerTy(Ref->getOperand(0)->getType()))
      return;
    Value *Op0 = Ref->getOperand(0);
    Value *Op1 = Ref->getOperand(1);
    bool Incomplete0 = isTodoType(Op0);
    bool Incomplete1 = isTodoType(Op1);
    Type *ElemTy1 = GR->findDeducedElementType(Op1);
    Type *ElemTy0 = (Incomplete0 && !Incomplete1 && ElemTy1)
                        ? nullptr
                        : GR->findDeducedElementType(Op0);
    if (ElemTy0) {
      KnownElemTy = ElemTy0;
      Incomplete = Incomplete0;
      Ops.push_back(std::make_pair(Op1, 1));
    } else if (ElemTy1) {
      KnownElemTy = ElemTy1;
      Incomplete = Incomplete1;
      Ops.push_back(std::make_pair(Op0, 0));
    }
  } else if (CallInst *CI = dyn_cast<CallInst>(I)) {
    if (!CI->isIndirectCall())
      deduceOperandElementTypeCalledFunction(CI, Ops, KnownElemTy, Incomplete);
    else if (HaveFunPtrs)
      deduceOperandElementTypeFunctionPointer(CI, Ops, KnownElemTy,
                                              IsPostprocessing);
  }

  // There is not enough info to deduce types, or all is valid.
  if (!KnownElemTy || Ops.size() == 0)
    return;

  LLVMContext &Ctx = CurrF->getContext();
  IRBuilder<> B(Ctx);
  for (auto &OpIt : Ops) {
    Value *Op = OpIt.first;
    if (AskOps && !AskOps->contains(Op))
      continue;
    Type *AskTy = nullptr;
    CallInst *AskCI = nullptr;
    if (IsPostprocessing && AskOps) {
      AskTy = GR->findDeducedElementType(Op);
      AskCI = GR->findAssignPtrTypeInstr(Op);
      assert(AskTy && AskCI);
    }
    Type *Ty = AskTy ? AskTy : GR->findDeducedElementType(Op);
    if (Ty == KnownElemTy)
      continue;
    Value *OpTyVal = getNormalizedPoisonValue(KnownElemTy);
    Type *OpTy = Op->getType();
    if (Op->hasUseList() &&
        (!Ty || AskTy || isUntypedPointerTy(Ty) || isTodoType(Op))) {
      Type *PrevElemTy = GR->findDeducedElementType(Op);
      GR->addDeducedElementType(Op, normalizeType(KnownElemTy));
      // check if KnownElemTy is complete
      if (!Incomplete)
        eraseTodoType(Op);
      else if (!IsPostprocessing)
        insertTodoType(Op);
      // check if there is existing Intrinsic::spv_assign_ptr_type instruction
      CallInst *AssignCI = AskCI ? AskCI : GR->findAssignPtrTypeInstr(Op);
      if (AssignCI == nullptr) {
        Instruction *User = dyn_cast<Instruction>(Op->use_begin()->get());
        setInsertPointSkippingPhis(B, User ? User->getNextNode() : I);
        CallInst *CI =
            buildIntrWithMD(Intrinsic::spv_assign_ptr_type, {OpTy}, OpTyVal, Op,
                            {B.getInt32(getPointerAddressSpace(OpTy))}, B);
        GR->addAssignPtrTypeInstr(Op, CI);
      } else {
        GR->updateAssignType(AssignCI, Op, OpTyVal);
        DenseSet<std::pair<Value *, Value *>> VisitedSubst{
            std::make_pair(I, Op)};
        propagateElemTypeRec(Op, KnownElemTy, PrevElemTy, VisitedSubst);
      }
    } else {
      eraseTodoType(Op);
      CallInst *PtrCastI =
          buildSpvPtrcast(I->getParent()->getParent(), Op, KnownElemTy);
      if (OpIt.second == std::numeric_limits<unsigned>::max())
        dyn_cast<CallInst>(I)->setCalledOperand(PtrCastI);
      else
        I->setOperand(OpIt.second, PtrCastI);
    }
  }
  TypeValidated.insert(I);
}

void SPIRVEmitIntrinsics::replaceMemInstrUses(Instruction *Old,
                                              Instruction *New,
                                              IRBuilder<> &B) {
  while (!Old->user_empty()) {
    auto *U = Old->user_back();
    if (isAssignTypeInstr(U)) {
      B.SetInsertPoint(U);
      SmallVector<Value *, 2> Args = {New, U->getOperand(1)};
      CallInst *AssignCI =
          B.CreateIntrinsic(Intrinsic::spv_assign_type, {New->getType()}, Args);
      GR->addAssignPtrTypeInstr(New, AssignCI);
      U->eraseFromParent();
    } else if (isMemInstrToReplace(U) || isa<ReturnInst>(U) ||
               isa<CallInst>(U)) {
      U->replaceUsesOfWith(Old, New);
    } else {
      llvm_unreachable("illegal aggregate intrinsic user");
    }
  }
  New->copyMetadata(*Old);
  Old->eraseFromParent();
}

void SPIRVEmitIntrinsics::preprocessUndefs(IRBuilder<> &B) {
  std::queue<Instruction *> Worklist;
  for (auto &I : instructions(CurrF))
    Worklist.push(&I);

  while (!Worklist.empty()) {
    Instruction *I = Worklist.front();
    bool BPrepared = false;
    Worklist.pop();

    for (auto &Op : I->operands()) {
      auto *AggrUndef = dyn_cast<UndefValue>(Op);
      if (!AggrUndef || !Op->getType()->isAggregateType())
        continue;

      if (!BPrepared) {
        setInsertPointSkippingPhis(B, I);
        BPrepared = true;
      }
      auto *IntrUndef = B.CreateIntrinsic(Intrinsic::spv_undef, {});
      Worklist.push(IntrUndef);
      I->replaceUsesOfWith(Op, IntrUndef);
      AggrConsts[IntrUndef] = AggrUndef;
      AggrConstTypes[IntrUndef] = AggrUndef->getType();
    }
  }
}

void SPIRVEmitIntrinsics::preprocessCompositeConstants(IRBuilder<> &B) {
  std::queue<Instruction *> Worklist;
  for (auto &I : instructions(CurrF))
    Worklist.push(&I);

  while (!Worklist.empty()) {
    auto *I = Worklist.front();
    bool IsPhi = isa<PHINode>(I), BPrepared = false;
    assert(I);
    bool KeepInst = false;
    for (const auto &Op : I->operands()) {
      Constant *AggrConst = nullptr;
      Type *ResTy = nullptr;
      if (auto *COp = dyn_cast<ConstantVector>(Op)) {
        AggrConst = cast<Constant>(COp);
        ResTy = COp->getType();
      } else if (auto *COp = dyn_cast<ConstantArray>(Op)) {
        AggrConst = cast<Constant>(COp);
        ResTy = B.getInt32Ty();
      } else if (auto *COp = dyn_cast<ConstantStruct>(Op)) {
        AggrConst = cast<Constant>(COp);
        ResTy = B.getInt32Ty();
      } else if (auto *COp = dyn_cast<ConstantDataArray>(Op)) {
        AggrConst = cast<Constant>(COp);
        ResTy = B.getInt32Ty();
      } else if (auto *COp = dyn_cast<ConstantAggregateZero>(Op)) {
        AggrConst = cast<Constant>(COp);
        ResTy = Op->getType()->isVectorTy() ? COp->getType() : B.getInt32Ty();
      }
      if (AggrConst) {
        SmallVector<Value *> Args;
        if (auto *COp = dyn_cast<ConstantDataSequential>(Op))
          for (unsigned i = 0; i < COp->getNumElements(); ++i)
            Args.push_back(COp->getElementAsConstant(i));
        else
          llvm::append_range(Args, AggrConst->operands());
        if (!BPrepared) {
          IsPhi ? B.SetInsertPointPastAllocas(I->getParent()->getParent())
                : B.SetInsertPoint(I);
          BPrepared = true;
        }
        auto *CI =
            B.CreateIntrinsic(Intrinsic::spv_const_composite, {ResTy}, {Args});
        Worklist.push(CI);
        I->replaceUsesOfWith(Op, CI);
        KeepInst = true;
        AggrConsts[CI] = AggrConst;
        AggrConstTypes[CI] = deduceNestedTypeHelper(AggrConst, false);
      }
    }
    if (!KeepInst)
      Worklist.pop();
  }
}
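
// Illustrative sketch of the rewrite above: an operand such as
//   [2 x i32] [i32 1, i32 2]
// is materialized right before its user as
//   %c = call i32 @llvm.spv.const.composite(i32 1, i32 2)
// and the original constant together with its (possibly refined) nested type
// is remembered in AggrConsts/AggrConstTypes for the later translation.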

static void createDecorationIntrinsic(Instruction *I, MDNode *Node,
                                      IRBuilder<> &B) {
  LLVMContext &Ctx = I->getContext();
  setInsertPointAfterDef(B, I);
  B.CreateIntrinsic(Intrinsic::spv_assign_decoration, {I->getType()},
                    {I, MetadataAsValue::get(Ctx, MDNode::get(Ctx, {Node}))});
}

static void createRoundingModeDecoration(Instruction *I,
                                         unsigned RoundingModeDeco,
                                         IRBuilder<> &B) {
  LLVMContext &Ctx = I->getContext();
  Type *Int32Ty = Type::getInt32Ty(Ctx);
  MDNode *RoundingModeNode = MDNode::get(
      Ctx,
      {ConstantAsMetadata::get(
           ConstantInt::get(Int32Ty, SPIRV::Decoration::FPRoundingMode)),
       ConstantAsMetadata::get(ConstantInt::get(Int32Ty, RoundingModeDeco))});
  createDecorationIntrinsic(I, RoundingModeNode, B);
}

static void createSaturatedConversionDecoration(Instruction *I,
                                                IRBuilder<> &B) {
  LLVMContext &Ctx = I->getContext();
  Type *Int32Ty = Type::getInt32Ty(Ctx);
  MDNode *SaturatedConversionNode =
      MDNode::get(Ctx, {ConstantAsMetadata::get(ConstantInt::get(
                           Int32Ty, SPIRV::Decoration::SaturatedConversion))});
  createDecorationIntrinsic(I, SaturatedConversionNode, B);
}

// NOTE: the declaration of this helper was lost in extraction; the name below
// is a best-guess reconstruction and may differ from the original source.
static void createSaturatedConversionDecorations(Instruction *I,
                                                 IRBuilder<> &B) {
  if (auto *CI = dyn_cast<CallInst>(I)) {
    if (Function *Fu = CI->getCalledFunction()) {
      if (Fu->isIntrinsic()) {
        const unsigned IntrinsicId = Fu->getIntrinsicID();
        switch (IntrinsicId) {
        case Intrinsic::fptosi_sat:
        case Intrinsic::fptoui_sat:
          createSaturatedConversionDecoration(CI, B);
          break;
        default:
          break;
        }
      }
    }
  }
}

Instruction *SPIRVEmitIntrinsics::visitCallInst(CallInst &Call) {
  if (!Call.isInlineAsm())
    return &Call;

  const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
  LLVMContext &Ctx = CurrF->getContext();

  Constant *TyC = UndefValue::get(IA->getFunctionType());
  MDString *ConstraintString = MDString::get(Ctx, IA->getConstraintString());
  SmallVector<Value *> Args = {
      buildMD(TyC),
      MetadataAsValue::get(Ctx, MDNode::get(Ctx, ConstraintString))};
  for (unsigned OpIdx = 0; OpIdx < Call.arg_size(); OpIdx++)
    Args.push_back(Call.getArgOperand(OpIdx));

  IRBuilder<> B(Ctx);
  B.SetInsertPoint(&Call);
  B.CreateIntrinsic(Intrinsic::spv_inline_asm, {Args});
  return &Call;
}

// Use a tip about rounding mode to create a decoration.
void SPIRVEmitIntrinsics::useRoundingMode(ConstrainedFPIntrinsic *FPI,
                                          IRBuilder<> &B) {
  std::optional<RoundingMode> RM = FPI->getRoundingMode();
  if (!RM.has_value())
    return;
  unsigned RoundingModeDeco = std::numeric_limits<unsigned>::max();
  switch (RM.value()) {
  default:
    // ignore unknown rounding modes
    break;
  case RoundingMode::NearestTiesToEven:
    RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTE;
    break;
  case RoundingMode::TowardNegative:
    RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTN;
    break;
  case RoundingMode::TowardPositive:
    RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTP;
    break;
  case RoundingMode::TowardZero:
    RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTZ;
    break;
  case RoundingMode::Dynamic:
  case RoundingMode::NearestTiesToAway:
    // TODO: check if supported
    break;
  }
  if (RoundingModeDeco == std::numeric_limits<unsigned>::max())
    return;
  // Convert the tip about rounding mode into a decoration record.
  createRoundingModeDecoration(FPI, RoundingModeDeco, B);
}
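
// For instance (a sketch), a constrained intrinsic call such as
//   call float @llvm.experimental.constrained.fadd.f32(float %a, float %b,
//       metadata !"round.towardzero", metadata !"fpexcept.ignore")
// carries RoundingMode::TowardZero and therefore receives an FPRoundingMode
// RTZ decoration via createRoundingModeDecoration().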

Instruction *SPIRVEmitIntrinsics::visitSwitchInst(SwitchInst &I) {
  BasicBlock *ParentBB = I.getParent();
  IRBuilder<> B(ParentBB);
  B.SetInsertPoint(&I);
  SmallVector<Value *, 4> Args;
  SmallVector<BasicBlock *> BBCases;
  for (auto &Op : I.operands()) {
    if (Op.get()->getType()->isSized()) {
      Args.push_back(Op);
    } else if (BasicBlock *BB = dyn_cast<BasicBlock>(Op.get())) {
      BBCases.push_back(BB);
      Args.push_back(BlockAddress::get(BB->getParent(), BB));
    } else {
      report_fatal_error("Unexpected switch operand");
    }
  }
  CallInst *NewI = B.CreateIntrinsic(Intrinsic::spv_switch,
                                     {I.getOperand(0)->getType()}, {Args});
  // remove the switch to avoid its unneeded and undesirable unwrapping into
  // branches and conditions
  replaceAllUsesWith(&I, NewI);
  I.eraseFromParent();
  // insert an artificial and temporary instruction to preserve a valid CFG;
  // it will be removed after the IR translation pass
  B.SetInsertPoint(ParentBB);
  IndirectBrInst *BrI = B.CreateIndirectBr(
      Constant::getNullValue(PointerType::getUnqual(ParentBB->getContext())),
      BBCases.size());
  for (BasicBlock *BBCase : BBCases)
    BrI->addDestination(BBCase);
  return BrI;
}
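
// Sketch of the effect of visitSwitchInst: a switch such as
//   switch i32 %x, label %def [ i32 1, label %bb1 ]
// becomes a @llvm.spv.switch call over %x, blockaddress(%def), i32 1 and
// blockaddress(%bb1), followed by a placeholder indirectbr that keeps %def
// and %bb1 reachable until the IR translation pass removes it.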

Instruction *SPIRVEmitIntrinsics::visitGetElementPtrInst(GetElementPtrInst &I) {
  if (I.getSourceElementType() == IntegerType::getInt8Ty(CurrF->getContext()) &&
      TM->getSubtarget<SPIRVSubtarget>(*CurrF).isLogicalSPIRV()) {
    Instruction *Result = buildLogicalAccessChainFromGEP(I);
    if (Result)
      return Result;
  }

  IRBuilder<> B(I.getParent());
  B.SetInsertPoint(&I);
  SmallVector<Type *, 2> Types = {I.getType(), I.getOperand(0)->getType()};
  SmallVector<Value *, 4> Args;
  Args.push_back(B.getInt1(I.isInBounds()));
  llvm::append_range(Args, I.operands());
  auto *NewI = B.CreateIntrinsic(Intrinsic::spv_gep, {Types}, {Args});
  replaceAllUsesWithAndErase(B, &I, NewI);
  return NewI;
}
1591
1592Instruction *SPIRVEmitIntrinsics::visitBitCastInst(BitCastInst &I) {
1593 IRBuilder<> B(I.getParent());
1594 B.SetInsertPoint(&I);
1595 Value *Source = I.getOperand(0);
1596
1597 // SPIR-V, contrary to LLVM 17+ IR, supports bitcasts between pointers of
1598 // varying element types. In IR coming from older versions of LLVM, such
1599 // bitcasts do not provide sufficient information and should simply be skipped
1600 // here, to be handled later in insertPtrCastOrAssignTypeInstr.
1601 if (isPointerTy(I.getType())) {
1602 replaceAllUsesWith(&I, Source);
1603 I.eraseFromParent();
1604 return nullptr;
1605 }
1606
1607 SmallVector<Type *, 2> Types = {I.getType(), Source->getType()};
1608 SmallVector<Value *> Args(I.op_begin(), I.op_end());
1609 auto *NewI = B.CreateIntrinsic(Intrinsic::spv_bitcast, {Types}, {Args});
1610 replaceAllUsesWithAndErase(B, &I, NewI);
1611 return NewI;
1612}
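// Illustrative sketch (editorial example, not from the original source): with
// opaque pointers,
//   %b = bitcast ptr %a to ptr
// carries no element-type information and is simply folded into %a here, while
// a value cast such as
//   %v = bitcast <2 x i32> %x to i64
// becomes, roughly, %v = call i64 @llvm.spv.bitcast(<2 x i32> %x).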
1613
1614void SPIRVEmitIntrinsics::insertAssignPtrTypeTargetExt(
1615 TargetExtType *AssignedType, Value *V, IRBuilder<> &B) {
1616 Type *VTy = V->getType();
1617
1618 // A couple of sanity checks.
1619 assert((isPointerTy(VTy)) && "Expect a pointer type!");
1620 if (Type *ElemTy = getPointeeType(VTy))
1621 if (ElemTy != AssignedType)
1622 report_fatal_error("Unexpected pointer element type!");
1623
1624 CallInst *AssignCI = GR->findAssignPtrTypeInstr(V);
1625 if (!AssignCI) {
1626 GR->buildAssignType(B, AssignedType, V);
1627 return;
1628 }
1629
1630 Type *CurrentType =
1631 cast<ValueAsMetadata>(
1632 cast<MetadataAsValue>(AssignCI->getOperand(1))->getMetadata())
1633 ->getType();
1634 if (CurrentType == AssignedType)
1635 return;
1636
1637 // Builtin types cannot be redeclared or casted.
1638 if (CurrentType->isTargetExtTy())
1639 report_fatal_error("Type mismatch " + CurrentType->getTargetExtName() +
1640 "/" + AssignedType->getTargetExtName() +
1641 " for value " + V->getName(),
1642 false);
1643
1644 // Our previous guess about the type seems to be wrong, so update the
1645 // inferred type according to the new, more precise type information.
1646 GR->updateAssignType(AssignCI, V, getNormalizedPoisonValue(AssignedType));
1647}
1648
1649void SPIRVEmitIntrinsics::replacePointerOperandWithPtrCast(
1650 Instruction *I, Value *Pointer, Type *ExpectedElementType,
1651 unsigned OperandToReplace, IRBuilder<> &B) {
1652 TypeValidated.insert(I);
1653
1654 // Do not emit spv_ptrcast if Pointer's element type is ExpectedElementType
1655 Type *PointerElemTy = deduceElementTypeHelper(Pointer, false);
1656 if (PointerElemTy == ExpectedElementType ||
1657 isEquivalentTypes(PointerElemTy, ExpectedElementType))
1658 return;
1659
1660 setInsertPointSkippingPhis(B, I);
1661 Value *ExpectedElementVal = getNormalizedPoisonValue(ExpectedElementType);
1662 MetadataAsValue *VMD = buildMD(ExpectedElementVal);
1663 unsigned AddressSpace = getPointerAddressSpace(Pointer->getType());
1664 bool FirstPtrCastOrAssignPtrType = true;
1665
1666 // Do not emit new spv_ptrcast if equivalent one already exists or when
1667 // spv_assign_ptr_type already targets this pointer with the same element
1668 // type.
1669 if (Pointer->hasUseList()) {
1670 for (auto User : Pointer->users()) {
1671 auto *II = dyn_cast<IntrinsicInst>(User);
1672 if (!II ||
1673 (II->getIntrinsicID() != Intrinsic::spv_assign_ptr_type &&
1674 II->getIntrinsicID() != Intrinsic::spv_ptrcast) ||
1675 II->getOperand(0) != Pointer)
1676 continue;
1677
1678 // There is some spv_ptrcast/spv_assign_ptr_type already targeting this
1679 // pointer.
1680 FirstPtrCastOrAssignPtrType = false;
1681 if (II->getOperand(1) != VMD ||
1682 dyn_cast<ConstantInt>(II->getOperand(2))->getSExtValue() !=
1683 AddressSpace)
1684 continue;
1685
1686 // The spv_ptrcast/spv_assign_ptr_type targeting this pointer is of the
1687 // same element type and address space.
1688 if (II->getIntrinsicID() != Intrinsic::spv_ptrcast)
1689 return;
1690
1691 // This must be an spv_ptrcast; do not emit a new one if this one is in the
1692 // same BB as I. Otherwise, search for other spv_ptrcast/spv_assign_ptr_type.
1693 if (II->getParent() != I->getParent())
1694 continue;
1695
1696 I->setOperand(OperandToReplace, II);
1697 return;
1698 }
1699 }
1700
1701 if (isa<Instruction>(Pointer) || isa<Argument>(Pointer)) {
1702 if (FirstPtrCastOrAssignPtrType) {
1703 // If this would be the first spv_ptrcast, do not emit spv_ptrcast and
1704 // emit spv_assign_ptr_type instead.
1705 GR->buildAssignPtr(B, ExpectedElementType, Pointer);
1706 return;
1707 } else if (isTodoType(Pointer)) {
1708 eraseTodoType(Pointer);
1709 if (!isa<CallInst>(Pointer) && !isa<GetElementPtrInst>(Pointer)) {
1710 // If this wouldn't be the first spv_ptrcast but existing type info is
1711 // incomplete, update spv_assign_ptr_type arguments.
1712 if (CallInst *AssignCI = GR->findAssignPtrTypeInstr(Pointer)) {
1713 Type *PrevElemTy = GR->findDeducedElementType(Pointer);
1714 assert(PrevElemTy);
1715 DenseSet<std::pair<Value *, Value *>> VisitedSubst{
1716 std::make_pair(I, Pointer)};
1717 GR->updateAssignType(AssignCI, Pointer, ExpectedElementVal);
1718 propagateElemType(Pointer, PrevElemTy, VisitedSubst);
1719 } else {
1720 GR->buildAssignPtr(B, ExpectedElementType, Pointer);
1721 }
1722 return;
1723 }
1724 }
1725 }
1726
1727 // Emit spv_ptrcast
1728 SmallVector<Type *, 2> Types = {Pointer->getType(), Pointer->getType()};
1729 SmallVector<Value *, 2> Args = {Pointer, VMD, B.getInt32(AddressSpace)};
1730 auto *PtrCastI = B.CreateIntrinsic(Intrinsic::spv_ptrcast, {Types}, Args);
1731 I->setOperand(OperandToReplace, PtrCastI);
1732 // We need to set up a pointee type for the newly created spv_ptrcast.
1733 GR->buildAssignPtr(B, ExpectedElementType, PtrCastI);
1734}
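// Illustrative sketch (editorial example, not from the original source): if %p
// was deduced as a pointer to i8 but instruction I accesses it as i32, the
// fallback path above emits, roughly,
//   %c = call ptr @llvm.spv.ptrcast(ptr %p, metadata i32 poison, i32 <AS>)
// assigns the i32 pointee type to %c and rewires I's operand to %c; on the
// very first encounter the cast collapses into a plain spv_assign_ptr_type.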
1735
1736void SPIRVEmitIntrinsics::insertPtrCastOrAssignTypeInstr(Instruction *I,
1737 IRBuilder<> &B) {
1738 // Handle basic instructions:
1739 StoreInst *SI = dyn_cast<StoreInst>(I);
1740 if (IsKernelArgInt8(CurrF, SI)) {
1741 replacePointerOperandWithPtrCast(
1742 I, SI->getValueOperand(), IntegerType::getInt8Ty(CurrF->getContext()),
1743 0, B);
1744 }
1745 if (SI) {
1746 Value *Op = SI->getValueOperand();
1747 Value *Pointer = SI->getPointerOperand();
1748 Type *OpTy = Op->getType();
1749 if (auto *OpI = dyn_cast<Instruction>(Op))
1750 OpTy = restoreMutatedType(GR, OpI, OpTy);
1751 if (OpTy == Op->getType())
1752 OpTy = deduceElementTypeByValueDeep(OpTy, Op, false);
1753 replacePointerOperandWithPtrCast(I, Pointer, OpTy, 1, B);
1754 return;
1755 }
1756 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
1757 Value *Pointer = LI->getPointerOperand();
1758 Type *OpTy = LI->getType();
1759 if (auto *PtrTy = dyn_cast<PointerType>(OpTy)) {
1760 if (Type *ElemTy = GR->findDeducedElementType(LI)) {
1761 OpTy = getTypedPointerWrapper(ElemTy, PtrTy->getAddressSpace());
1762 } else {
1763 Type *NewOpTy = OpTy;
1764 OpTy = deduceElementTypeByValueDeep(OpTy, LI, false);
1765 if (OpTy == NewOpTy)
1766 insertTodoType(Pointer);
1767 }
1768 }
1769 replacePointerOperandWithPtrCast(I, Pointer, OpTy, 0, B);
1770 return;
1771 }
1772 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
1773 Value *Pointer = GEPI->getPointerOperand();
1774 Type *OpTy = nullptr;
1775
1776 // Knowing the accessed type is mandatory for logical SPIR-V. Sadly,
1777 // the GEP source element type should not be used for this purpose, and
1778 // the alternative type-scavenging method is not working.
1779 // Physical SPIR-V can work around this, but not logical, hence still
1780 // try to rely on the broken type scavenging for logical.
1781 bool IsRewrittenGEP =
1782 GEPI->getSourceElementType() == IntegerType::getInt8Ty(I->getContext());
1783 if (IsRewrittenGEP && TM->getSubtargetImpl()->isLogicalSPIRV()) {
1784 Value *Src = getPointerRoot(Pointer);
1785 OpTy = GR->findDeducedElementType(Src);
1786 }
1787
1788 // In all cases, fall back to the GEP type if type scavenging failed.
1789 if (!OpTy)
1790 OpTy = GEPI->getSourceElementType();
1791
1792 replacePointerOperandWithPtrCast(I, Pointer, OpTy, 0, B);
1793 if (isNestedPointer(OpTy))
1794 insertTodoType(Pointer);
1795 return;
1796 }
1797
1798 // TODO: review and merge with existing logic:
1799 // Handle calls to builtins (non-intrinsics):
1800 CallInst *CI = dyn_cast<CallInst>(I);
1801 if (!CI || CI->isIndirectCall() || CI->isInlineAsm() ||
1802 !CI->getCalledFunction() || CI->getCalledFunction()->isIntrinsic())
1803 return;
1804
1805 // collect information about formal parameter types
1806 std::string DemangledName =
1807 getOclOrSpirvBuiltinDemangledName(CI->getCalledFunction()->getName());
1808 Function *CalledF = CI->getCalledFunction();
1809 SmallVector<Type *, 4> CalledArgTys;
1810 bool HaveTypes = false;
1811 for (unsigned OpIdx = 0; OpIdx < CalledF->arg_size(); ++OpIdx) {
1812 Argument *CalledArg = CalledF->getArg(OpIdx);
1813 Type *ArgType = CalledArg->getType();
1814 if (!isPointerTy(ArgType)) {
1815 CalledArgTys.push_back(nullptr);
1816 } else if (Type *ArgTypeElem = getPointeeType(ArgType)) {
1817 CalledArgTys.push_back(ArgTypeElem);
1818 HaveTypes = true;
1819 } else {
1820 Type *ElemTy = GR->findDeducedElementType(CalledArg);
1821 if (!ElemTy && hasPointeeTypeAttr(CalledArg))
1822 ElemTy = getPointeeTypeByAttr(CalledArg);
1823 if (!ElemTy) {
1824 ElemTy = getPointeeTypeByCallInst(DemangledName, CalledF, OpIdx);
1825 if (ElemTy) {
1826 GR->addDeducedElementType(CalledArg, normalizeType(ElemTy));
1827 } else {
1828 for (User *U : CalledArg->users()) {
1829 if (Instruction *Inst = dyn_cast<Instruction>(U)) {
1830 if ((ElemTy = deduceElementTypeHelper(Inst, false)) != nullptr)
1831 break;
1832 }
1833 }
1834 }
1835 }
1836 HaveTypes |= ElemTy != nullptr;
1837 CalledArgTys.push_back(ElemTy);
1838 }
1839 }
1840
1841 if (DemangledName.empty() && !HaveTypes)
1842 return;
1843
1844 for (unsigned OpIdx = 0; OpIdx < CI->arg_size(); OpIdx++) {
1845 Value *ArgOperand = CI->getArgOperand(OpIdx);
1846 if (!isPointerTy(ArgOperand->getType()))
1847 continue;
1848
1849 // Constants (nulls/undefs) are handled in insertAssignPtrTypeIntrs()
1850 if (!isa<Instruction>(ArgOperand) && !isa<Argument>(ArgOperand)) {
1851 // However, we may have assumptions about the formal argument's type and
1852 // may need to insert a ptr cast for the actual parameter of this
1853 // call.
1854 Argument *CalledArg = CalledF->getArg(OpIdx);
1855 if (!GR->findDeducedElementType(CalledArg))
1856 continue;
1857 }
1858
1859 Type *ExpectedType =
1860 OpIdx < CalledArgTys.size() ? CalledArgTys[OpIdx] : nullptr;
1861 if (!ExpectedType && !DemangledName.empty())
1862 ExpectedType = SPIRV::parseBuiltinCallArgumentBaseType(
1863 DemangledName, OpIdx, I->getContext());
1864 if (!ExpectedType || ExpectedType->isVoidTy())
1865 continue;
1866
1867 if (ExpectedType->isTargetExtTy() &&
1868 !isTypedPointerWrapper(cast<TargetExtType>(ExpectedType)))
1869 insertAssignPtrTypeTargetExt(cast<TargetExtType>(ExpectedType),
1870 ArgOperand, B);
1871 else
1872 replacePointerOperandWithPtrCast(CI, ArgOperand, ExpectedType, OpIdx, B);
1873 }
1874}
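// Illustrative sketch (editorial example, not from the original source): for a
// call to a builtin demangled as, e.g., atomic_add(volatile __global int *, int),
// parsing the name yields i32 as the expected pointee of argument 0, so the
// actual pointer argument receives an spv_ptrcast/spv_assign_ptr_type to that
// element type.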
1875
1876Instruction *SPIRVEmitIntrinsics::visitInsertElementInst(InsertElementInst &I) {
1877 // If it's a <1 x Type> vector type, don't modify it. It's not a legal vector
1878 // type in LLT and IRTranslator will replace it by the scalar.
1879 if (isVector1(I.getType()))
1880 return &I;
1881
1882 SmallVector<Type *, 4> Types = {I.getType(), I.getOperand(0)->getType(),
1883 I.getOperand(1)->getType(),
1884 I.getOperand(2)->getType()};
1885 IRBuilder<> B(I.getParent());
1886 B.SetInsertPoint(&I);
1887 SmallVector<Value *> Args(I.op_begin(), I.op_end());
1888 auto *NewI = B.CreateIntrinsic(Intrinsic::spv_insertelt, {Types}, {Args});
1889 replaceAllUsesWithAndErase(B, &I, NewI);
1890 return NewI;
1891}
1892
1893Instruction *
1894SPIRVEmitIntrinsics::visitExtractElementInst(ExtractElementInst &I) {
1895 // If it's a <1 x Type> vector type, don't modify it. It's not a legal vector
1896 // type in LLT and IRTranslator will replace it by the scalar.
1897 if (isVector1(I.getVectorOperandType()))
1898 return &I;
1899
1900 IRBuilder<> B(I.getParent());
1901 B.SetInsertPoint(&I);
1902 SmallVector<Type *, 3> Types = {I.getType(), I.getVectorOperandType(),
1903 I.getIndexOperand()->getType()};
1904 SmallVector<Value *, 2> Args = {I.getVectorOperand(), I.getIndexOperand()};
1905 auto *NewI = B.CreateIntrinsic(Intrinsic::spv_extractelt, {Types}, {Args});
1906 replaceAllUsesWithAndErase(B, &I, NewI);
1907 return NewI;
1908}
1909
1910Instruction *SPIRVEmitIntrinsics::visitInsertValueInst(InsertValueInst &I) {
1911 IRBuilder<> B(I.getParent());
1912 B.SetInsertPoint(&I);
1913 SmallVector<Type *, 1> Types = {I.getInsertedValueOperand()->getType()};
1914 SmallVector<Value *> Args;
1915 Value *AggregateOp = I.getAggregateOperand();
1916 if (isa<UndefValue>(AggregateOp))
1917 Args.push_back(UndefValue::get(B.getInt32Ty()));
1918 else
1919 Args.push_back(AggregateOp);
1920 Args.push_back(I.getInsertedValueOperand());
1921 for (auto &Op : I.indices())
1922 Args.push_back(B.getInt32(Op));
1923 Instruction *NewI =
1924 B.CreateIntrinsic(Intrinsic::spv_insertv, {Types}, {Args});
1925 replaceMemInstrUses(&I, NewI, B);
1926 return NewI;
1927}
1928
1929Instruction *SPIRVEmitIntrinsics::visitExtractValueInst(ExtractValueInst &I) {
1930 if (I.getAggregateOperand()->getType()->isAggregateType())
1931 return &I;
1932 IRBuilder<> B(I.getParent());
1933 B.SetInsertPoint(&I);
1934 SmallVector<Value *> Args(I.operands());
1935 for (auto &Op : I.indices())
1936 Args.push_back(B.getInt32(Op));
1937 auto *NewI =
1938 B.CreateIntrinsic(Intrinsic::spv_extractv, {I.getType()}, {Args});
1939 replaceAllUsesWithAndErase(B, &I, NewI);
1940 return NewI;
1941}
1942
1943Instruction *SPIRVEmitIntrinsics::visitLoadInst(LoadInst &I) {
1944 if (!I.getType()->isAggregateType())
1945 return &I;
1946 IRBuilder<> B(I.getParent());
1947 B.SetInsertPoint(&I);
1948 TrackConstants = false;
1949 const auto *TLI = TM->getSubtargetImpl()->getTargetLowering();
1950 MachineMemOperand::Flags Flags =
1951 TLI->getLoadMemOperandFlags(I, CurrF->getDataLayout());
1952 auto *NewI =
1953 B.CreateIntrinsic(Intrinsic::spv_load, {I.getOperand(0)->getType()},
1954 {I.getPointerOperand(), B.getInt16(Flags),
1955 B.getInt8(I.getAlign().value())});
1956 replaceMemInstrUses(&I, NewI, B);
1957 return NewI;
1958}
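// Illustrative sketch (editorial example, not from the original source): an
// aggregate load
//   %v = load %struct.S, ptr %p, align 4
// becomes, roughly,
//   %v = call ... @llvm.spv.load(ptr %p, i16 <memop flags>, i8 4)
// and replaceMemInstrUses() rewires users of the aggregate result.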
1959
1960Instruction *SPIRVEmitIntrinsics::visitStoreInst(StoreInst &I) {
1961 if (!AggrStores.contains(&I))
1962 return &I;
1963 IRBuilder<> B(I.getParent());
1964 B.SetInsertPoint(&I);
1965 TrackConstants = false;
1966 const auto *TLI = TM->getSubtargetImpl()->getTargetLowering();
1967 MachineMemOperand::Flags Flags =
1968 TLI->getStoreMemOperandFlags(I, CurrF->getDataLayout());
1969 auto *PtrOp = I.getPointerOperand();
1970 auto *NewI = B.CreateIntrinsic(
1971 Intrinsic::spv_store, {I.getValueOperand()->getType(), PtrOp->getType()},
1972 {I.getValueOperand(), PtrOp, B.getInt16(Flags),
1973 B.getInt8(I.getAlign().value())});
1974 NewI->copyMetadata(I);
1975 I.eraseFromParent();
1976 return NewI;
1977}
1978
1979Instruction *SPIRVEmitIntrinsics::visitAllocaInst(AllocaInst &I) {
1980 Value *ArraySize = nullptr;
1981 if (I.isArrayAllocation()) {
1982 const SPIRVSubtarget *STI = TM->getSubtargetImpl(*I.getFunction());
1983 if (!STI->canUseExtension(
1984 SPIRV::Extension::SPV_INTEL_variable_length_array))
1985 report_fatal_error(
1986 "array allocation: this instruction requires the following "
1987 "SPIR-V extension: SPV_INTEL_variable_length_array",
1988 false);
1989 ArraySize = I.getArraySize();
1990 }
1991 IRBuilder<> B(I.getParent());
1992 B.SetInsertPoint(&I);
1993 TrackConstants = false;
1994 Type *PtrTy = I.getType();
1995 auto *NewI =
1996 ArraySize
1997 ? B.CreateIntrinsic(Intrinsic::spv_alloca_array,
1998 {PtrTy, ArraySize->getType()},
1999 {ArraySize, B.getInt8(I.getAlign().value())})
2000 : B.CreateIntrinsic(Intrinsic::spv_alloca, {PtrTy},
2001 {B.getInt8(I.getAlign().value())});
2002 replaceAllUsesWithAndErase(B, &I, NewI);
2003 return NewI;
2004}
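// Illustrative sketch (editorial example, not from the original source):
//   %a = alloca i32, align 4          -> call ptr @llvm.spv.alloca(i8 4)
//   %b = alloca i32, i64 %n, align 4  -> call ptr @llvm.spv.alloca.array(i64 %n, i8 4)
// where the array form is only legal with SPV_INTEL_variable_length_array.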
2005
2006Instruction *SPIRVEmitIntrinsics::visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
2007 assert(I.getType()->isAggregateType() && "Aggregate result is expected");
2008 IRBuilder<> B(I.getParent());
2009 B.SetInsertPoint(&I);
2010 SmallVector<Value *> Args(I.operands());
2011 Args.push_back(B.getInt32(
2012 static_cast<uint32_t>(getMemScope(I.getContext(), I.getSyncScopeID()))));
2013 Args.push_back(B.getInt32(
2014 static_cast<uint32_t>(getMemSemantics(I.getSuccessOrdering()))));
2015 Args.push_back(B.getInt32(
2016 static_cast<uint32_t>(getMemSemantics(I.getFailureOrdering()))));
2017 auto *NewI = B.CreateIntrinsic(Intrinsic::spv_cmpxchg,
2018 {I.getPointerOperand()->getType()}, {Args});
2019 replaceMemInstrUses(&I, NewI, B);
2020 return NewI;
2021}
2022
2023Instruction *SPIRVEmitIntrinsics::visitUnreachableInst(UnreachableInst &I) {
2024 IRBuilder<> B(I.getParent());
2025 B.SetInsertPoint(&I);
2026 B.CreateIntrinsic(Intrinsic::spv_unreachable, {});
2027 return &I;
2028}
2029
2030void SPIRVEmitIntrinsics::processGlobalValue(GlobalVariable &GV,
2031 IRBuilder<> &B) {
2032 // Skip special artificial variables.
2033 static const StringSet<> ArtificialGlobals{"llvm.global.annotations",
2034 "llvm.compiler.used"};
2035
2036 if (ArtificialGlobals.contains(GV.getName()))
2037 return;
2038
2039 Constant *Init = nullptr;
2040 if (hasInitializer(&GV)) {
2041 // Deduce element type and store results in Global Registry.
2042 // Result is ignored, because TypedPointerType is not supported
2043 // by llvm IR general logic.
2044 deduceElementTypeHelper(&GV, false);
2045 Init = GV.getInitializer();
2046 Type *Ty = isAggrConstForceInt32(Init) ? B.getInt32Ty() : Init->getType();
2047 Constant *Const = isAggrConstForceInt32(Init) ? B.getInt32(1) : Init;
2048 auto *InitInst = B.CreateIntrinsic(Intrinsic::spv_init_global,
2049 {GV.getType(), Ty}, {&GV, Const});
2050 InitInst->setArgOperand(1, Init);
2051 }
2052 if (!Init && GV.use_empty())
2053 B.CreateIntrinsic(Intrinsic::spv_unref_global, GV.getType(), &GV);
2054}
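// Illustrative sketch (editorial example, not from the original source): for
//   @g = addrspace(1) global i32 42
// this emits, roughly,
//   call void @llvm.spv.init.global(ptr addrspace(1) @g, i32 42)
// while an uninitialized and unused global gets spv_unref_global instead.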
2055
2056// Return true if we can't decide what the pointee type is yet and will get
2057// back to the question later. Return false if spv_assign_ptr_type is not
2058// needed or can be inserted immediately.
2059bool SPIRVEmitIntrinsics::insertAssignPtrTypeIntrs(Instruction *I,
2060 IRBuilder<> &B,
2061 bool UnknownElemTypeI8) {
2062 reportFatalOnTokenType(I);
2063 if (!isPointerTy(I->getType()) || !requireAssignType(I))
2064 return false;
2065
2066 setInsertPointSkippingPhis(B, I);
2067 if (Type *ElemTy = deduceElementType(I, UnknownElemTypeI8)) {
2068 GR->buildAssignPtr(B, ElemTy, I);
2069 return false;
2070 }
2071 return true;
2072}
2073
2074void SPIRVEmitIntrinsics::insertAssignTypeIntrs(Instruction *I,
2075 IRBuilder<> &B) {
2076 // TODO: extend the list of functions with known result types
2077 static StringMap<unsigned> ResTypeWellKnown = {
2078 {"async_work_group_copy", WellKnownTypes::Event},
2079 {"async_work_group_strided_copy", WellKnownTypes::Event},
2080 {"__spirv_GroupAsyncCopy", WellKnownTypes::Event}};
2081
2082 reportFatalOnTokenType(I);
2083
2084 bool IsKnown = false;
2085 if (auto *CI = dyn_cast<CallInst>(I)) {
2086 if (!CI->isIndirectCall() && !CI->isInlineAsm() &&
2087 CI->getCalledFunction() && !CI->getCalledFunction()->isIntrinsic()) {
2088 Function *CalledF = CI->getCalledFunction();
2089 std::string DemangledName =
2090 getOclOrSpirvBuiltinDemangledName(CalledF->getName());
2091 FPDecorationId DecorationId = FPDecorationId::NONE;
2092 if (DemangledName.length() > 0)
2093 DemangledName =
2094 SPIRV::lookupBuiltinNameHelper(DemangledName, &DecorationId);
2095 auto ResIt = ResTypeWellKnown.find(DemangledName);
2096 if (ResIt != ResTypeWellKnown.end()) {
2097 IsKnown = true;
2098 setInsertPointAfterDef(B, I);
2099 switch (ResIt->second) {
2100 case WellKnownTypes::Event:
2101 GR->buildAssignType(
2102 B, TargetExtType::get(I->getContext(), "spirv.Event"), I);
2103 break;
2104 }
2105 }
2106 // check if a floating-point rounding mode or saturation info is present
2107 switch (DecorationId) {
2108 default:
2109 break;
2110 case FPDecorationId::SAT:
2111 createSaturatedConversionDecoration(CI, B);
2112 break;
2113 case FPDecorationId::RTE:
2114 createRoundingModeDecoration(
2115 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTE, B);
2116 break;
2117 case FPDecorationId::RTZ:
2118 createRoundingModeDecoration(
2119 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTZ, B);
2120 break;
2121 case FPDecorationId::RTP:
2122 createRoundingModeDecoration(
2123 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTP, B);
2124 break;
2125 case FPDecorationId::RTN:
2126 createRoundingModeDecoration(
2127 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTN, B);
2128 break;
2129 }
2130 }
2131 }
2132
2133 Type *Ty = I->getType();
2134 if (!IsKnown && !Ty->isVoidTy() && !isPointerTy(Ty) && requireAssignType(I)) {
2135 setInsertPointAfterDef(B, I);
2136 Type *TypeToAssign = Ty;
2137 if (auto *II = dyn_cast<IntrinsicInst>(I)) {
2138 if (II->getIntrinsicID() == Intrinsic::spv_const_composite ||
2139 II->getIntrinsicID() == Intrinsic::spv_undef) {
2140 auto It = AggrConstTypes.find(II);
2141 if (It == AggrConstTypes.end())
2142 report_fatal_error("Unknown composite intrinsic type");
2143 TypeToAssign = It->second;
2144 }
2145 }
2146 TypeToAssign = restoreMutatedType(GR, I, TypeToAssign);
2147 GR->buildAssignType(B, TypeToAssign, I);
2148 }
2149 for (const auto &Op : I->operands()) {
2150 if (isa<ConstantPointerNull>(Op) || isa<UndefValue>(Op) ||
2151 // Check GetElementPtrConstantExpr case.
2152 (isa<ConstantExpr>(Op) && isa<GEPOperator>(Op))) {
2153 setInsertPointSkippingPhis(B, I);
2154 Type *OpTy = Op->getType();
2155 if (isa<UndefValue>(Op) && OpTy->isAggregateType()) {
2156 CallInst *AssignCI =
2157 buildIntrWithMD(Intrinsic::spv_assign_type, {B.getInt32Ty()}, Op,
2158 UndefValue::get(B.getInt32Ty()), {}, B);
2159 GR->addAssignPtrTypeInstr(Op, AssignCI);
2160 } else if (!isa<Instruction>(Op)) {
2161 Type *OpTy = Op->getType();
2162 Type *OpTyElem = getPointeeType(OpTy);
2163 if (OpTyElem) {
2164 GR->buildAssignPtr(B, OpTyElem, Op);
2165 } else if (isPointerTy(OpTy)) {
2166 Type *ElemTy = GR->findDeducedElementType(Op);
2167 GR->buildAssignPtr(B, ElemTy ? ElemTy : deduceElementType(Op, true),
2168 Op);
2169 } else {
2170 Value *OpTyVal = Op;
2171 if (OpTy->isTargetExtTy()) {
2172 // We need to do this in order to be consistent with how target ext
2173 // types are handled in `processInstrAfterVisit`
2174 OpTyVal = getNormalizedPoisonValue(OpTy);
2175 }
2176 CallInst *AssignCI =
2177 buildIntrWithMD(Intrinsic::spv_assign_type, {OpTy},
2178 getNormalizedPoisonValue(OpTy), OpTyVal, {}, B);
2179 GR->addAssignPtrTypeInstr(OpTyVal, AssignCI);
2180 }
2181 }
2182 }
2183 }
2184}
2185
2186bool SPIRVEmitIntrinsics::shouldTryToAddMemAliasingDecoration(
2187 Instruction *Inst) {
2188 const SPIRVSubtarget *STI = TM->getSubtargetImpl(*Inst->getFunction());
2189 if (!STI->canUseExtension(SPIRV::Extension::SPV_INTEL_memory_access_aliasing))
2190 return false;
2191 // Add aliasing decorations to internal load and store intrinsics
2192 // and atomic instructions, skipping atomic store as it won't have an ID to
2193 // attach the decoration to.
2194 CallInst *CI = dyn_cast<CallInst>(Inst);
2195 if (!CI)
2196 return false;
2197 if (Function *Fun = CI->getCalledFunction()) {
2198 if (Fun->isIntrinsic()) {
2199 switch (Fun->getIntrinsicID()) {
2200 case Intrinsic::spv_load:
2201 case Intrinsic::spv_store:
2202 return true;
2203 default:
2204 return false;
2205 }
2206 }
2207 const std::string Name = getOclOrSpirvBuiltinDemangledName(Fun->getName());
2208 const std::string Prefix = "__spirv_Atomic";
2209 const bool IsAtomic = Name.find(Prefix) == 0;
2210
2211 if (!Fun->getReturnType()->isVoidTy() && IsAtomic)
2212 return true;
2213 }
2214 return false;
2215}
2216
2217void SPIRVEmitIntrinsics::insertSpirvDecorations(Instruction *I,
2218 IRBuilder<> &B) {
2219 if (MDNode *MD = I->getMetadata("spirv.Decorations")) {
2220 setInsertPointAfterDef(B, I);
2221 B.CreateIntrinsic(Intrinsic::spv_assign_decoration, {I->getType()},
2222 {I, MetadataAsValue::get(I->getContext(), MD)});
2223 }
2224 // Lower alias.scope/noalias metadata
2225 {
2226 auto processMemAliasingDecoration = [&](unsigned Kind) {
2227 if (MDNode *AliasListMD = I->getMetadata(Kind)) {
2228 if (shouldTryToAddMemAliasingDecoration(I)) {
2229 uint32_t Dec = Kind == LLVMContext::MD_alias_scope
2230 ? SPIRV::Decoration::AliasScopeINTEL
2231 : SPIRV::Decoration::NoAliasINTEL;
2232 SmallVector<Value *, 3> Args = {
2233 I, ConstantInt::get(B.getInt32Ty(), Dec),
2234 MetadataAsValue::get(I->getContext(), AliasListMD)};
2235 setInsertPointAfterDef(B, I);
2236 B.CreateIntrinsic(Intrinsic::spv_assign_aliasing_decoration,
2237 {I->getType()}, {Args});
2238 }
2239 }
2240 };
2241 processMemAliasingDecoration(LLVMContext::MD_alias_scope);
2242 processMemAliasingDecoration(LLVMContext::MD_noalias);
2243 }
2244 // MD_fpmath
2245 if (MDNode *MD = I->getMetadata(LLVMContext::MD_fpmath)) {
2246 const SPIRVSubtarget *STI = TM->getSubtargetImpl(*I->getFunction());
2247 bool AllowFPMaxError =
2248 STI->canUseExtension(SPIRV::Extension::SPV_INTEL_fp_max_error);
2249 if (!AllowFPMaxError)
2250 return;
2251
2252 setInsertPointAfterDef(B, I);
2253 B.CreateIntrinsic(Intrinsic::spv_assign_fpmaxerror_decoration,
2254 {I->getType()},
2255 {I, MetadataAsValue::get(I->getContext(), MD)});
2256 }
2257}
2258
2259static SPIRV::FPFastMathDefaultInfoVector &getOrCreateFPFastMathDefaultInfoVec(
2260 const Module &M,
2261 DenseMap<Function *, SPIRV::FPFastMathDefaultInfoVector>
2262 &FPFastMathDefaultInfoMap,
2263 Function *F) {
2264 auto it = FPFastMathDefaultInfoMap.find(F);
2265 if (it != FPFastMathDefaultInfoMap.end())
2266 return it->second;
2267
2268 // If the map does not contain the entry, create a new one. Initialize it to
2269 // contain all 3 elements sorted by bit width of target type: {half, float,
2270 // double}.
2271 SPIRV::FPFastMathDefaultInfoVector FPFastMathDefaultInfoVec;
2272 FPFastMathDefaultInfoVec.emplace_back(Type::getHalfTy(M.getContext()),
2273 SPIRV::FPFastMathMode::None);
2274 FPFastMathDefaultInfoVec.emplace_back(Type::getFloatTy(M.getContext()),
2275 SPIRV::FPFastMathMode::None);
2276 FPFastMathDefaultInfoVec.emplace_back(Type::getDoubleTy(M.getContext()),
2277 SPIRV::FPFastMathMode::None);
2278 return FPFastMathDefaultInfoMap[F] = std::move(FPFastMathDefaultInfoVec);
2279}
2280
2281static SPIRV::FPFastMathDefaultInfo &getFPFastMathDefaultInfo(
2282 SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec,
2283 const Type *Ty) {
2284 size_t BitWidth = Ty->getScalarSizeInBits();
2285 int Index =
2286 SPIRV::FPFastMathDefaultInfoVector::computeFPFastMathDefaultInfoVecIndex(
2287 BitWidth);
2288 assert(Index >= 0 && Index < 3 &&
2289 "Expected FPFastMathDefaultInfo for half, float, or double");
2290 assert(FPFastMathDefaultInfoVec.size() == 3 &&
2291 "Expected FPFastMathDefaultInfoVec to have exactly 3 elements");
2292 return FPFastMathDefaultInfoVec[Index];
2293}
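// Illustrative example (editorial note, not from the original source): the
// vector is indexed by scalar bit width in the fixed order {half, float,
// double}, so a float operand (32 bits) maps to Index 1 and its entry is
// returned.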
2294
2295void SPIRVEmitIntrinsics::insertConstantsForFPFastMathDefault(Module &M) {
2296 const SPIRVSubtarget *ST = TM->getSubtargetImpl();
2297 if (!ST->canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2))
2298 return;
2299
2300 // Store the FPFastMathDefaultInfo in the FPFastMathDefaultInfoMap.
2301 // We need the entry point (function) as the key, and the target
2302 // type and flags as the value.
2303 // We also need to check ContractionOff and SignedZeroInfNanPreserve
2304 // execution modes, as they are now deprecated and must be replaced
2305 // with FPFastMathDefaultInfo.
2306 auto Node = M.getNamedMetadata("spirv.ExecutionMode");
2307 if (!Node) {
2308 if (!M.getNamedMetadata("opencl.enable.FP_CONTRACT")) {
2309 // This requires emitting ContractionOff. However, because
2310 // ContractionOff is now deprecated, we need to replace it with
2311 // FPFastMathDefaultInfo with FP Fast Math Mode bitmask set to all 0.
2312 // We need to create the constant for that.
2313
2314 // Create constant instruction with the bitmask flags.
2315 Constant *InitValue =
2316 ConstantInt::get(Type::getInt32Ty(M.getContext()), 0);
2317 // TODO: Reuse constant if there is one already with the required
2318 // value.
2319 [[maybe_unused]] GlobalVariable *GV =
2320 new GlobalVariable(M, // Module
2321 Type::getInt32Ty(M.getContext()), // Type
2322 true, // isConstant
2323 GlobalValue::InternalLinkage, // Linkage
2324 InitValue // Initializer
2325 );
2326 }
2327 return;
2328 }
2329
2330 // The table maps function pointers to their default FP fast math info. It
2331 // can be assumed that the SmallVector is sorted by the bit width of the
2332 // type. The first element is the smallest bit width, and the last element
2333 // is the largest bit width, therefore, we will have {half, float, double}
2334 // in the order of their bit widths.
2335 DenseMap<Function *, SPIRV::FPFastMathDefaultInfoVector>
2336 FPFastMathDefaultInfoMap;
2337
2338 for (unsigned i = 0; i < Node->getNumOperands(); i++) {
2339 MDNode *MDN = cast<MDNode>(Node->getOperand(i));
2340 assert(MDN->getNumOperands() >= 2 && "Expected at least 2 operands");
2341 Function *F = cast<Function>(
2342 cast<ConstantAsMetadata>(MDN->getOperand(0))->getValue());
2343 const auto EM =
2344 cast<ConstantInt>(
2345 cast<ConstantAsMetadata>(MDN->getOperand(1))->getValue())
2346 ->getZExtValue();
2347 if (EM == SPIRV::ExecutionMode::FPFastMathDefault) {
2348 assert(MDN->getNumOperands() == 4 &&
2349 "Expected 4 operands for FPFastMathDefault");
2350 const Type *T = cast<ValueAsMetadata>(MDN->getOperand(2))->getType();
2351 unsigned Flags =
2352 cast<ConstantInt>(
2353 cast<ConstantAsMetadata>(MDN->getOperand(3))->getValue())
2354 ->getZExtValue();
2355 SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
2356 getOrCreateFPFastMathDefaultInfoVec(M, FPFastMathDefaultInfoMap, F);
2357 SPIRV::FPFastMathDefaultInfo &Info =
2358 getFPFastMathDefaultInfo(FPFastMathDefaultInfoVec, T);
2359 Info.FastMathFlags = Flags;
2360 Info.FPFastMathDefault = true;
2361 } else if (EM == SPIRV::ExecutionMode::ContractionOff) {
2362 assert(MDN->getNumOperands() == 2 &&
2363 "Expected no operands for ContractionOff");
2364
2365 // We need to save this info for every possible FP type, i.e. {half,
2366 // float, double, fp128}.
2367 SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
2368 getOrCreateFPFastMathDefaultInfoVec(M, FPFastMathDefaultInfoMap, F);
2369 for (SPIRV::FPFastMathDefaultInfo &Info : FPFastMathDefaultInfoVec) {
2370 Info.ContractionOff = true;
2371 }
2372 } else if (EM == SPIRV::ExecutionMode::SignedZeroInfNanPreserve) {
2373 assert(MDN->getNumOperands() == 3 &&
2374 "Expected 1 operand for SignedZeroInfNanPreserve");
2375 unsigned TargetWidth =
2376 cast<ConstantInt>(
2377 cast<ConstantAsMetadata>(MDN->getOperand(2))->getValue())
2378 ->getZExtValue();
2379 // We need to save this info only for the FP type with TargetWidth.
2380 SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
2381 getOrCreateFPFastMathDefaultInfoVec(M, FPFastMathDefaultInfoMap, F);
2382 int Index = SPIRV::FPFastMathDefaultInfoVector::
2383 computeFPFastMathDefaultInfoVecIndex(TargetWidth);
2384 assert(Index >= 0 && Index < 3 &&
2385 "Expected FPFastMathDefaultInfo for half, float, or double");
2386 assert(FPFastMathDefaultInfoVec.size() == 3 &&
2387 "Expected FPFastMathDefaultInfoVec to have exactly 3 elements");
2388 FPFastMathDefaultInfoVec[Index].SignedZeroInfNanPreserve = true;
2389 }
2390 }
2391
2392 std::unordered_map<unsigned, GlobalVariable *> GlobalVars;
2393 for (auto &[Func, FPFastMathDefaultInfoVec] : FPFastMathDefaultInfoMap) {
2394 if (FPFastMathDefaultInfoVec.empty())
2395 continue;
2396
2397 for (const SPIRV::FPFastMathDefaultInfo &Info : FPFastMathDefaultInfoVec) {
2398 assert(Info.Ty && "Expected target type for FPFastMathDefaultInfo");
2399 // Skip if none of the execution modes was used.
2400 unsigned Flags = Info.FastMathFlags;
2401 if (Flags == SPIRV::FPFastMathMode::None && !Info.ContractionOff &&
2402 !Info.SignedZeroInfNanPreserve && !Info.FPFastMathDefault)
2403 continue;
2404
2405 // Check if flags are compatible.
2406 if (Info.ContractionOff && (Flags & SPIRV::FPFastMathMode::AllowContract))
2407 report_fatal_error("Conflicting FPFastMathFlags: ContractionOff "
2408 "and AllowContract");
2409
2410 if (Info.SignedZeroInfNanPreserve &&
2411 !(Flags &
2412 (SPIRV::FPFastMathMode::NotNaN | SPIRV::FPFastMathMode::NotInf |
2413 SPIRV::FPFastMathMode::NSZ))) {
2414 if (Info.FPFastMathDefault)
2415 report_fatal_error("Conflicting FPFastMathFlags: "
2416 "SignedZeroInfNanPreserve but at least one of "
2417 "NotNaN/NotInf/NSZ is enabled.");
2418 }
2419
2420 if ((Flags & SPIRV::FPFastMathMode::AllowTransform) &&
2421 !((Flags & SPIRV::FPFastMathMode::AllowReassoc) &&
2422 (Flags & SPIRV::FPFastMathMode::AllowContract))) {
2423 report_fatal_error("Conflicting FPFastMathFlags: "
2424 "AllowTransform requires AllowReassoc and "
2425 "AllowContract to be set.");
2426 }
2427
2428 auto it = GlobalVars.find(Flags);
2429 GlobalVariable *GV = nullptr;
2430 if (it != GlobalVars.end()) {
2431 // Reuse existing global variable.
2432 GV = it->second;
2433 } else {
2434 // Create constant instruction with the bitmask flags.
2435 Constant *InitValue =
2436 ConstantInt::get(Type::getInt32Ty(M.getContext()), Flags);
2437 // TODO: Reuse constant if there is one already with the required
2438 // value.
2439 GV = new GlobalVariable(M, // Module
2440 Type::getInt32Ty(M.getContext()), // Type
2441 true, // isConstant
2442 GlobalValue::InternalLinkage, // Linkage
2443 InitValue // Initializer
2444 );
2445 GlobalVars[Flags] = GV;
2446 }
2447 }
2448 }
2449}
2450
2451void SPIRVEmitIntrinsics::processInstrAfterVisit(Instruction *I,
2452 IRBuilder<> &B) {
2453 auto *II = dyn_cast<IntrinsicInst>(I);
2454 bool IsConstComposite =
2455 II && II->getIntrinsicID() == Intrinsic::spv_const_composite;
2456 if (IsConstComposite && TrackConstants) {
2457 setInsertPointAfterDef(B, I);
2458 auto t = AggrConsts.find(I);
2459 assert(t != AggrConsts.end());
2460 auto *NewOp =
2461 buildIntrWithMD(Intrinsic::spv_track_constant,
2462 {II->getType(), II->getType()}, t->second, I, {}, B);
2463 replaceAllUsesWith(I, NewOp, false);
2464 NewOp->setArgOperand(0, I);
2465 }
2466 bool IsPhi = isa<PHINode>(I), BPrepared = false;
2467 for (const auto &Op : I->operands()) {
2468 if (isa<PHINode>(I) || isa<SwitchInst>(I) ||
2469 !(isa<ConstantData>(Op) || isa<ConstantExpr>(Op)))
2470 continue;
2471 unsigned OpNo = Op.getOperandNo();
2472 if (II && ((II->getIntrinsicID() == Intrinsic::spv_gep && OpNo == 0) ||
2473 (II->paramHasAttr(OpNo, Attribute::ImmArg))))
2474 continue;
2475
2476 if (!BPrepared) {
2477 IsPhi ? B.SetInsertPointPastAllocas(I->getParent()->getParent())
2478 : B.SetInsertPoint(I);
2479 BPrepared = true;
2480 }
2481 Type *OpTy = Op->getType();
2482 Type *OpElemTy = GR->findDeducedElementType(Op);
2483 Value *NewOp = Op;
2484 if (OpTy->isTargetExtTy()) {
2485 // Since this value is replaced by poison, we need to do the same in
2486 // `insertAssignTypeIntrs`.
2487 Value *OpTyVal = getNormalizedPoisonValue(OpTy);
2488 NewOp = buildIntrWithMD(Intrinsic::spv_track_constant,
2489 {OpTy, OpTyVal->getType()}, Op, OpTyVal, {}, B);
2490 }
2491 if (!IsConstComposite && isPointerTy(OpTy) && OpElemTy != nullptr &&
2492 OpElemTy != IntegerType::getInt8Ty(I->getContext())) {
2493 SmallVector<Type *, 2> Types = {OpTy, OpTy};
2494 SmallVector<Value *, 2> Args = {
2495 NewOp, buildMD(getNormalizedPoisonValue(OpElemTy)),
2496 B.getInt32(getPointerAddressSpace(OpTy))};
2497 CallInst *PtrCasted =
2498 B.CreateIntrinsic(Intrinsic::spv_ptrcast, {Types}, Args);
2499 GR->buildAssignPtr(B, OpElemTy, PtrCasted);
2500 NewOp = PtrCasted;
2501 }
2502 if (NewOp != Op)
2503 I->setOperand(OpNo, NewOp);
2504 }
2505 if (Named.insert(I).second)
2506 emitAssignName(I, B);
2507}
2508
2509Type *SPIRVEmitIntrinsics::deduceFunParamElementType(Function *F,
2510 unsigned OpIdx) {
2511 std::unordered_set<Function *> FVisited;
2512 return deduceFunParamElementType(F, OpIdx, FVisited);
2513}
2514
2515Type *SPIRVEmitIntrinsics::deduceFunParamElementType(
2516 Function *F, unsigned OpIdx, std::unordered_set<Function *> &FVisited) {
2517 // maybe a cycle
2518 if (!FVisited.insert(F).second)
2519 return nullptr;
2520
2521 std::unordered_set<Value *> Visited;
2522 SmallVector<std::pair<Function *, unsigned>> Lookup;
2523 // search in function's call sites
2524 for (User *U : F->users()) {
2525 CallInst *CI = dyn_cast<CallInst>(U);
2526 if (!CI || OpIdx >= CI->arg_size())
2527 continue;
2528 Value *OpArg = CI->getArgOperand(OpIdx);
2529 if (!isPointerTy(OpArg->getType()))
2530 continue;
2531 // maybe we already know operand's element type
2532 if (Type *KnownTy = GR->findDeducedElementType(OpArg))
2533 return KnownTy;
2534 // try to deduce from the operand itself
2535 Visited.clear();
2536 if (Type *Ty = deduceElementTypeHelper(OpArg, Visited, false))
2537 return Ty;
2538 // search in actual parameter's users
2539 for (User *OpU : OpArg->users()) {
2540 Instruction *Inst = dyn_cast<Instruction>(OpU);
2541 if (!Inst || Inst == CI)
2542 continue;
2543 Visited.clear();
2544 if (Type *Ty = deduceElementTypeHelper(Inst, Visited, false))
2545 return Ty;
2546 }
2547 // check if it's a formal parameter of the outer function
2548 if (!CI->getParent() || !CI->getParent()->getParent())
2549 continue;
2550 Function *OuterF = CI->getParent()->getParent();
2551 if (FVisited.find(OuterF) != FVisited.end())
2552 continue;
2553 for (unsigned i = 0; i < OuterF->arg_size(); ++i) {
2554 if (OuterF->getArg(i) == OpArg) {
2555 Lookup.push_back(std::make_pair(OuterF, i));
2556 break;
2557 }
2558 }
2559 }
2560
2561 // search in function parameters
2562 for (auto &Pair : Lookup) {
2563 if (Type *Ty = deduceFunParamElementType(Pair.first, Pair.second, FVisited))
2564 return Ty;
2565 }
2566
2567 return nullptr;
2568}
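// Illustrative sketch (editorial example, not from the original source): given
//   define void @callee(ptr %p) { ... }
//   define void @caller(ptr %q) { call void @callee(ptr %q) }
// the pointee of @callee's %p is taken from what is known (or deducible) about
// %q at the call site; failing that, the search recurses into @caller's own
// formal parameters via the Lookup worklist.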
2569
2570void SPIRVEmitIntrinsics::processParamTypesByFunHeader(Function *F,
2571 IRBuilder<> &B) {
2572 B.SetInsertPointPastAllocas(F);
2573 for (unsigned OpIdx = 0; OpIdx < F->arg_size(); ++OpIdx) {
2574 Argument *Arg = F->getArg(OpIdx);
2575 if (!isUntypedPointerTy(Arg->getType()))
2576 continue;
2577 Type *ElemTy = GR->findDeducedElementType(Arg);
2578 if (ElemTy)
2579 continue;
2580 if (hasPointeeTypeAttr(Arg) &&
2581 (ElemTy = getPointeeTypeByAttr(Arg)) != nullptr) {
2582 GR->buildAssignPtr(B, ElemTy, Arg);
2583 continue;
2584 }
2585 // search in function's call sites
2586 for (User *U : F->users()) {
2587 CallInst *CI = dyn_cast<CallInst>(U);
2588 if (!CI || OpIdx >= CI->arg_size())
2589 continue;
2590 Value *OpArg = CI->getArgOperand(OpIdx);
2591 if (!isPointerTy(OpArg->getType()))
2592 continue;
2593 // maybe we already know operand's element type
2594 if ((ElemTy = GR->findDeducedElementType(OpArg)) != nullptr)
2595 break;
2596 }
2597 if (ElemTy) {
2598 GR->buildAssignPtr(B, ElemTy, Arg);
2599 continue;
2600 }
2601 if (HaveFunPtrs) {
2602 for (User *U : Arg->users()) {
2603 CallInst *CI = dyn_cast<CallInst>(U);
2604 if (CI && !isa<IntrinsicInst>(CI) && CI->isIndirectCall() &&
2605 CI->getCalledOperand() == Arg &&
2606 CI->getParent()->getParent() == CurrF) {
2607 SmallVector<std::pair<Value *, unsigned>> Ops;
2608 deduceOperandElementTypeFunctionPointer(CI, Ops, ElemTy, false);
2609 if (ElemTy) {
2610 GR->buildAssignPtr(B, ElemTy, Arg);
2611 break;
2612 }
2613 }
2614 }
2615 }
2616 }
2617}
2618
2619void SPIRVEmitIntrinsics::processParamTypes(Function *F, IRBuilder<> &B) {
2620 B.SetInsertPointPastAllocas(F);
2621 for (unsigned OpIdx = 0; OpIdx < F->arg_size(); ++OpIdx) {
2622 Argument *Arg = F->getArg(OpIdx);
2623 if (!isUntypedPointerTy(Arg->getType()))
2624 continue;
2625 Type *ElemTy = GR->findDeducedElementType(Arg);
2626 if (!ElemTy && (ElemTy = deduceFunParamElementType(F, OpIdx)) != nullptr) {
2627 if (CallInst *AssignCI = GR->findAssignPtrTypeInstr(Arg)) {
2628 DenseSet<std::pair<Value *, Value *>> VisitedSubst;
2629 GR->updateAssignType(AssignCI, Arg, getNormalizedPoisonValue(ElemTy));
2630 propagateElemType(Arg, IntegerType::getInt8Ty(F->getContext()),
2631 VisitedSubst);
2632 } else {
2633 GR->buildAssignPtr(B, ElemTy, Arg);
2634 }
2635 }
2636 }
2637}
2638
2639static FunctionType *getFunctionPointerElemType(Function *F,
2640 SPIRVGlobalRegistry *GR) {
2641 FunctionType *FTy = F->getFunctionType();
2642 bool IsNewFTy = false;
2643 SmallVector<Type *, 4> ArgTys;
2644 for (Argument &Arg : F->args()) {
2645 Type *ArgTy = Arg.getType();
2646 if (ArgTy->isPointerTy())
2647 if (Type *ElemTy = GR->findDeducedElementType(&Arg)) {
2648 IsNewFTy = true;
2649 ArgTy = getTypedPointerWrapper(ElemTy, getPointerAddressSpace(ArgTy));
2650 }
2651 ArgTys.push_back(ArgTy);
2652 }
2653 return IsNewFTy
2654 ? FunctionType::get(FTy->getReturnType(), ArgTys, FTy->isVarArg())
2655 : FTy;
2656}
2657
2658bool SPIRVEmitIntrinsics::processFunctionPointers(Module &M) {
2659 SmallVector<Function *> Worklist;
2660 for (auto &F : M) {
2661 if (F.isIntrinsic())
2662 continue;
2663 if (F.isDeclaration()) {
2664 for (User *U : F.users()) {
2665 CallInst *CI = dyn_cast<CallInst>(U);
2666 if (!CI || CI->getCalledFunction() != &F) {
2667 Worklist.push_back(&F);
2668 break;
2669 }
2670 }
2671 } else {
2672 if (F.user_empty())
2673 continue;
2674 Type *FPElemTy = GR->findDeducedElementType(&F);
2675 if (!FPElemTy)
2676 FPElemTy = getFunctionPointerElemType(&F, GR);
2677 for (User *U : F.users()) {
2678 IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
2679 if (!II || II->arg_size() != 3 || II->getOperand(0) != &F)
2680 continue;
2681 if (II->getIntrinsicID() == Intrinsic::spv_assign_ptr_type ||
2682 II->getIntrinsicID() == Intrinsic::spv_ptrcast) {
2683 GR->updateAssignType(II, &F, getNormalizedPoisonValue(FPElemTy));
2684 break;
2685 }
2686 }
2687 }
2688 }
2689 if (Worklist.empty())
2690 return false;
2691
2692 std::string ServiceFunName = SPIRV_BACKEND_SERVICE_FUN_NAME;
2693 if (!getVacantFunctionName(M, ServiceFunName))
2694 report_fatal_error(
2695 "cannot allocate a name for the internal service function");
2696 LLVMContext &Ctx = M.getContext();
2697 Function *SF =
2698 Function::Create(FunctionType::get(Type::getVoidTy(Ctx), {}, false),
2699 GlobalValue::PrivateLinkage, ServiceFunName, M);
2700 SF->addFnAttr(SPIRV_BACKEND_SERVICE_FUN_NAME, "");
2701 BasicBlock *BB = BasicBlock::Create(Ctx, "entry", SF);
2702 IRBuilder<> IRB(BB);
2703
2704 for (Function *F : Worklist) {
2705 SmallVector<Value *> Args;
2706 for (const auto &Arg : F->args())
2707 Args.push_back(getNormalizedPoisonValue(Arg.getType()));
2708 IRB.CreateCall(F, Args);
2709 }
2710 IRB.CreateRetVoid();
2711
2712 return true;
2713}
2714
2715// Apply types parsed from demangled function declarations.
2716void SPIRVEmitIntrinsics::applyDemangledPtrArgTypes(IRBuilder<> &B) {
2717 DenseMap<Function *, CallInst *> Ptrcasts;
2718 for (auto It : FDeclPtrTys) {
2719 Function *F = It.first;
2720 for (auto *U : F->users()) {
2721 CallInst *CI = dyn_cast<CallInst>(U);
2722 if (!CI || CI->getCalledFunction() != F)
2723 continue;
2724 unsigned Sz = CI->arg_size();
2725 for (auto [Idx, ElemTy] : It.second) {
2726 if (Idx >= Sz)
2727 continue;
2728 Value *Param = CI->getArgOperand(Idx);
2729 if (GR->findDeducedElementType(Param) || isa<GlobalValue>(Param))
2730 continue;
2731 if (Argument *Arg = dyn_cast<Argument>(Param)) {
2732 if (!hasPointeeTypeAttr(Arg)) {
2733 B.SetInsertPointPastAllocas(Arg->getParent());
2734 B.SetCurrentDebugLocation(DebugLoc());
2735 GR->buildAssignPtr(B, ElemTy, Arg);
2736 }
2737 } else if (isa<GetElementPtrInst>(Param)) {
2738 replaceUsesOfWithSpvPtrcast(Param, normalizeType(ElemTy), CI,
2739 Ptrcasts);
2740 } else if (isa<Instruction>(Param)) {
2741 GR->addDeducedElementType(Param, normalizeType(ElemTy));
2742 // insertAssignTypeIntrs() will complete buildAssignPtr()
2743 } else {
2744 B.SetInsertPoint(CI->getParent()
2745 ->getParent()
2746 ->getEntryBlock()
2747 .getFirstNonPHIOrDbgOrAlloca());
2748 GR->buildAssignPtr(B, ElemTy, Param);
2749 }
2750 CallInst *Ref = dyn_cast<CallInst>(Param);
2751 if (!Ref)
2752 continue;
2753 Function *RefF = Ref->getCalledFunction();
2754 if (!RefF || !isPointerTy(RefF->getReturnType()) ||
2755 GR->findDeducedElementType(RefF))
2756 continue;
2757 ElemTy = normalizeType(ElemTy);
2758 GR->addDeducedElementType(RefF, ElemTy);
2759 GR->addReturnType(
2760 RefF, TypedPointerType::get(
2761 ElemTy, getPointerAddressSpace(RefF->getReturnType())));
2762 }
2763 }
2764 }
2765}
2766
2767GetElementPtrInst *
2768SPIRVEmitIntrinsics::simplifyZeroLengthArrayGepInst(GetElementPtrInst *GEP) {
2769 // getelementptr [0 x T], P, 0 (zero), I -> getelementptr T, P, I.
2770 // If the type is a 0-length array and the first index is 0 (zero), drop both
2771 // the 0-length array type and the first index. This is a common pattern in
2772 // the IR, e.g. when a zero-length array is used as a placeholder for a
2773 // flexible (unbounded) array member.
2774 assert(GEP && "GEP is null");
2775 Type *SrcTy = GEP->getSourceElementType();
2776 SmallVector<Value *, 8> Indices(GEP->indices());
2777 ArrayType *ArrTy = dyn_cast<ArrayType>(SrcTy);
2778 if (ArrTy && ArrTy->getNumElements() == 0 &&
2779 PatternMatch::match(Indices[0], PatternMatch::m_Zero())) {
2780 IRBuilder<> Builder(GEP);
2781 Indices.erase(Indices.begin());
2782 SrcTy = ArrTy->getElementType();
2783 Value *NewGEP = Builder.CreateGEP(SrcTy, GEP->getPointerOperand(), Indices,
2784 "", GEP->getNoWrapFlags());
2785 assert(llvm::isa<GetElementPtrInst>(NewGEP) && "NewGEP should be a GEP");
2786 return cast<GetElementPtrInst>(NewGEP);
2787 }
2788 return nullptr;
2789}
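// Illustrative example (editorial note, not from the original source):
//   %e = getelementptr [0 x i32], ptr %p, i64 0, i64 %i
// is rewritten into the equivalent
//   %e = getelementptr i32, ptr %p, i64 %i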
2790
2791bool SPIRVEmitIntrinsics::runOnFunction(Function &Func) {
2792 if (Func.isDeclaration())
2793 return false;
2794
2795 const SPIRVSubtarget &ST = TM->getSubtarget<SPIRVSubtarget>(Func);
2796 GR = ST.getSPIRVGlobalRegistry();
2797
2798 if (!CurrF)
2799 HaveFunPtrs =
2800 ST.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers);
2801
2802 CurrF = &Func;
2803 IRBuilder<> B(Func.getContext());
2804 AggrConsts.clear();
2805 AggrConstTypes.clear();
2806 AggrStores.clear();
2807
2808 // Fix GEP result types ahead of inference, and simplify if possible.
2809 // Data structure for dead instructions that were simplified and replaced.
2810 SmallPtrSet<Instruction *, 4> DeadInsts;
2811 for (auto &I : instructions(Func)) {
2812 auto *Ref = dyn_cast<GetElementPtrInst>(&I);
2813 if (!Ref || GR->findDeducedElementType(Ref))
2814 continue;
2815
2816 GetElementPtrInst *NewGEP = simplifyZeroLengthArrayGepInst(Ref);
2817 if (NewGEP) {
2818 Ref->replaceAllUsesWith(NewGEP);
2819 DeadInsts.insert(Ref);
2820 Ref = NewGEP;
2821 }
2822 if (Type *GepTy = getGEPType(Ref))
2823 GR->addDeducedElementType(Ref, normalizeType(GepTy));
2824 }
2825 // Remove dead instructions that were simplified and replaced.
2826 for (auto *I : DeadInsts) {
2827 assert(I->use_empty() && "Dead instruction should not have any uses left");
2828 I->eraseFromParent();
2829 }
2830
2831 processParamTypesByFunHeader(CurrF, B);
2832
2833 // A StoreInst's operand type can be changed during the next
2834 // transformations, so we need to store it in the set beforehand. Already
2835 // transformed types are stored as well.
2836 for (auto &I : instructions(Func)) {
2837 StoreInst *SI = dyn_cast<StoreInst>(&I);
2838 if (!SI)
2839 continue;
2840 Type *ElTy = SI->getValueOperand()->getType();
2841 if (ElTy->isAggregateType() || ElTy->isVectorTy())
2842 AggrStores.insert(&I);
2843 }
2844
2845 B.SetInsertPoint(&Func.getEntryBlock(), Func.getEntryBlock().begin());
2846 for (auto &GV : Func.getParent()->globals())
2847 processGlobalValue(GV, B);
2848
2849 preprocessUndefs(B);
2850 preprocessCompositeConstants(B);
2851 SmallVector<Instruction *> Worklist(
2852 llvm::make_pointer_range(instructions(Func)));
2853
2854 applyDemangledPtrArgTypes(B);
2855
2856 // Pass forward: use operands to deduce the instruction's result.
2857 for (auto &I : Worklist) {
2858 // Don't emit intrinsics for convergence intrinsics.
2859 if (isConvergenceIntrinsic(I))
2860 continue;
2861
2862 bool Postpone = insertAssignPtrTypeIntrs(I, B, false);
2863 // if Postpone is true, we can't decide on pointee type yet
2864 insertAssignTypeIntrs(I, B);
2865 insertPtrCastOrAssignTypeInstr(I, B);
2866 insertSpirvDecorations(I, B);
2867 // if instruction requires a pointee type set, let's check if we know it
2868 // already, and force it to be i8 if not
2869 if (Postpone && !GR->findAssignPtrTypeInstr(I))
2870 insertAssignPtrTypeIntrs(I, B, true);
2871
2872 if (auto *FPI = dyn_cast<ConstrainedFPIntrinsic>(I))
2873 useRoundingMode(FPI, B);
2874 }
2875
2876 // Pass backward: use instruction results to specify/update/cast operands
2877 // where needed.
2878 SmallPtrSet<Instruction *, 4> IncompleteRets;
2879 for (auto &I : llvm::reverse(instructions(Func)))
2880 deduceOperandElementType(&I, &IncompleteRets);
2881
2882 // Pass forward for PHIs only; their operands do not precede the
2883 // instruction in the order given by `instructions(Func)`.
2884 for (BasicBlock &BB : Func)
2885 for (PHINode &Phi : BB.phis())
2886 if (isPointerTy(Phi.getType()))
2887 deduceOperandElementType(&Phi, nullptr);
2888
2889 for (auto *I : Worklist) {
2890 TrackConstants = true;
2891 if (!I->getType()->isVoidTy() || isa<StoreInst>(I))
2892 setInsertPointAfterDef(B, I);
2893 // Visitors return either the original/newly created instruction for
2894 // further processing, nullptr otherwise.
2895 I = visit(*I);
2896 if (!I)
2897 continue;
2898
2899 // Don't emit intrinsics for convergence operations.
2900 if (isConvergenceIntrinsic(I))
2901 continue;
2902
2904 processInstrAfterVisit(I, B);
2905 }
2906
2907 return true;
2908}
2909
2910// Try to deduce a better type for pointers whose pointee type is still unknown.
2911bool SPIRVEmitIntrinsics::postprocessTypes(Module &M) {
2912 if (!GR || TodoTypeSz == 0)
2913 return false;
2914
2915 unsigned SzTodo = TodoTypeSz;
2916 DenseMap<Value *, SmallPtrSet<Value *, 4>> ToProcess;
2917 for (auto [Op, Enabled] : TodoType) {
2918 // TODO: add isa<CallInst>(Op) to continue
2919 if (!Enabled || isa<GetElementPtrInst>(Op))
2920 continue;
2921 CallInst *AssignCI = GR->findAssignPtrTypeInstr(Op);
2922 Type *KnownTy = GR->findDeducedElementType(Op);
2923 if (!KnownTy || !AssignCI)
2924 continue;
2925 assert(Op == AssignCI->getArgOperand(0));
2926 // Try to improve the type deduced after all Functions are processed.
2927 if (auto *CI = dyn_cast<Instruction>(Op)) {
2928 CurrF = CI->getParent()->getParent();
2929 std::unordered_set<Value *> Visited;
2930 if (Type *ElemTy = deduceElementTypeHelper(Op, Visited, false, true)) {
2931 if (ElemTy != KnownTy) {
2932 DenseSet<std::pair<Value *, Value *>> VisitedSubst;
2933 propagateElemType(CI, ElemTy, VisitedSubst);
2934 eraseTodoType(Op);
2935 continue;
2936 }
2937 }
2938 }
2939
2940 if (Op->hasUseList()) {
2941 for (User *U : Op->users()) {
2942 Instruction *Inst = dyn_cast<Instruction>(U);
2943 if (Inst && !isa<IntrinsicInst>(Inst))
2944 ToProcess[Inst].insert(Op);
2945 }
2946 }
2947 }
2948 if (TodoTypeSz == 0)
2949 return true;
2950
2951 for (auto &F : M) {
2952 CurrF = &F;
2953 SmallPtrSet<Instruction *, 4> IncompleteRets;
2954 for (auto &I : llvm::reverse(instructions(F))) {
2955 auto It = ToProcess.find(&I);
2956 if (It == ToProcess.end())
2957 continue;
2958 It->second.remove_if([this](Value *V) { return !isTodoType(V); });
2959 if (It->second.size() == 0)
2960 continue;
2961 deduceOperandElementType(&I, &IncompleteRets, &It->second, true);
2962 if (TodoTypeSz == 0)
2963 return true;
2964 }
2965 }
2966
2967 return SzTodo > TodoTypeSz;
2968}
2969
2970// Parse and store argument types of function declarations where needed.
2971void SPIRVEmitIntrinsics::parseFunDeclarations(Module &M) {
2972 for (auto &F : M) {
2973 if (!F.isDeclaration() || F.isIntrinsic())
2974 continue;
2975 // get the demangled name
2976 std::string DemangledName = getOclOrSpirvBuiltinDemangledName(F.getName());
2977 if (DemangledName.empty())
2978 continue;
2979 // allow only OpGroupAsyncCopy use case at the moment
2980 const SPIRVSubtarget &ST = TM->getSubtarget<SPIRVSubtarget>(F);
2981 auto [Grp, Opcode, ExtNo] = SPIRV::mapBuiltinToOpcode(
2982 DemangledName, ST.getPreferredInstructionSet());
2983 if (Opcode != SPIRV::OpGroupAsyncCopy)
2984 continue;
2985 // find pointer arguments
2986 SmallVector<unsigned> Idxs;
2987 for (unsigned OpIdx = 0; OpIdx < F.arg_size(); ++OpIdx) {
2988 Argument *Arg = F.getArg(OpIdx);
2989 if (isPointerTy(Arg->getType()) && !hasPointeeTypeAttr(Arg))
2990 Idxs.push_back(OpIdx);
2991 }
2992 if (!Idxs.size())
2993 continue;
2994 // parse function arguments
2995 LLVMContext &Ctx = F.getContext();
2996 SmallVector<StringRef, 10> TypeStrs;
2997 SPIRV::parseBuiltinTypeStr(TypeStrs, DemangledName, Ctx);
2998 if (!TypeStrs.size())
2999 continue;
3000 // find type info for pointer arguments
3001 for (unsigned Idx : Idxs) {
3002 if (Idx >= TypeStrs.size())
3003 continue;
3004 if (Type *ElemTy =
3005 SPIRV::parseBuiltinCallArgumentType(TypeStrs[Idx].trim(), Ctx))
3006 if (TypedPointerType::isValidElementType(ElemTy) &&
3007 !ElemTy->isTargetExtTy())
3008 FDeclPtrTys[&F].push_back(std::make_pair(Idx, ElemTy));
3009 }
3010 }
3011}
3012
3013bool SPIRVEmitIntrinsics::runOnModule(Module &M) {
3014 bool Changed = false;
3015
3016 parseFunDeclarations(M);
3017 insertConstantsForFPFastMathDefault(M);
3018
3019 TodoType.clear();
3020 for (auto &F : M)
3021 Changed |= runOnFunction(F);
3022
3023 // Specify function parameters after all functions were processed.
3024 for (auto &F : M) {
3025 // check if function parameter types are set
3026 CurrF = &F;
3027 if (!F.isDeclaration() && !F.isIntrinsic()) {
3028 IRBuilder<> B(F.getContext());
3029 processParamTypes(&F, B);
3030 }
3031 }
3032
3033 CanTodoType = false;
3034 Changed |= postprocessTypes(M);
3035
3036 if (HaveFunPtrs)
3037 Changed |= processFunctionPointers(M);
3038
3039 return Changed;
3040}
3041
3042ModulePass *llvm::createSPIRVEmitIntrinsicsPass(SPIRVTargetMachine *TM) {
3043 return new SPIRVEmitIntrinsics(TM);
3044}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
aarch64 promote const
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
always inline
Expand Atomic instructions
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
Definition CSEInfo.cpp:27
static void replaceAllUsesWith(Value *Old, Value *New, SmallPtrSet< BasicBlock *, 32 > &FreshBBs, bool IsHuge)
Replace all old uses with new ones, and push the updated BBs into FreshBBs.
This file defines the DenseSet and SmallDenseSet classes.
static bool runOnFunction(Function &F, bool PostInlining)
Hexagon Common GEP
iv Induction Variable Users
Definition IVUsers.cpp:48
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:55
#define I(x, y, z)
Definition MD5.cpp:58
Machine Check Debug Module
#define T
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
Function * Fun
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
static unsigned getNumElements(Type *Ty)
static bool isMemInstrToReplace(Instruction *I)
static bool isAggrConstForceInt32(const Value *V)
static SPIRV::FPFastMathDefaultInfoVector & getOrCreateFPFastMathDefaultInfoVec(const Module &M, DenseMap< Function *, SPIRV::FPFastMathDefaultInfoVector > &FPFastMathDefaultInfoMap, Function *F)
static Type * getAtomicElemTy(SPIRVGlobalRegistry *GR, Instruction *I, Value *PointerOperand)
static void reportFatalOnTokenType(const Instruction *I)
static void setInsertPointAfterDef(IRBuilder<> &B, Instruction *I)
static void emitAssignName(Instruction *I, IRBuilder<> &B)
static Type * getPointeeTypeByCallInst(StringRef DemangledName, Function *CalledF, unsigned OpIdx)
static void createRoundingModeDecoration(Instruction *I, unsigned RoundingModeDeco, IRBuilder<> &B)
static void createDecorationIntrinsic(Instruction *I, MDNode *Node, IRBuilder<> &B)
static SPIRV::FPFastMathDefaultInfo & getFPFastMathDefaultInfo(SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec, const Type *Ty)
static bool IsKernelArgInt8(Function *F, StoreInst *SI)
static void addSaturatedDecorationToIntrinsic(Instruction *I, IRBuilder<> &B)
static void setInsertPointSkippingPhis(IRBuilder<> &B, Instruction *I)
static FunctionType * getFunctionPointerElemType(Function *F, SPIRVGlobalRegistry *GR)
static void createSaturatedConversionDecoration(Instruction *I, IRBuilder<> &B)
static Type * restoreMutatedType(SPIRVGlobalRegistry *GR, Instruction *I, Type *Ty)
static bool requireAssignType(Instruction *I)
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
static void insertSpirvDecorations(MachineFunction &MF, SPIRVGlobalRegistry *GR, MachineIRBuilder MIB)
#define SPIRV_BACKEND_SERVICE_FUN_NAME
Definition SPIRVUtils.h:503
static bool Enabled
Definition Statistic.cpp:46
StringSet - A set-like wrapper for the StringMap.
DEMANGLE_NAMESPACE_BEGIN bool starts_with(std::string_view self, char C) noexcept
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
static int Lookup(ArrayRef< TableEntry > Table, unsigned Opcode)
This class represents an incoming formal argument to a Function.
Definition Argument.h:32
const Function * getParent() const
Definition Argument.h:44
static unsigned getPointerOperandIndex()
static unsigned getPointerOperandIndex()
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:528
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition BasicBlock.h:206
LLVM_ABI LLVMContext & getContext() const
Get the context in which this basic block lives.
static LLVM_ABI BlockAddress * get(Function *F, BasicBlock *BB)
Return a BlockAddress for the specified function and basic block.
bool isInlineAsm() const
Check if this call is an inline asm statement.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
LLVM_ABI bool isIndirectCall() const
Return true if the callsite is an indirect call.
Value * getCalledOperand() const
Value * getArgOperand(unsigned i) const
FunctionType * getFunctionType() const
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
static ConstantAsMetadata * get(Constant *C)
Definition Metadata.h:536
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:163
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI std::optional< RoundingMode > getRoundingMode() const
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:167
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
Definition DenseMap.h:237
iterator end()
Definition DenseMap.h:81
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:222
static LLVM_ABI FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
void addFnAttr(Attribute::AttrKind Kind)
Add function attributes to this function.
Definition Function.cpp:637
static Function * Create(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Definition Function.h:166
const DataLayout & getDataLayout() const
Get the data layout of the module this function belongs to.
Definition Function.cpp:363
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition Function.h:244
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
Definition Function.h:249
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition Function.cpp:359
size_t arg_size() const
Definition Function.h:899
Type * getReturnType() const
Returns the type of the ret val.
Definition Function.h:214
Argument * getArg(unsigned i) const
Definition Function.h:884
static LLVM_ABI Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
static unsigned getPointerOperandIndex()
PointerType * getType() const
Global values are always pointers.
@ PrivateLinkage
Like Internal, but omit from symbol table.
Definition GlobalValue.h:61
@ InternalLinkage
Rename collisions when linking (static functions).
Definition GlobalValue.h:60
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
This provides a uniform API for creating instructions and inserting them into a basic block: either at the end of a BasicBlock, or at a specific iterator location in a block.
Definition IRBuilder.h:2788
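A minimal IRBuilder sketch showing both insertion modes named above (the operands are assumed to be integers of the same type):

  #include "llvm/IR/IRBuilder.h"
  using namespace llvm;

  Value *emitAdd(BasicBlock *BB, Value *LHS, Value *RHS) {
    IRBuilder<> B(BB);                 // insert at the end of BB...
    B.SetInsertPoint(BB, BB->begin()); // ...or at a specific iterator location
    return B.CreateAdd(LHS, RHS, "sum");
  }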
LLVM_ABI void addDestination(BasicBlock *Dest)
Add a destination.
Base class for instruction visitors.
Definition InstVisitor.h:78
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other instructions.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
LLVM_ABI void copyMetadata(const Instruction &SrcInst, ArrayRef< unsigned > WL=ArrayRef< unsigned >())
Copy metadata from SrcInst to this instruction.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
static unsigned getPointerOperandIndex()
Metadata node.
Definition Metadata.h:1078
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1442
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition Metadata.h:1569
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1448
static LLVM_ABI MDString * get(LLVMContext &Context, StringRef Str)
Definition Metadata.cpp:608
Flags
Flags values. These may be or'd together.
static LLVM_ABI MetadataAsValue * get(LLVMContext &Context, Metadata *MD)
Definition Metadata.cpp:104
Metadata * getMetadata() const
Definition Metadata.h:201
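The metadata factories above (together with ConstantAsMetadata::get further up) compose as follows; the tuple layout is illustrative:

  #include "llvm/IR/Constants.h"
  #include "llvm/IR/Metadata.h"
  using namespace llvm;

  MetadataAsValue *wrapAsValue(LLVMContext &Ctx, Constant *C) {
    // Wrap a constant and a string into a tuple, then expose the tuple as a
    // Value so it can be passed as a call operand.
    Metadata *Ops[] = {ConstantAsMetadata::get(C),
                       MDString::get(Ctx, "example")};
    MDTuple *Tuple = MDTuple::get(Ctx, Ops);
    return MetadataAsValue::get(Ctx, Tuple);
  }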
ModulePass class - This class is used to implement unstructured interprocedural optimizations and analyses over the module.
Definition Pass.h:255
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
void addAssignPtrTypeInstr(Value *Val, CallInst *AssignPtrTyCI)
void buildAssignPtr(IRBuilder<> &B, Type *ElemTy, Value *Arg)
Type * findDeducedCompositeType(const Value *Val)
void replaceAllUsesWith(Value *Old, Value *New, bool DeleteOld=true)
void addDeducedElementType(Value *Val, Type *Ty)
void addReturnType(const Function *ArgF, TypedPointerType *DerivedTy)
Type * findMutated(const Value *Val)
void addDeducedCompositeType(Value *Val, Type *Ty)
void buildAssignType(IRBuilder<> &B, Type *Ty, Value *Arg)
Type * findDeducedElementType(const Value *Val)
void updateAssignType(CallInst *AssignCI, Value *Arg, Value *OfType)
CallInst * findAssignPtrTypeInstr(const Value *Val)
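A hypothetical sketch of how the registry helpers above fit together (assumes the backend's SPIRVGlobalRegistry.h is available; this is not the pass's actual logic):

  // Record a deduced pointee type for V and emit or refresh the matching
  // assign-pointer-type intrinsic.
  void noteElementType(SPIRVGlobalRegistry *GR, IRBuilder<> &B, Value *V,
                       Type *ElemTy) {
    GR->addDeducedElementType(V, ElemTy);
    if (CallInst *AssignCI = GR->findAssignPtrTypeInstr(V))
      GR->updateAssignType(AssignCI, V, PoisonValue::get(ElemTy));
    else
      GR->buildAssignPtr(B, ElemTy, V);
  }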
const SPIRVTargetLowering * getTargetLowering() const override
bool isLogicalSPIRV() const
bool canUseExtension(SPIRV::Extension::Extension E) const
const SPIRVSubtarget * getSubtargetImpl() const
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
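The small-container operations above, in one illustrative snippet:

  #include "llvm/ADT/SmallPtrSet.h"
  #include "llvm/ADT/SmallVector.h"
  #include "llvm/IR/Instruction.h"
  using namespace llvm;

  void collect(Instruction *I, SmallVectorImpl<Instruction *> &Order) {
    SmallPtrSet<Instruction *, 8> Visited;
    // insert returns {iterator, bool}; contains is the pure membership query.
    if (Visited.insert(I).second && Visited.contains(I))
      Order.push_back(I); // emplace_back would construct the element in place
  }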
An instruction for storing to memory.
static unsigned getPointerOperandIndex()
iterator end()
Definition StringMap.h:224
iterator find(StringRef Key)
Definition StringMap.h:237
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Definition StringRef.h:55
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:261
bool contains(StringRef key) const
Check if the set contains the given key.
Definition StringSet.h:60
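StringRef prefix tests and StringSet membership in one sketch (the names checked are hypothetical):

  #include "llvm/ADT/StringSet.h"
  using namespace llvm;

  bool isSpecialName(StringRef Name) {
    static const StringSet<> Known = {"foo", "bar"};
    // starts_with checks a prefix; contains checks exact membership.
    return Name.starts_with("llvm.") || Known.contains(Name);
  }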
static LLVM_ABI StructType * create(LLVMContext &Context, StringRef Name)
This creates an identified struct.
Definition Type.cpp:620
static LLVM_ABI TargetExtType * get(LLVMContext &Context, StringRef Name, ArrayRef< Type * > Types={}, ArrayRef< unsigned > Ints={})
Return a target extension type having the specified name and optional type and integer parameters.
Definition Type.cpp:908
Type * getTypeParameter(unsigned i) const
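Illustrative use of the two type factories above; the struct and extension-type names are made up:

  #include "llvm/IR/DerivedTypes.h"
  using namespace llvm;

  Type *makeTypes(LLVMContext &Ctx) {
    // An identified (named) struct; its body can be filled in later.
    StructType *ST = StructType::create(Ctx, "example.struct");
    (void)ST;
    // A target extension type with one type and one integer parameter.
    TargetExtType *TET =
        TargetExtType::get(Ctx, "example.type", {Type::getInt32Ty(Ctx)}, {0});
    return TET->getTypeParameter(0); // yields the i32 parameter above
  }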
const STC & getSubtarget(const Function &F) const
This method returns a pointer to the specified type of TargetSubtargetInfo.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition Type.h:264
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:297
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:267
Type * getArrayElementType() const
Definition Type.h:408
LLVM_ABI StringRef getTargetExtName() const
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
Definition Type.cpp:295
bool isStructTy() const
True if this is an instance of StructType.
Definition Type.h:261
bool isTargetExtTy() const
Return true if this is a target extension type.
Definition Type.h:203
bool isAggregateType() const
Return true if the type is an aggregate type.
Definition Type.h:304
static LLVM_ABI Type * getDoubleTy(LLVMContext &C)
Definition Type.cpp:286
static LLVM_ABI Type * getFloatTy(LLVMContext &C)
Definition Type.cpp:285
static LLVM_ABI Type * getHalfTy(LLVMContext &C)
Definition Type.cpp:283
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:139
static LLVM_ABI bool isValidElementType(Type *ElemTy)
Return true if the specified type is valid as an element type.
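The Type predicates and factories above compose as in this sketch (the fallback choice is illustrative):

  #include "llvm/IR/DerivedTypes.h"
  using namespace llvm;

  Type *scalarOf(Type *Ty, LLVMContext &Ctx) {
    if (Ty->isArrayTy())
      return Ty->getArrayElementType();
    if (Ty->isVectorTy())
      return cast<VectorType>(Ty)->getElementType();
    if (Ty->isStructTy() || Ty->isTargetExtTy() || Ty->isVoidTy())
      return Type::getInt8Ty(Ctx); // illustrative fallback
    return Ty; // half, float, double, integers, pointers are already scalar
  }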
static LLVM_ABI TypedPointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
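For example, a typed pointer to i32 in address space 1 (the address space is chosen arbitrarily):

  #include "llvm/IR/TypedPointerType.h"
  using namespace llvm;

  TypedPointerType *makeTypedPtr(LLVMContext &Ctx) {
    return TypedPointerType::get(Type::getInt32Ty(Ctx), /*AddressSpace=*/1);
  }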
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
op_range operands()
Definition User.h:292
LLVM_ABI bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Definition User.cpp:21
void setOperand(unsigned i, Value *Val)
Definition User.h:237
Value * getOperand(unsigned i) const
Definition User.h:232
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
Definition Value.cpp:390
iterator_range< user_iterator > users()
Definition Value.h:426
bool use_empty() const
Definition Value.h:346
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
bool user_empty() const
Definition Value.h:389
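A short sketch over the Value API above (the chosen name is illustrative):

  #include "llvm/IR/Value.h"
  using namespace llvm;

  void nameIfUsed(Value *V) {
    if (V->use_empty() || V->getType()->isVoidTy())
      return;                  // no uses, or nothing nameable
    if (V->getName().empty())
      V->setName("tmp");       // illustrative name
    for (User *U : V->users()) // visit each user of V
      (void)U;
  }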
const ParentTy * getParent() const
Definition ilist_node.h:34
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
@ SPIR_KERNEL
Used for SPIR kernel functions.
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
bool match(Val *V, const Pattern &P)
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
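match and m_Zero as typically combined (a generic sketch, not this pass's code):

  #include "llvm/IR/Instructions.h"
  #include "llvm/IR/PatternMatch.h"
  using namespace llvm;
  using namespace llvm::PatternMatch;

  bool storesZero(StoreInst *SI) {
    // True for a stored null constant or an all-zero vector.
    return match(SI->getValueOperand(), m_Zero());
  }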
DenseSetImpl< ValueT, DenseMap< ValueT, DenseSetEmpty, ValueInfoT, DenseSetPair< ValueT > >, ValueInfoT > DenseSet
Definition DenseSet.h:264
ElementType
The element type of an SRV or UAV resource.
Definition DXILABI.h:60
@ User
could "use" a pointer
NodeAddr< PhiNode * > Phi
Definition RDFGraph.h:390
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
NodeAddr< FuncNode * > Func
Definition RDFGraph.h:393
friend class Instruction
Iterator for Instructions in a BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
bool getVacantFunctionName(Module &M, std::string &Name)
@ Offset
Definition DWP.cpp:477
bool isTypedPointerWrapper(const TargetExtType *ExtTy)
Definition SPIRVUtils.h:381
ModulePass * createSPIRVEmitIntrinsicsPass(SPIRVTargetMachine *TM)
unsigned getPointerAddressSpace(const Type *T)
Definition SPIRVUtils.h:345
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:644
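dyn_cast (with isa and cast listed further below) follows the usual LLVM casting idiom:

  #include "llvm/IR/Instructions.h"
  #include "llvm/Support/Casting.h"
  using namespace llvm;

  unsigned pointerOperandIndex(Value *V) {
    if (isa<StoreInst>(V))                // cheap dynamic type test
      return StoreInst::getPointerOperandIndex();
    if (auto *LI = dyn_cast<LoadInst>(V)) // cast-or-null in one step
      return LI->getPointerOperandIndex();
    return ~0u; // sentinel: neither a load nor a store
  }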
CallInst * buildIntrWithMD(Intrinsic::ID IntrID, ArrayRef< Type * > Types, Value *Arg, Value *Arg2, ArrayRef< Constant * > Imms, IRBuilder<> &B)
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2136
FPDecorationId
Definition SPIRVUtils.h:527
bool isNestedPointer(const Type *Ty)
MetadataAsValue * buildMD(Value *Arg)
Definition SPIRVUtils.h:491
std::string getOclOrSpirvBuiltinDemangledName(StringRef Name)
auto reverse(ContainerTy &&C)
Definition STLExtras.h:406
Type * getTypedPointerWrapper(Type *ElemTy, unsigned AS)
Definition SPIRVUtils.h:376
bool isVector1(Type *Ty)
Definition SPIRVUtils.h:469
bool isPointerTy(const Type *T)
Definition SPIRVUtils.h:339
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector<T, 0>).
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
Definition Casting.h:548
SPIRV::Scope::Scope getMemScope(LLVMContext &Ctx, SyncScope::ID Id)
@ Ref
The access may reference the value stored in memory.
Definition ModRef.h:32
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
DWARFExpression::Operation Op
Type * getPointeeTypeByAttr(Argument *Arg)
Definition SPIRVUtils.h:358
bool hasPointeeTypeAttr(Argument *Arg)
Definition SPIRVUtils.h:353
constexpr unsigned BitWidth
bool isEquivalentTypes(Type *Ty1, Type *Ty2)
Definition SPIRVUtils.h:431
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:560
iterator_range< pointer_iterator< WrappedIteratorT > > make_pointer_range(RangeT &&Range)
Definition iterator.h:363
bool hasInitializer(const GlobalVariable *GV)
Definition SPIRVUtils.h:324
Type * normalizeType(Type *Ty)
Definition SPIRVUtils.h:477
bool isSpvIntrinsic(const MachineInstr &MI, Intrinsic::ID IntrinsicID)
Type * getPointeeType(const Type *Ty)
Definition SPIRVUtils.h:408
PoisonValue * getNormalizedPoisonValue(Type *Ty)
Definition SPIRVUtils.h:487
bool isUntypedPointerTy(const Type *T)
Definition SPIRVUtils.h:334
SPIRV::MemorySemantics::MemorySemantics getMemSemantics(AtomicOrdering Ord)
static size_t computeFPFastMathDefaultInfoVecIndex(size_t BitWidth)
Definition SPIRVUtils.h:146