LLVM 19.0.0git
SPIRVInstructionSelector.cpp
//===- SPIRVInstructionSelector.cpp ------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the targeting of the InstructionSelector class for
// SPIRV.
// TODO: This should be generated by TableGen.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/SPIRVBaseInfo.h"
#include "MCTargetDesc/SPIRVMCTargetDesc.h"
#include "SPIRV.h"
#include "SPIRVGlobalRegistry.h"
#include "SPIRVInstrInfo.h"
#include "SPIRVRegisterBankInfo.h"
#include "SPIRVRegisterInfo.h"
#include "SPIRVTargetMachine.h"
#include "SPIRVUtils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfoImpls.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/IntrinsicsSPIRV.h"
#include "llvm/Support/Debug.h"

namespace llvm {

class SPIRVMachineModuleInfo : public MachineModuleInfoImpl {
public:
  SyncScope::ID Work_ItemSSID;
  SyncScope::ID WorkGroupSSID;
  SyncScope::ID DeviceSSID;
  SyncScope::ID AllSVMDevicesSSID;
  SyncScope::ID SubGroupSSID;

  SPIRVMachineModuleInfo(const MachineModuleInfo &MMI) {
    LLVMContext &CTX = MMI.getModule()->getContext();
    Work_ItemSSID = CTX.getOrInsertSyncScopeID("work_item");
    WorkGroupSSID = CTX.getOrInsertSyncScopeID("workgroup");
    DeviceSSID = CTX.getOrInsertSyncScopeID("device");
    AllSVMDevicesSSID = CTX.getOrInsertSyncScopeID("all_svm_devices");
    SubGroupSSID = CTX.getOrInsertSyncScopeID("sub_group");
  }
};

} // end namespace llvm

#define DEBUG_TYPE "spirv-isel"

using namespace llvm;
namespace CL = SPIRV::OpenCLExtInst;
namespace GL = SPIRV::GLSLExtInst;

using ExtInstList =
    std::vector<std::pair<SPIRV::InstructionSet::InstructionSet, uint32_t>>;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class SPIRVInstructionSelector : public InstructionSelector {
  const SPIRVSubtarget &STI;
  const SPIRVInstrInfo &TII;
  const SPIRVRegisterInfo &TRI;
  const RegisterBankInfo &RBI;
  SPIRVGlobalRegistry &GR;
  MachineRegisterInfo *MRI;
  SPIRVMachineModuleInfo *MMI = nullptr;

  /// We need to keep track of the number we give to anonymous global values to
  /// generate the same name every time when this is needed.
  mutable DenseMap<const GlobalValue *, unsigned> UnnamedGlobalIDs;

public:
  SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                           const SPIRVSubtarget &ST,
                           const RegisterBankInfo &RBI);
  void setupMF(MachineFunction &MF, GISelKnownBits *KB,
               CodeGenCoverage *CoverageInfo, ProfileSummaryInfo *PSI,
               BlockFrequencyInfo *BFI) override;
  // Common selection code. Instruction-specific selection occurs in spvSelect.
  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

#define GET_GLOBALISEL_PREDICATES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL

private:
  // tblgen-erated 'select' implementation, used as the initial selector for
  // the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // All instruction-specific selection that didn't happen in "select()".
  // It is basically a large switch/case delegating to all the other select
  // methods.
  bool spvSelect(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectGlobalValue(Register ResVReg, MachineInstr &I,
                         const MachineInstr *Init = nullptr) const;

  bool selectUnOpWithSrc(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I, Register SrcReg,
                         unsigned Opcode) const;
  bool selectUnOp(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  unsigned Opcode) const;

  bool selectBitcast(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I) const;

  bool selectLoad(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectStore(MachineInstr &I) const;

  bool selectStackSave(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectStackRestore(MachineInstr &I) const;

  bool selectMemOperation(Register ResVReg, MachineInstr &I) const;

  bool selectAtomicRMW(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I, unsigned NewOpcode,
                       unsigned NegateOpcode = 0) const;

  bool selectAtomicCmpXchg(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectFence(MachineInstr &I) const;

  bool selectAddrSpaceCast(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectBitreverse(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;

  bool selectConstVector(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;
  bool selectSplatVector(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectCmp(Register ResVReg, const SPIRVType *ResType,
                 unsigned comparisonOpcode, MachineInstr &I) const;

  bool selectICmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectFCmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;

  void renderImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
                   int OpIdx) const;
  void renderFImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
                    int OpIdx) const;

  bool selectConst(Register ResVReg, const SPIRVType *ResType, const APInt &Imm,
                   MachineInstr &I) const;

  bool selectSelect(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                    bool IsSigned) const;
  bool selectIToF(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  bool IsSigned, unsigned Opcode) const;
  bool selectExt(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                 bool IsSigned) const;

  bool selectTrunc(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;

  bool selectIntToBool(Register IntReg, Register ResVReg, MachineInstr &I,
                       const SPIRVType *intTy, const SPIRVType *boolTy) const;

  bool selectOpUndef(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I) const;
  bool selectFreeze(Register ResVReg, const SPIRVType *ResType,
                    MachineInstr &I) const;
  bool selectIntrinsic(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractVal(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertVal(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractElt(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertElt(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectGEP(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectFrameIndex(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectAllocaArray(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectBranch(MachineInstr &I) const;
  bool selectBranchCond(MachineInstr &I) const;

  bool selectPhi(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, CL::OpenCLExtInst CLInst) const;
  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, CL::OpenCLExtInst CLInst,
                     GL::GLSLExtInst GLInst) const;
  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, const ExtInstList &ExtInsts) const;

  bool selectLog10(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;

  bool selectSpvThreadId(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectUnmergeValues(MachineInstr &I) const;

  Register buildI32Constant(uint32_t Val, MachineInstr &I,
                            const SPIRVType *ResType = nullptr) const;

  Register buildZerosVal(const SPIRVType *ResType, MachineInstr &I) const;
  Register buildOnesVal(bool AllOnes, const SPIRVType *ResType,
                        MachineInstr &I) const;

  bool wrapIntoSpecConstantOp(MachineInstr &I,
                              SmallVector<Register> &CompositeArgs) const;
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

SPIRVInstructionSelector::SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                                                   const SPIRVSubtarget &ST,
                                                   const RegisterBankInfo &RBI)
    : InstructionSelector(), STI(ST), TII(*ST.getInstrInfo()),
      TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

void SPIRVInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
                                       CodeGenCoverage *CoverageInfo,
                                       ProfileSummaryInfo *PSI,
                                       BlockFrequencyInfo *BFI) {
  MMI = &MF.getMMI().getObjFileInfo<SPIRVMachineModuleInfo>();
  MRI = &MF.getRegInfo();
  GR.setCurrentFunc(MF);
  InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
}

static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI);

// Defined in SPIRVLegalizerInfo.cpp.
extern bool isTypeFoldingSupported(unsigned Opcode);

bool SPIRVInstructionSelector::select(MachineInstr &I) {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  unsigned Opcode = I.getOpcode();
  // If it's not a GMIR instruction, we've selected it already.
  if (!isPreISelGenericOpcode(Opcode)) {
    if (Opcode == SPIRV::ASSIGN_TYPE) { // These pseudos aren't needed any more.
      auto *Def = MRI->getVRegDef(I.getOperand(1).getReg());
      if (isTypeFoldingSupported(Def->getOpcode())) {
        bool Res = selectImpl(I, *CoverageInfo);
        assert(Res || Def->getOpcode() == TargetOpcode::G_CONSTANT);
        if (Res)
          return Res;
      }
      MRI->replaceRegWith(I.getOperand(1).getReg(), I.getOperand(0).getReg());
      I.removeFromParent();
      return true;
    } else if (I.getNumDefs() == 1) {
      // Make all vregs 32 bits (for SPIR-V IDs).
      MRI->setType(I.getOperand(0).getReg(), LLT::scalar(32));
    }
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  if (I.getNumOperands() != I.getNumExplicitOperands()) {
    LLVM_DEBUG(errs() << "Generic instr has unexpected implicit operands\n");
    return false;
  }

  // Common code for getting return reg+type, and removing selected instr
  // from parent occurs here. Instr-specific selection happens in spvSelect().
  bool HasDefs = I.getNumDefs() > 0;
  Register ResVReg = HasDefs ? I.getOperand(0).getReg() : Register(0);
  SPIRVType *ResType = HasDefs ? GR.getSPIRVTypeForVReg(ResVReg) : nullptr;
  assert(!HasDefs || ResType || I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
  if (spvSelect(ResVReg, ResType, I)) {
    if (HasDefs) // Make all vregs 32 bits (for SPIR-V IDs).
      for (unsigned i = 0; i < I.getNumDefs(); ++i)
        MRI->setType(I.getOperand(i).getReg(), LLT::scalar(32));
    I.removeFromParent();
    return true;
  }
  return false;
}

bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  const unsigned Opcode = I.getOpcode();
  if (isTypeFoldingSupported(Opcode) && Opcode != TargetOpcode::G_CONSTANT)
    return selectImpl(I, *CoverageInfo);
  switch (Opcode) {
  case TargetOpcode::G_CONSTANT:
    return selectConst(ResVReg, ResType, I.getOperand(1).getCImm()->getValue(),
                       I);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(ResVReg, I);
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectOpUndef(ResVReg, ResType, I);
  case TargetOpcode::G_FREEZE:
    return selectFreeze(ResVReg, ResType, I);

  case TargetOpcode::G_INTRINSIC:
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
  case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
    return selectIntrinsic(ResVReg, ResType, I);
  case TargetOpcode::G_BITREVERSE:
    return selectBitreverse(ResVReg, ResType, I);

  case TargetOpcode::G_BUILD_VECTOR:
    return selectConstVector(ResVReg, ResType, I);
  case TargetOpcode::G_SPLAT_VECTOR:
    return selectSplatVector(ResVReg, ResType, I);

  case TargetOpcode::G_SHUFFLE_VECTOR: {
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorShuffle))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addUse(I.getOperand(1).getReg())
                   .addUse(I.getOperand(2).getReg());
    for (auto V : I.getOperand(3).getShuffleMask())
      MIB.addImm(V);
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case TargetOpcode::G_MEMMOVE:
  case TargetOpcode::G_MEMCPY:
  case TargetOpcode::G_MEMSET:
    return selectMemOperation(ResVReg, I);

  case TargetOpcode::G_ICMP:
    return selectICmp(ResVReg, ResType, I);
  case TargetOpcode::G_FCMP:
    return selectFCmp(ResVReg, ResType, I);

  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndex(ResVReg, ResType, I);

  case TargetOpcode::G_LOAD:
    return selectLoad(ResVReg, ResType, I);
  case TargetOpcode::G_STORE:
    return selectStore(I);

  case TargetOpcode::G_BR:
    return selectBranch(I);
  case TargetOpcode::G_BRCOND:
    return selectBranchCond(I);

  case TargetOpcode::G_PHI:
    return selectPhi(ResVReg, ResType, I);

  case TargetOpcode::G_FPTOSI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToS);
  case TargetOpcode::G_FPTOUI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToU);

  case TargetOpcode::G_SITOFP:
    return selectIToF(ResVReg, ResType, I, true, SPIRV::OpConvertSToF);
  case TargetOpcode::G_UITOFP:
    return selectIToF(ResVReg, ResType, I, false, SPIRV::OpConvertUToF);

  case TargetOpcode::G_CTPOP:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitCount);
  case TargetOpcode::G_SMIN:
    return selectExtInst(ResVReg, ResType, I, CL::s_min, GL::SMin);
  case TargetOpcode::G_UMIN:
    return selectExtInst(ResVReg, ResType, I, CL::u_min, GL::UMin);

  case TargetOpcode::G_SMAX:
    return selectExtInst(ResVReg, ResType, I, CL::s_max, GL::SMax);
  case TargetOpcode::G_UMAX:
    return selectExtInst(ResVReg, ResType, I, CL::u_max, GL::UMax);

  case TargetOpcode::G_FMA:
    return selectExtInst(ResVReg, ResType, I, CL::fma, GL::Fma);

  case TargetOpcode::G_FPOW:
    return selectExtInst(ResVReg, ResType, I, CL::pow, GL::Pow);
  case TargetOpcode::G_FPOWI:
    return selectExtInst(ResVReg, ResType, I, CL::pown);

  case TargetOpcode::G_FEXP:
    return selectExtInst(ResVReg, ResType, I, CL::exp, GL::Exp);
  case TargetOpcode::G_FEXP2:
    return selectExtInst(ResVReg, ResType, I, CL::exp2, GL::Exp2);

  case TargetOpcode::G_FLOG:
    return selectExtInst(ResVReg, ResType, I, CL::log, GL::Log);
  case TargetOpcode::G_FLOG2:
    return selectExtInst(ResVReg, ResType, I, CL::log2, GL::Log2);
  case TargetOpcode::G_FLOG10:
    return selectLog10(ResVReg, ResType, I);

  case TargetOpcode::G_FABS:
    return selectExtInst(ResVReg, ResType, I, CL::fabs, GL::FAbs);
  case TargetOpcode::G_ABS:
    return selectExtInst(ResVReg, ResType, I, CL::s_abs, GL::SAbs);

  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMINIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmin, GL::FMin);
  case TargetOpcode::G_FMAXNUM:
  case TargetOpcode::G_FMAXIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmax, GL::FMax);

  case TargetOpcode::G_FCOPYSIGN:
    return selectExtInst(ResVReg, ResType, I, CL::copysign);

  case TargetOpcode::G_FCEIL:
    return selectExtInst(ResVReg, ResType, I, CL::ceil, GL::Ceil);
  case TargetOpcode::G_FFLOOR:
    return selectExtInst(ResVReg, ResType, I, CL::floor, GL::Floor);

  case TargetOpcode::G_FCOS:
    return selectExtInst(ResVReg, ResType, I, CL::cos, GL::Cos);
  case TargetOpcode::G_FSIN:
    return selectExtInst(ResVReg, ResType, I, CL::sin, GL::Sin);

  case TargetOpcode::G_FSQRT:
    return selectExtInst(ResVReg, ResType, I, CL::sqrt, GL::Sqrt);

  case TargetOpcode::G_CTTZ:
  case TargetOpcode::G_CTTZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::ctz);
  case TargetOpcode::G_CTLZ:
  case TargetOpcode::G_CTLZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::clz);

  case TargetOpcode::G_INTRINSIC_ROUND:
    return selectExtInst(ResVReg, ResType, I, CL::round, GL::Round);
  case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);
  case TargetOpcode::G_INTRINSIC_TRUNC:
    return selectExtInst(ResVReg, ResType, I, CL::trunc, GL::Trunc);
  case TargetOpcode::G_FRINT:
  case TargetOpcode::G_FNEARBYINT:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);

  case TargetOpcode::G_SMULH:
    return selectExtInst(ResVReg, ResType, I, CL::s_mul_hi);
  case TargetOpcode::G_UMULH:
    return selectExtInst(ResVReg, ResType, I, CL::u_mul_hi);

  case TargetOpcode::G_SEXT:
    return selectExt(ResVReg, ResType, I, true);
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_ZEXT:
    return selectExt(ResVReg, ResType, I, false);
  case TargetOpcode::G_TRUNC:
    return selectTrunc(ResVReg, ResType, I);
  case TargetOpcode::G_FPTRUNC:
  case TargetOpcode::G_FPEXT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpFConvert);

  case TargetOpcode::G_PTRTOINT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertPtrToU);
  case TargetOpcode::G_INTTOPTR:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertUToPtr);
  case TargetOpcode::G_BITCAST:
    return selectBitcast(ResVReg, ResType, I);
  case TargetOpcode::G_ADDRSPACE_CAST:
    return selectAddrSpaceCast(ResVReg, ResType, I);
  case TargetOpcode::G_PTR_ADD: {
    // Currently, we get G_PTR_ADD only as a result of translating
    // global variables, initialized with constant expressions like GV + Const
    // (see test opencl/basic/progvar_prog_scope_init.ll).
    // TODO: extend the handler once we have other cases.
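    // Illustrative input this handles (an assumed shape, not taken from this
    // file): a global whose initializer is a constant GEP, e.g.
    //   @a = addrspace(1) global [4 x i32] zeroinitializer
    //   @p = addrspace(1) global ptr addrspace(1)
    //            getelementptr (i32, ptr addrspace(1) @a, i64 1)
    // The G_PTR_ADD is then folded into an OpSpecConstantOp carrying the
    // InBoundsPtrAccessChain opcode, as built below.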
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    Register GV = I.getOperand(1).getReg();
    MachineRegisterInfo::def_instr_iterator II = MRI->def_instr_begin(GV);
    (void)II;
    assert(((*II).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
            (*II).getOpcode() == TargetOpcode::COPY ||
            (*II).getOpcode() == SPIRV::OpVariable) &&
           isImm(I.getOperand(2), MRI));
    Register Idx = buildZerosVal(GR.getOrCreateSPIRVIntegerType(32, I, TII), I);
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addImm(static_cast<uint32_t>(
                       SPIRV::Opcode::InBoundsPtrAccessChain))
                   .addUse(GV)
                   .addUse(Idx)
                   .addUse(I.getOperand(2).getReg());
    return MIB.constrainAllUses(TII, TRI, RBI);
  }

  case TargetOpcode::G_ATOMICRMW_OR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicOr);
  case TargetOpcode::G_ATOMICRMW_ADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicIAdd);
  case TargetOpcode::G_ATOMICRMW_AND:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicAnd);
  case TargetOpcode::G_ATOMICRMW_MAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMax);
  case TargetOpcode::G_ATOMICRMW_MIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMin);
  case TargetOpcode::G_ATOMICRMW_SUB:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicISub);
  case TargetOpcode::G_ATOMICRMW_XOR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicXor);
  case TargetOpcode::G_ATOMICRMW_UMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMax);
  case TargetOpcode::G_ATOMICRMW_UMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMin);
  case TargetOpcode::G_ATOMICRMW_XCHG:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicExchange);
  case TargetOpcode::G_ATOMIC_CMPXCHG:
    return selectAtomicCmpXchg(ResVReg, ResType, I);

  case TargetOpcode::G_ATOMICRMW_FADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFAddEXT);
  case TargetOpcode::G_ATOMICRMW_FSUB:
    // Translate G_ATOMICRMW_FSUB to OpAtomicFAddEXT with negative value operand
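    // i.e. (illustrative): atomicrmw fsub %ptr, %val is selected as
    //   %neg = OpFNegate %val
    //   OpAtomicFAddEXT %ptr, %scope, %semantics, %neg
    // via the NegateOpcode parameter of selectAtomicRMW below.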
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFAddEXT,
                           SPIRV::OpFNegate);
  case TargetOpcode::G_ATOMICRMW_FMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFMinEXT);
  case TargetOpcode::G_ATOMICRMW_FMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFMaxEXT);

  case TargetOpcode::G_FENCE:
    return selectFence(I);

  case TargetOpcode::G_STACKSAVE:
    return selectStackSave(ResVReg, ResType, I);
  case TargetOpcode::G_STACKRESTORE:
    return selectStackRestore(I);

  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(I);

  default:
    return false;
  }
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst) const {
  return selectExtInst(ResVReg, ResType, I,
                       {{SPIRV::InstructionSet::OpenCL_std, CLInst}});
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst,
                                             GL::GLSLExtInst GLInst) const {
  ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
                          {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
  return selectExtInst(ResVReg, ResType, I, ExtInsts);
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             const ExtInstList &Insts) const {
  for (const auto &Ex : Insts) {
    SPIRV::InstructionSet::InstructionSet Set = Ex.first;
    uint32_t Opcode = Ex.second;
    if (STI.canUseExtInstSet(Set)) {
      MachineBasicBlock &BB = *I.getParent();
      auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
                     .addDef(ResVReg)
                     .addUse(GR.getSPIRVTypeID(ResType))
                     .addImm(static_cast<uint32_t>(Set))
                     .addImm(Opcode);
      const unsigned NumOps = I.getNumOperands();
      for (unsigned i = 1; i < NumOps; ++i)
        MIB.add(I.getOperand(i));
      return MIB.constrainAllUses(TII, TRI, RBI);
    }
  }
  return false;
}

bool SPIRVInstructionSelector::selectUnOpWithSrc(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I,
                                                 Register SrcReg,
                                                 unsigned Opcode) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(SrcReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectUnOp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I,
                                          unsigned Opcode) const {
  return selectUnOpWithSrc(ResVReg, ResType, I, I.getOperand(1).getReg(),
                           Opcode);
}

bool SPIRVInstructionSelector::selectBitcast(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I) const {
  Register OpReg = I.getOperand(1).getReg();
  SPIRVType *OpType = OpReg.isValid() ? GR.getSPIRVTypeForVReg(OpReg) : nullptr;
  if (!GR.isBitcastCompatible(ResType, OpType))
    report_fatal_error("incompatible result and operand types in a bitcast");
  return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
}

static SPIRV::Scope::Scope getScope(SyncScope::ID Ord,
                                    SPIRVMachineModuleInfo *MMI) {
  if (Ord == SyncScope::SingleThread || Ord == MMI->Work_ItemSSID)
    return SPIRV::Scope::Invocation;
  else if (Ord == SyncScope::System || Ord == MMI->DeviceSSID)
    return SPIRV::Scope::Device;
  else if (Ord == MMI->WorkGroupSSID)
    return SPIRV::Scope::Workgroup;
  else if (Ord == MMI->AllSVMDevicesSSID)
    return SPIRV::Scope::CrossDevice;
  else if (Ord == MMI->SubGroupSSID)
    return SPIRV::Scope::Subgroup;
  else
    // The OpenCL approach is: "The functions that do not have memory_scope
    // argument have the same semantics as the corresponding functions with the
    // memory_scope argument set to memory_scope_device." See ref.:
    // https://registry.khronos.org/OpenCL/specs/3.0-unified/html/OpenCL_C.html#atomic-functions
    // In our case if the scope is unknown, assuming that SPIR-V code is to be
    // consumed in an OpenCL environment, we use the same approach and set the
    // scope to memory_scope_device.
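    // (Illustrative: an atomic or fence whose syncscope string is not one of
    // the IDs cached above, e.g. some target-specific scope name, falls
    // through to here and is given Device scope.)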
    return SPIRV::Scope::Device;
}

static void addMemoryOperands(MachineMemOperand *MemOp,
                              MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (MemOp->isVolatile())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (MemOp->isNonTemporal())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
  if (MemOp->getAlign().value())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None)) {
    MIB.addImm(SpvMemOp);
    if (SpvMemOp & static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
      MIB.addImm(MemOp->getAlign().value());
  }
}

static void addMemoryOperands(uint64_t Flags, MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (Flags & MachineMemOperand::Flags::MOVolatile)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (Flags & MachineMemOperand::Flags::MONonTemporal)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None))
    MIB.addImm(SpvMemOp);
}

bool SPIRVInstructionSelector::selectLoad(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(Ptr);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
           I.getOpcode() ==
               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStore(MachineInstr &I) const {
  unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
  Register StoreVal = I.getOperand(0 + OpOffset).getReg();
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpStore))
                 .addUse(Ptr)
                 .addUse(StoreVal);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
           I.getOpcode() ==
               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStackSave(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
    report_fatal_error(
        "llvm.stacksave intrinsic: this instruction requires the following "
        "SPIR-V extension: SPV_INTEL_variable_length_array",
        false);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSaveMemoryINTEL))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStackRestore(MachineInstr &I) const {
  if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
    report_fatal_error(
        "llvm.stackrestore intrinsic: this instruction requires the following "
        "SPIR-V extension: SPV_INTEL_variable_length_array",
        false);
  if (!I.getOperand(0).isReg())
    return false;
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpRestoreMemoryINTEL))
      .addUse(I.getOperand(0).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg,
                                                  MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  Register SrcReg = I.getOperand(1).getReg();
  if (I.getOpcode() == TargetOpcode::G_MEMSET) {
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    unsigned Val = getIConstVal(I.getOperand(1).getReg(), MRI);
    unsigned Num = getIConstVal(I.getOperand(2).getReg(), MRI);
    SPIRVType *ValTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
    SPIRVType *ArrTy = GR.getOrCreateSPIRVArrayType(ValTy, Num, I, TII);
    Register Const = GR.getOrCreateConsIntArray(Val, I, ArrTy, TII);
    SPIRVType *VarTy = GR.getOrCreateSPIRVPointerType(
        ArrTy, I, TII, SPIRV::StorageClass::UniformConstant);
    // TODO: check if we have such GV, add init, use buildGlobalVariable.
    Function &CurFunction = GR.CurMF->getFunction();
    Type *LLVMArrTy =
        ArrayType::get(IntegerType::get(CurFunction.getContext(), 8), Num);
    // Module takes ownership of the global var.
    GlobalVariable *GV = new GlobalVariable(*CurFunction.getParent(), LLVMArrTy,
                                            true, GlobalValue::InternalLinkage,
                                            Constant::getNullValue(LLVMArrTy));
    Register VarReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
    GR.add(GV, GR.CurMF, VarReg);

    buildOpDecorate(VarReg, I, TII, SPIRV::Decoration::Constant, {});
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
        .addDef(VarReg)
        .addUse(GR.getSPIRVTypeID(VarTy))
        .addImm(SPIRV::StorageClass::UniformConstant)
        .addUse(Const)
        .constrainAllUses(TII, TRI, RBI);
    SPIRVType *SourceTy = GR.getOrCreateSPIRVPointerType(
        ValTy, I, TII, SPIRV::StorageClass::UniformConstant);
    SrcReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
    selectUnOpWithSrc(SrcReg, SourceTy, I, VarReg, SPIRV::OpBitcast);
  }
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCopyMemorySized))
                 .addUse(I.getOperand(0).getReg())
                 .addUse(SrcReg)
                 .addUse(I.getOperand(2).getReg());
  if (I.getNumMemOperands())
    addMemoryOperands(*I.memoperands_begin(), MIB);
  bool Result = MIB.constrainAllUses(TII, TRI, RBI);
  if (ResVReg.isValid() && ResVReg != MIB->getOperand(0).getReg())
    BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY), ResVReg)
        .addUse(MIB->getOperand(0).getReg());
  return Result;
}

bool SPIRVInstructionSelector::selectAtomicRMW(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I,
                                               unsigned NewOpcode,
                                               unsigned NegateOpcode) const {
  assert(I.hasOneMemOperand());
  const MachineMemOperand *MemOp = *I.memoperands_begin();
  uint32_t Scope =
      static_cast<uint32_t>(getScope(MemOp->getSyncScopeID(), MMI));
  Register ScopeReg = buildI32Constant(Scope, I);

  Register Ptr = I.getOperand(1).getReg();
  // TODO: Changed as it's implemented in the translator. See test/atomicrmw.ll
  // auto ScSem =
  // getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr));
  AtomicOrdering AO = MemOp->getSuccessOrdering();
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem /*| ScSem*/, I);

  bool Result = false;
  Register ValueReg = I.getOperand(2).getReg();
  if (NegateOpcode != 0) {
    // Translation with negative value operand is requested
    Register TmpReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    Result |= selectUnOpWithSrc(TmpReg, ResType, I, ValueReg, NegateOpcode);
    ValueReg = TmpReg;
  }

  Result |= BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(NewOpcode))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(Ptr)
                .addUse(ScopeReg)
                .addUse(MemSemReg)
                .addUse(ValueReg)
                .constrainAllUses(TII, TRI, RBI);
  return Result;
}

bool SPIRVInstructionSelector::selectUnmergeValues(MachineInstr &I) const {
  unsigned ArgI = I.getNumOperands() - 1;
  Register SrcReg =
      I.getOperand(ArgI).isReg() ? I.getOperand(ArgI).getReg() : Register(0);
  SPIRVType *DefType =
      SrcReg.isValid() ? GR.getSPIRVTypeForVReg(SrcReg) : nullptr;
  if (!DefType || DefType->getOpcode() != SPIRV::OpTypeVector)
    report_fatal_error(
        "cannot select G_UNMERGE_VALUES with a non-vector argument");

  SPIRVType *ScalarType =
      GR.getSPIRVTypeForVReg(DefType->getOperand(1).getReg());
  MachineBasicBlock &BB = *I.getParent();
  bool Res = false;
  for (unsigned i = 0; i < I.getNumDefs(); ++i) {
    Register ResVReg = I.getOperand(i).getReg();
    SPIRVType *ResType = GR.getSPIRVTypeForVReg(ResVReg);
    if (!ResType) {
      // There was no "assign type" action, let's fix this now
      ResType = ScalarType;
      MRI->setRegClass(ResVReg, &SPIRV::IDRegClass);
      MRI->setType(ResVReg, LLT::scalar(GR.getScalarOrVectorBitWidth(ResType)));
      GR.assignSPIRVTypeToVReg(ResType, ResVReg, *GR.CurMF);
    }
    auto MIB =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addUse(SrcReg)
            .addImm(static_cast<int64_t>(i));
    Res |= MIB.constrainAllUses(TII, TRI, RBI);
  }
  return Res;
}

bool SPIRVInstructionSelector::selectFence(MachineInstr &I) const {
  AtomicOrdering AO = AtomicOrdering(I.getOperand(0).getImm());
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem, I);
  SyncScope::ID Ord = SyncScope::ID(I.getOperand(1).getImm());
  uint32_t Scope = static_cast<uint32_t>(getScope(Ord, MMI));
  Register ScopeReg = buildI32Constant(Scope, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemoryBarrier))
      .addUse(ScopeReg)
      .addUse(MemSemReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  Register ScopeReg;
  Register MemSemEqReg;
  Register MemSemNeqReg;
  Register Ptr = I.getOperand(2).getReg();
  if (!isa<GIntrinsic>(I)) {
    assert(I.hasOneMemOperand());
    const MachineMemOperand *MemOp = *I.memoperands_begin();
    unsigned Scope =
        static_cast<uint32_t>(getScope(MemOp->getSyncScopeID(), MMI));
    ScopeReg = buildI32Constant(Scope, I);

    unsigned ScSem = static_cast<uint32_t>(
        getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr)));
    AtomicOrdering AO = MemOp->getSuccessOrdering();
    unsigned MemSemEq = static_cast<uint32_t>(getMemSemantics(AO)) | ScSem;
    MemSemEqReg = buildI32Constant(MemSemEq, I);
    AtomicOrdering FO = MemOp->getFailureOrdering();
    unsigned MemSemNeq = static_cast<uint32_t>(getMemSemantics(FO)) | ScSem;
    MemSemNeqReg =
        MemSemEq == MemSemNeq ? MemSemEqReg : buildI32Constant(MemSemNeq, I);
  } else {
    ScopeReg = I.getOperand(5).getReg();
    MemSemEqReg = I.getOperand(6).getReg();
    MemSemNeqReg = I.getOperand(7).getReg();
  }

  Register Cmp = I.getOperand(3).getReg();
  Register Val = I.getOperand(4).getReg();
  SPIRVType *SpvValTy = GR.getSPIRVTypeForVReg(Val);
  Register ACmpRes = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  const DebugLoc &DL = I.getDebugLoc();
  bool Result =
      BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpAtomicCompareExchange))
          .addDef(ACmpRes)
          .addUse(GR.getSPIRVTypeID(SpvValTy))
          .addUse(Ptr)
          .addUse(ScopeReg)
          .addUse(MemSemEqReg)
          .addUse(MemSemNeqReg)
          .addUse(Val)
          .addUse(Cmp)
          .constrainAllUses(TII, TRI, RBI);
  Register CmpSuccReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  SPIRVType *BoolTy = GR.getOrCreateSPIRVBoolType(I, TII);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpIEqual))
                .addDef(CmpSuccReg)
                .addUse(GR.getSPIRVTypeID(BoolTy))
                .addUse(ACmpRes)
                .addUse(Cmp)
                .constrainAllUses(TII, TRI, RBI);
  Register TmpReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(TmpReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(ACmpRes)
                .addUse(GR.getOrCreateUndef(I, ResType, TII))
                .addImm(0)
                .constrainAllUses(TII, TRI, RBI);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(CmpSuccReg)
                .addUse(TmpReg)
                .addImm(1)
                .constrainAllUses(TII, TRI, RBI);
  return Result;
}

static bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::Workgroup:
  case SPIRV::StorageClass::CrossWorkgroup:
  case SPIRV::StorageClass::Function:
    return true;
  default:
    return false;
  }
}

static bool isUSMStorageClass(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::DeviceOnlyINTEL:
  case SPIRV::StorageClass::HostOnlyINTEL:
    return true;
  default:
    return false;
  }
}

// In SPIR-V address space casting can only happen to and from the Generic
// storage class. We can also only cast Workgroup, CrossWorkgroup, or Function
// pointers to and from Generic pointers. As such, we can convert e.g. from
// Workgroup to Function by going via a Generic pointer as an intermediary. All
// other combinations can only be done by a bitcast, and are probably not safe.
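// Illustrative two-step cast selected below (assumed SPIR-V output shape,
// not emitted verbatim from this comment): a Workgroup-to-Function cast
// round-trips through Generic:
//   %gen = OpPtrCastToGeneric %GenericPtrTy %workgroup_ptr
//   %dst = OpGenericCastToPtr %FunctionPtrTy %gen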
bool SPIRVInstructionSelector::selectAddrSpaceCast(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  // If the AddrSpaceCast user is single and in OpConstantComposite or
  // OpVariable, we should select OpSpecConstantOp.
  auto UIs = MRI->use_instructions(ResVReg);
  if (!UIs.empty() && ++UIs.begin() == UIs.end() &&
      (UIs.begin()->getOpcode() == SPIRV::OpConstantComposite ||
       UIs.begin()->getOpcode() == SPIRV::OpVariable ||
       isSpvIntrinsic(*UIs.begin(), Intrinsic::spv_init_global))) {
    Register NewReg = I.getOperand(1).getReg();
    MachineBasicBlock &BB = *I.getParent();
    SPIRVType *SpvBaseTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
    ResType = GR.getOrCreateSPIRVPointerType(SpvBaseTy, I, TII,
                                             SPIRV::StorageClass::Generic);
    bool Result =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addImm(static_cast<uint32_t>(SPIRV::Opcode::PtrCastToGeneric))
            .addUse(NewReg)
            .constrainAllUses(TII, TRI, RBI);
    return Result;
  }
  Register SrcPtr = I.getOperand(1).getReg();
  SPIRVType *SrcPtrTy = GR.getSPIRVTypeForVReg(SrcPtr);
  SPIRV::StorageClass::StorageClass SrcSC = GR.getPointerStorageClass(SrcPtr);
  SPIRV::StorageClass::StorageClass DstSC = GR.getPointerStorageClass(ResVReg);

  // Don't generate a cast between identical storage classes.
  if (SrcSC == DstSC)
    return true;

  // Casting from an eligible pointer to Generic.
  if (DstSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(SrcSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
  // Casting from Generic to an eligible pointer.
  if (SrcSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(DstSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);
  // Casting between 2 eligible pointers using Generic as an intermediary.
  if (isGenericCastablePtr(SrcSC) && isGenericCastablePtr(DstSC)) {
    Register Tmp = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
        SrcPtrTy, I, TII, SPIRV::StorageClass::Generic);
    MachineBasicBlock &BB = *I.getParent();
    const DebugLoc &DL = I.getDebugLoc();
    bool Success = BuildMI(BB, I, DL, TII.get(SPIRV::OpPtrCastToGeneric))
                       .addDef(Tmp)
                       .addUse(GR.getSPIRVTypeID(GenericPtrTy))
                       .addUse(SrcPtr)
                       .constrainAllUses(TII, TRI, RBI);
    return Success && BuildMI(BB, I, DL, TII.get(SPIRV::OpGenericCastToPtr))
                          .addDef(ResVReg)
                          .addUse(GR.getSPIRVTypeID(ResType))
                          .addUse(Tmp)
                          .constrainAllUses(TII, TRI, RBI);
  }

  // Check if instructions from the SPV_INTEL_usm_storage_classes extension may
  // be applied.
  if (isUSMStorageClass(SrcSC) && DstSC == SPIRV::StorageClass::CrossWorkgroup)
    return selectUnOp(ResVReg, ResType, I,
                      SPIRV::OpPtrCastToCrossWorkgroupINTEL);
  if (SrcSC == SPIRV::StorageClass::CrossWorkgroup && isUSMStorageClass(DstSC))
    return selectUnOp(ResVReg, ResType, I,
                      SPIRV::OpCrossWorkgroupCastToPtrINTEL);

  // TODO: Should this case just be disallowed completely?
  // We're casting 2 other arbitrary address spaces, so have to bitcast.
  return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
}

static unsigned getFCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::FCMP_OEQ:
    return SPIRV::OpFOrdEqual;
  case CmpInst::FCMP_OGE:
    return SPIRV::OpFOrdGreaterThanEqual;
  case CmpInst::FCMP_OGT:
    return SPIRV::OpFOrdGreaterThan;
  case CmpInst::FCMP_OLE:
    return SPIRV::OpFOrdLessThanEqual;
  case CmpInst::FCMP_OLT:
    return SPIRV::OpFOrdLessThan;
  case CmpInst::FCMP_ONE:
    return SPIRV::OpFOrdNotEqual;
  case CmpInst::FCMP_ORD:
    return SPIRV::OpOrdered;
  case CmpInst::FCMP_UEQ:
    return SPIRV::OpFUnordEqual;
  case CmpInst::FCMP_UGE:
    return SPIRV::OpFUnordGreaterThanEqual;
  case CmpInst::FCMP_UGT:
    return SPIRV::OpFUnordGreaterThan;
  case CmpInst::FCMP_ULE:
    return SPIRV::OpFUnordLessThanEqual;
  case CmpInst::FCMP_ULT:
    return SPIRV::OpFUnordLessThan;
  case CmpInst::FCMP_UNE:
    return SPIRV::OpFUnordNotEqual;
  case CmpInst::FCMP_UNO:
    return SPIRV::OpUnordered;
  default:
    llvm_unreachable("Unknown predicate type for FCmp");
  }
}

static unsigned getICmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpIEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpINotEqual;
  case CmpInst::ICMP_SGE:
    return SPIRV::OpSGreaterThanEqual;
  case CmpInst::ICMP_SGT:
    return SPIRV::OpSGreaterThan;
  case CmpInst::ICMP_SLE:
    return SPIRV::OpSLessThanEqual;
  case CmpInst::ICMP_SLT:
    return SPIRV::OpSLessThan;
  case CmpInst::ICMP_UGE:
    return SPIRV::OpUGreaterThanEqual;
  case CmpInst::ICMP_UGT:
    return SPIRV::OpUGreaterThan;
  case CmpInst::ICMP_ULE:
    return SPIRV::OpULessThanEqual;
  case CmpInst::ICMP_ULT:
    return SPIRV::OpULessThan;
  default:
    llvm_unreachable("Unknown predicate type for ICmp");
  }
}

static unsigned getPtrCmpOpcode(unsigned Pred) {
  switch (static_cast<CmpInst::Predicate>(Pred)) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpPtrEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpPtrNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for pointer comparison");
  }
}

// Return the logical operation, or abort if none exists.
static unsigned getBoolCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpLogicalEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpLogicalNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for Bool comparison");
  }
}

bool SPIRVInstructionSelector::selectBitreverse(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpBitReverse))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(1).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectFreeze(Register ResVReg,
                                            const SPIRVType *ResType,
                                            MachineInstr &I) const {
  // There is no way to implement `freeze` correctly without support on SPIR-V
  // standard side, but we may at least address a simple (static) case when
  // undef/poison value presence is obvious. The main benefit of even
  // incomplete `freeze` support is preventing translation from crashing due
  // to lack of support on legalization and instruction selection steps.
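  // Illustrative cases handled below:
  //   %r = freeze(<undef/poison def>)  ->  %r = OpConstantNull <type>
  //   %r = freeze(%well_defined)       ->  %r = COPY %well_defined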
  if (!I.getOperand(0).isReg() || !I.getOperand(1).isReg())
    return false;
  Register OpReg = I.getOperand(1).getReg();
  if (MachineInstr *Def = MRI->getVRegDef(OpReg)) {
    Register Reg;
    switch (Def->getOpcode()) {
    case SPIRV::ASSIGN_TYPE:
      if (MachineInstr *AssignToDef =
              MRI->getVRegDef(Def->getOperand(1).getReg())) {
        if (AssignToDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
          Reg = Def->getOperand(2).getReg();
      }
      break;
    case SPIRV::OpUndef:
      Reg = Def->getOperand(1).getReg();
      break;
    }
    unsigned DestOpCode;
    if (Reg.isValid()) {
      DestOpCode = SPIRV::OpConstantNull;
    } else {
      DestOpCode = TargetOpcode::COPY;
      Reg = OpReg;
    }
    return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(DestOpCode))
        .addDef(I.getOperand(0).getReg())
        .addUse(Reg)
        .constrainAllUses(TII, TRI, RBI);
  }
  return false;
}

bool SPIRVInstructionSelector::selectConstVector(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  // TODO: only const case is supported for now.
  assert(std::all_of(
      I.operands_begin(), I.operands_end(), [this](const MachineOperand &MO) {
        if (MO.isDef())
          return true;
        if (!MO.isReg())
          return false;
        SPIRVType *ConstTy = this->MRI->getVRegDef(MO.getReg());
        assert(ConstTy && ConstTy->getOpcode() == SPIRV::ASSIGN_TYPE &&
               ConstTy->getOperand(1).isReg());
        Register ConstReg = ConstTy->getOperand(1).getReg();
        const MachineInstr *Const = this->MRI->getVRegDef(ConstReg);
        assert(Const);
        return (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
                Const->getOpcode() == TargetOpcode::G_FCONSTANT);
      }));

  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                     TII.get(SPIRV::OpConstantComposite))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  for (unsigned i = I.getNumExplicitDefs(); i < I.getNumExplicitOperands(); ++i)
    MIB.addUse(I.getOperand(i).getReg());
  return MIB.constrainAllUses(TII, TRI, RBI);
}

static unsigned getArrayComponentCount(MachineRegisterInfo *MRI,
                                       const SPIRVType *ResType) {
  Register OpReg = ResType->getOperand(2).getReg();
  SPIRVType *OpDef = MRI->getVRegDef(OpReg);
  if (!OpDef)
    return 0;
  if (OpDef->getOpcode() == SPIRV::ASSIGN_TYPE &&
      OpDef->getOperand(1).isReg()) {
    if (SPIRVType *RefDef = MRI->getVRegDef(OpDef->getOperand(1).getReg()))
      OpDef = RefDef;
  }
  unsigned N = OpDef->getOpcode() == TargetOpcode::G_CONSTANT
                   ? OpDef->getOperand(1).getCImm()->getValue().getZExtValue()
                   : 0;
  return N;
}

// Return true if the type represents a constant register
static bool isConstReg(MachineRegisterInfo *MRI, SPIRVType *OpDef) {
  if (OpDef->getOpcode() == SPIRV::ASSIGN_TYPE &&
      OpDef->getOperand(1).isReg()) {
    if (SPIRVType *RefDef = MRI->getVRegDef(OpDef->getOperand(1).getReg()))
      OpDef = RefDef;
  }
  return OpDef->getOpcode() == TargetOpcode::G_CONSTANT ||
         OpDef->getOpcode() == TargetOpcode::G_FCONSTANT;
}

// Return true if the virtual register represents a constant
static bool isConstReg(MachineRegisterInfo *MRI, Register OpReg) {
  if (SPIRVType *OpDef = MRI->getVRegDef(OpReg))
    return isConstReg(MRI, OpDef);
  return false;
}

bool SPIRVInstructionSelector::selectSplatVector(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  unsigned N = 0;
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    N = GR.getScalarOrVectorComponentCount(ResType);
  else if (ResType->getOpcode() == SPIRV::OpTypeArray)
    N = getArrayComponentCount(MRI, ResType);
  else
    report_fatal_error("Cannot select G_SPLAT_VECTOR with a non-vector result");

  unsigned OpIdx = I.getNumExplicitDefs();
  if (!I.getOperand(OpIdx).isReg())
    report_fatal_error("Unexpected argument in G_SPLAT_VECTOR");

  // Check if we may construct a constant vector.
  Register OpReg = I.getOperand(OpIdx).getReg();
  bool IsConst = isConstReg(MRI, OpReg);

  if (!IsConst && N < 2)
    report_fatal_error(
        "There must be at least two constituent operands in a vector");

  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                     TII.get(IsConst ? SPIRV::OpConstantComposite
                                     : SPIRV::OpCompositeConstruct))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  for (unsigned i = 0; i < N; ++i)
    MIB.addUse(OpReg);
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectCmp(Register ResVReg,
                                         const SPIRVType *ResType,
                                         unsigned CmpOpc,
                                         MachineInstr &I) const {
  Register Cmp0 = I.getOperand(2).getReg();
  Register Cmp1 = I.getOperand(3).getReg();
  assert(GR.getSPIRVTypeForVReg(Cmp0)->getOpcode() ==
             GR.getSPIRVTypeForVReg(Cmp1)->getOpcode() &&
         "CMP operands should have the same type");
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CmpOpc))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(Cmp0)
      .addUse(Cmp1)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectICmp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  auto Pred = I.getOperand(1).getPredicate();
  unsigned CmpOpc;

  Register CmpOperand = I.getOperand(2).getReg();
  if (GR.isScalarOfType(CmpOperand, SPIRV::OpTypePointer))
    CmpOpc = getPtrCmpOpcode(Pred);
  else if (GR.isScalarOrVectorOfType(CmpOperand, SPIRV::OpTypeBool))
    CmpOpc = getBoolCmpOpcode(Pred);
  else
    CmpOpc = getICmpOpcode(Pred);
  return selectCmp(ResVReg, ResType, CmpOpc, I);
}

void SPIRVInstructionSelector::renderFImm32(MachineInstrBuilder &MIB,
                                            const MachineInstr &I,
                                            int OpIdx) const {
  assert(I.getOpcode() == TargetOpcode::G_FCONSTANT && OpIdx == -1 &&
         "Expected G_FCONSTANT");
  const ConstantFP *FPImm = I.getOperand(1).getFPImm();
  addNumImm(FPImm->getValueAPF().bitcastToAPInt(), MIB);
}

void SPIRVInstructionSelector::renderImm32(MachineInstrBuilder &MIB,
                                           const MachineInstr &I,
                                           int OpIdx) const {
  assert(I.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  addNumImm(I.getOperand(1).getCImm()->getValue(), MIB);
}

Register
SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &I,
                                           const SPIRVType *ResType) const {
  Type *LLVMTy = IntegerType::get(GR.CurMF->getFunction().getContext(), 32);
  const SPIRVType *SpvI32Ty =
      ResType ? ResType : GR.getOrCreateSPIRVIntegerType(32, I, TII);
  // Find a constant in DT or build a new one.
  auto ConstInt = ConstantInt::get(LLVMTy, Val);
  Register NewReg = GR.find(ConstInt, GR.CurMF);
  if (!NewReg.isValid()) {
    NewReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
    GR.add(ConstInt, GR.CurMF, NewReg);
    MachineInstr *MI;
    MachineBasicBlock &BB = *I.getParent();
    if (Val == 0) {
      MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
               .addDef(NewReg)
               .addUse(GR.getSPIRVTypeID(SpvI32Ty));
    } else {
      MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
               .addDef(NewReg)
               .addUse(GR.getSPIRVTypeID(SpvI32Ty))
               .addImm(APInt(32, Val).getZExtValue());
    }
    constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
  }
  return NewReg;
}

bool SPIRVInstructionSelector::selectFCmp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  unsigned CmpOp = getFCmpOpcode(I.getOperand(1).getPredicate());
  return selectCmp(ResVReg, ResType, CmpOp, I);
}

Register SPIRVInstructionSelector::buildZerosVal(const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    return GR.getOrCreateConsIntVector(0, I, ResType, TII);
  return GR.getOrCreateConstInt(0, I, ResType, TII);
}

Register SPIRVInstructionSelector::buildOnesVal(bool AllOnes,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
  APInt One =
      AllOnes ? APInt::getAllOnes(BitWidth) : APInt::getOneBitSet(BitWidth, 0);
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    return GR.getOrCreateConsIntVector(One.getZExtValue(), I, ResType, TII);
  return GR.getOrCreateConstInt(One.getZExtValue(), I, ResType, TII);
}

bool SPIRVInstructionSelector::selectSelect(Register ResVReg,
                                            const SPIRVType *ResType,
                                            MachineInstr &I,
                                            bool IsSigned) const {
  // To extend a bool, we need to use OpSelect between constants.
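  // e.g. (illustrative): zext i1 %c to i32 becomes OpSelect %c, 1, 0, while
  // sext i1 %c to i32 selects all-ones via buildOnesVal: OpSelect %c, -1, 0.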
  Register ZeroReg = buildZerosVal(ResType, I);
  Register OneReg = buildOnesVal(IsSigned, ResType, I);
  bool IsScalarBool =
      GR.isScalarOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool);
  unsigned Opcode =
      IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectSIVCond;
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(1).getReg())
      .addUse(OneReg)
      .addUse(ZeroReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectIToF(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I, bool IsSigned,
                                          unsigned Opcode) const {
  Register SrcReg = I.getOperand(1).getReg();
  // We can convert a bool value directly to float type without OpConvert*ToF,
  // however the translator generates OpSelect+OpConvert*ToF, so we do the same.
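  // e.g. (illustrative): uitofp i1 %b to float is emitted as
  //   %tmp = OpSelect %b, 1, 0     ; integer of the result's bit width
  //   %res = OpConvertUToF %tmp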
  if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool)) {
    unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
    SPIRVType *TmpType = GR.getOrCreateSPIRVIntegerType(BitWidth, I, TII);
    if (ResType->getOpcode() == SPIRV::OpTypeVector) {
      const unsigned NumElts = ResType->getOperand(2).getImm();
      TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts, I, TII);
    }
    SrcReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    selectSelect(SrcReg, TmpType, I, false);
  }
  return selectUnOpWithSrc(ResVReg, ResType, I, SrcReg, Opcode);
}

bool SPIRVInstructionSelector::selectExt(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I, bool IsSigned) const {
  if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool))
    return selectSelect(ResVReg, ResType, I, IsSigned);
  unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
  return selectUnOp(ResVReg, ResType, I, Opcode);
}

bool SPIRVInstructionSelector::selectIntToBool(Register IntReg,
                                               Register ResVReg,
                                               MachineInstr &I,
                                               const SPIRVType *IntTy,
                                               const SPIRVType *BoolTy) const {
  // To truncate to a bool, we use OpBitwiseAnd 1 and OpINotEqual to zero.
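  // i.e. (illustrative): trunc i32 %x to i1 is emitted as
  //   %bit = OpBitwiseAndS %x, 1
  //   %res = OpINotEqual %bit, 0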
  Register BitIntReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  bool IsVectorTy = IntTy->getOpcode() == SPIRV::OpTypeVector;
  unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
  Register Zero = buildZerosVal(IntTy, I);
  Register One = buildOnesVal(false, IntTy, I);
  MachineBasicBlock &BB = *I.getParent();
  BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(BitIntReg)
      .addUse(GR.getSPIRVTypeID(IntTy))
      .addUse(IntReg)
      .addUse(One)
      .constrainAllUses(TII, TRI, RBI);
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(BoolTy))
      .addUse(BitIntReg)
      .addUse(Zero)
      .constrainAllUses(TII, TRI, RBI);
}
1484
1485bool SPIRVInstructionSelector::selectTrunc(Register ResVReg,
1486 const SPIRVType *ResType,
1487 MachineInstr &I) const {
1488 if (GR.isScalarOrVectorOfType(ResVReg, SPIRV::OpTypeBool)) {
1489 Register IntReg = I.getOperand(1).getReg();
1490 const SPIRVType *ArgType = GR.getSPIRVTypeForVReg(IntReg);
1491 return selectIntToBool(IntReg, ResVReg, I, ArgType, ResType);
1492 }
1493 bool IsSigned = GR.isScalarOrVectorSigned(ResType);
1494 unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
1495 return selectUnOp(ResVReg, ResType, I, Opcode);
1496}
1497
1498bool SPIRVInstructionSelector::selectConst(Register ResVReg,
1499 const SPIRVType *ResType,
1500 const APInt &Imm,
1501 MachineInstr &I) const {
1502 unsigned TyOpcode = ResType->getOpcode();
1503 assert(TyOpcode != SPIRV::OpTypePointer || Imm.isZero());
1504 MachineBasicBlock &BB = *I.getParent();
1505 if ((TyOpcode == SPIRV::OpTypePointer || TyOpcode == SPIRV::OpTypeEvent) &&
1506 Imm.isZero())
1507 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
1508 .addDef(ResVReg)
1509 .addUse(GR.getSPIRVTypeID(ResType))
1510 .constrainAllUses(TII, TRI, RBI);
1511 if (TyOpcode == SPIRV::OpTypeInt) {
1512 assert(Imm.getBitWidth() <= 64 && "Unsupported integer width!");
1513 Register Reg = GR.getOrCreateConstInt(Imm.getZExtValue(), I, ResType, TII);
1514 if (Reg == ResVReg)
1515 return true;
1516 return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
1517 .addDef(ResVReg)
1518 .addUse(Reg)
1519 .constrainAllUses(TII, TRI, RBI);
1520 }
1521 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
1522 .addDef(ResVReg)
1523 .addUse(GR.getSPIRVTypeID(ResType));
1524 // <=32-bit integers should be caught by the sdag pattern.
1525 assert(Imm.getBitWidth() > 32);
1526 addNumImm(Imm, MIB);
1527 return MIB.constrainAllUses(TII, TRI, RBI);
1528}
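// Note: SPIR-V encodes wide literals as a sequence of 32-bit words, low-order
// word first, which is what addNumImm produces here. Illustrative sketch: a
// 64-bit OpConstantI of 0x100000001 carries two literal words, 1 (low) and
// 1 (high).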
1529
1530bool SPIRVInstructionSelector::selectOpUndef(Register ResVReg,
1531 const SPIRVType *ResType,
1532 MachineInstr &I) const {
1533 return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
1534 .addDef(ResVReg)
1535 .addUse(GR.getSPIRVTypeID(ResType))
1536 .constrainAllUses(TII, TRI, RBI);
1537}
1538
1539 static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
1540   assert(MO.isReg());
1541 const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
1542 if (TypeInst->getOpcode() != SPIRV::ASSIGN_TYPE)
1543 return false;
1544 assert(TypeInst->getOperand(1).isReg());
1545 MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
1546 return ImmInst->getOpcode() == TargetOpcode::G_CONSTANT;
1547}
1548
1549static int64_t foldImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
1550 const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
1551 MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
1552 assert(ImmInst->getOpcode() == TargetOpcode::G_CONSTANT);
1553 return ImmInst->getOperand(1).getCImm()->getZExtValue();
1554}
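// Illustrative sketch of the def chain these helpers expect (register names
// made up), as produced ahead of instruction selection:
//   %idx = ASSIGN_TYPE %k, %int_ty
//   %k   = G_CONSTANT i32 4
// isImm() checks that the chain ends in G_CONSTANT; foldImm() then returns 4.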
1555
1556bool SPIRVInstructionSelector::selectInsertVal(Register ResVReg,
1557 const SPIRVType *ResType,
1558 MachineInstr &I) const {
1559 MachineBasicBlock &BB = *I.getParent();
1560 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeInsert))
1561 .addDef(ResVReg)
1562 .addUse(GR.getSPIRVTypeID(ResType))
1563 // object to insert
1564 .addUse(I.getOperand(3).getReg())
1565 // composite to insert into
1566 .addUse(I.getOperand(2).getReg());
1567 for (unsigned i = 4; i < I.getNumOperands(); i++)
1568 MIB.addImm(foldImm(I.getOperand(i), MRI));
1569 return MIB.constrainAllUses(TII, TRI, RBI);
1570}
1571
1572bool SPIRVInstructionSelector::selectExtractVal(Register ResVReg,
1573 const SPIRVType *ResType,
1574 MachineInstr &I) const {
1575 MachineBasicBlock &BB = *I.getParent();
1576 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
1577 .addDef(ResVReg)
1578 .addUse(GR.getSPIRVTypeID(ResType))
1579 .addUse(I.getOperand(2).getReg());
1580 for (unsigned i = 3; i < I.getNumOperands(); i++)
1581 MIB.addImm(foldImm(I.getOperand(i), MRI));
1582 return MIB.constrainAllUses(TII, TRI, RBI);
1583}
1584
1585bool SPIRVInstructionSelector::selectInsertElt(Register ResVReg,
1586 const SPIRVType *ResType,
1587 MachineInstr &I) const {
1588 if (isImm(I.getOperand(4), MRI))
1589 return selectInsertVal(ResVReg, ResType, I);
1590 MachineBasicBlock &BB = *I.getParent();
1591 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorInsertDynamic))
1592 .addDef(ResVReg)
1593 .addUse(GR.getSPIRVTypeID(ResType))
1594 .addUse(I.getOperand(2).getReg())
1595 .addUse(I.getOperand(3).getReg())
1596 .addUse(I.getOperand(4).getReg())
1597 .constrainAllUses(TII, TRI, RBI);
1598}
1599
1600bool SPIRVInstructionSelector::selectExtractElt(Register ResVReg,
1601 const SPIRVType *ResType,
1602 MachineInstr &I) const {
1603 if (isImm(I.getOperand(3), MRI))
1604 return selectExtractVal(ResVReg, ResType, I);
1605 MachineBasicBlock &BB = *I.getParent();
1606 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorExtractDynamic))
1607 .addDef(ResVReg)
1608 .addUse(GR.getSPIRVTypeID(ResType))
1609 .addUse(I.getOperand(2).getReg())
1610 .addUse(I.getOperand(3).getReg())
1611 .constrainAllUses(TII, TRI, RBI);
1612}
1613
1614bool SPIRVInstructionSelector::selectGEP(Register ResVReg,
1615 const SPIRVType *ResType,
1616 MachineInstr &I) const {
1617 const bool IsGEPInBounds = I.getOperand(2).getImm();
1618
1619 // OpAccessChain could be used for OpenCL, but the SPIRV-LLVM Translator only
1620 // relies on PtrAccessChain, so we'll try not to deviate. For Vulkan however,
1621 // we have to use Op[InBounds]AccessChain.
1622 const unsigned Opcode = STI.isVulkanEnv()
1623 ? (IsGEPInBounds ? SPIRV::OpInBoundsAccessChain
1624 : SPIRV::OpAccessChain)
1625 : (IsGEPInBounds ? SPIRV::OpInBoundsPtrAccessChain
1626 : SPIRV::OpPtrAccessChain);
1627
1628 auto Res = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
1629 .addDef(ResVReg)
1630 .addUse(GR.getSPIRVTypeID(ResType))
1631 // Object to get a pointer to.
1632 .addUse(I.getOperand(3).getReg());
1633 // Adding indices.
1634 const unsigned StartingIndex =
1635 (Opcode == SPIRV::OpAccessChain || Opcode == SPIRV::OpInBoundsAccessChain)
1636 ? 5
1637 : 4;
1638 for (unsigned i = StartingIndex; i < I.getNumExplicitOperands(); ++i)
1639 Res.addUse(I.getOperand(i).getReg());
1640 return Res.constrainAllUses(TII, TRI, RBI);
1641}
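// Illustrative sketch (names made up): on an OpenCL-flavoured target,
//   %p = getelementptr inbounds i32, ptr %base, i64 %i
// selects to
//   %p = OpInBoundsPtrAccessChain %ptr_ty %base %i
// whereas a Vulkan target emits Op[InBounds]AccessChain, which lacks the
// leading Element operand (hence the differing StartingIndex above).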
1642
1643// Maybe wrap a value into OpSpecConstantOp
1644bool SPIRVInstructionSelector::wrapIntoSpecConstantOp(
1645 MachineInstr &I, SmallVector<Register> &CompositeArgs) const {
1646 bool Result = true;
1647 unsigned Lim = I.getNumExplicitOperands();
1648 for (unsigned i = I.getNumExplicitDefs() + 1; i < Lim; ++i) {
1649 Register OpReg = I.getOperand(i).getReg();
1650 SPIRVType *OpDefine = MRI->getVRegDef(OpReg);
1651 SPIRVType *OpType = GR.getSPIRVTypeForVReg(OpReg);
1652 if (!OpDefine || !OpType || isConstReg(MRI, OpDefine) ||
1653 OpDefine->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST) {
1654 // The case of G_ADDRSPACE_CAST inside spv_const_composite() is processed
1655 // by selectAddrSpaceCast()
1656 CompositeArgs.push_back(OpReg);
1657 continue;
1658 }
1659 MachineFunction *MF = I.getMF();
1660 Register WrapReg = GR.find(OpDefine, MF);
1661 if (WrapReg.isValid()) {
1662 CompositeArgs.push_back(WrapReg);
1663 continue;
1664 }
1665 // Create a new register for the wrapper
1666 WrapReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1667 GR.add(OpDefine, MF, WrapReg);
1668 CompositeArgs.push_back(WrapReg);
1669 // Decorate the wrapper register and generate a new instruction
1670 MRI->setType(WrapReg, LLT::pointer(0, 32));
1671 GR.assignSPIRVTypeToVReg(OpType, WrapReg, *MF);
1672 MachineBasicBlock &BB = *I.getParent();
1673 Result = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
1674 .addDef(WrapReg)
1675 .addUse(GR.getSPIRVTypeID(OpType))
1676 .addImm(static_cast<uint32_t>(SPIRV::Opcode::Bitcast))
1677 .addUse(OpReg)
1678 .constrainAllUses(TII, TRI, RBI);
1679 if (!Result)
1680 break;
1681 }
1682 return Result;
1683}
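// Illustrative sketch (names made up): a non-constant pointer operand of
// spv_const_composite is wrapped so the enclosing composite can still be
// selected as OpConstantComposite:
//   %wrap = OpSpecConstantOp %op_ty Bitcast %op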
1684
1685bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
1686 const SPIRVType *ResType,
1687 MachineInstr &I) const {
1688 MachineBasicBlock &BB = *I.getParent();
1689 Intrinsic::ID IID = cast<GIntrinsic>(I).getIntrinsicID();
1690 switch (IID) {
1691 case Intrinsic::spv_load:
1692 return selectLoad(ResVReg, ResType, I);
1693 case Intrinsic::spv_store:
1694 return selectStore(I);
1695 case Intrinsic::spv_extractv:
1696 return selectExtractVal(ResVReg, ResType, I);
1697 case Intrinsic::spv_insertv:
1698 return selectInsertVal(ResVReg, ResType, I);
1699 case Intrinsic::spv_extractelt:
1700 return selectExtractElt(ResVReg, ResType, I);
1701 case Intrinsic::spv_insertelt:
1702 return selectInsertElt(ResVReg, ResType, I);
1703 case Intrinsic::spv_gep:
1704 return selectGEP(ResVReg, ResType, I);
1705 case Intrinsic::spv_unref_global:
1706 case Intrinsic::spv_init_global: {
1707 MachineInstr *MI = MRI->getVRegDef(I.getOperand(1).getReg());
1708 MachineInstr *Init = I.getNumExplicitOperands() > 2
1709 ? MRI->getVRegDef(I.getOperand(2).getReg())
1710 : nullptr;
1711 assert(MI);
1712 return selectGlobalValue(MI->getOperand(0).getReg(), *MI, Init);
1713 }
1714 case Intrinsic::spv_undef: {
1715 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
1716 .addDef(ResVReg)
1717 .addUse(GR.getSPIRVTypeID(ResType));
1718 return MIB.constrainAllUses(TII, TRI, RBI);
1719 }
1720 case Intrinsic::spv_const_composite: {
1721 // If no values are attached, the composite is a null constant.
1722 bool IsNull = I.getNumExplicitDefs() + 1 == I.getNumExplicitOperands();
1723 // Select a proper instruction.
1724 unsigned Opcode = SPIRV::OpConstantNull;
1725 SmallVector<Register> CompositeArgs;
1726 if (!IsNull) {
1727 Opcode = SPIRV::OpConstantComposite;
1728 if (!wrapIntoSpecConstantOp(I, CompositeArgs))
1729 return false;
1730 }
1731 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
1732 .addDef(ResVReg)
1733 .addUse(GR.getSPIRVTypeID(ResType));
1734 // Skip the type MD node; it was already used when assign.type was generated
1735 // for this composite.
1735 if (!IsNull) {
1736 for (Register OpReg : CompositeArgs)
1737 MIB.addUse(OpReg);
1738 }
1739 return MIB.constrainAllUses(TII, TRI, RBI);
1740 }
1741 case Intrinsic::spv_assign_name: {
1742 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpName));
1743 MIB.addUse(I.getOperand(I.getNumExplicitDefs() + 1).getReg());
1744 for (unsigned i = I.getNumExplicitDefs() + 2;
1745 i < I.getNumExplicitOperands(); ++i) {
1746 MIB.addImm(I.getOperand(i).getImm());
1747 }
1748 return MIB.constrainAllUses(TII, TRI, RBI);
1749 }
1750 case Intrinsic::spv_switch: {
1751 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSwitch));
1752 for (unsigned i = 1; i < I.getNumExplicitOperands(); ++i) {
1753 if (I.getOperand(i).isReg())
1754 MIB.addReg(I.getOperand(i).getReg());
1755 else if (I.getOperand(i).isCImm())
1756 addNumImm(I.getOperand(i).getCImm()->getValue(), MIB);
1757 else if (I.getOperand(i).isMBB())
1758 MIB.addMBB(I.getOperand(i).getMBB());
1759 else
1760 llvm_unreachable("Unexpected OpSwitch operand");
1761 }
1762 return MIB.constrainAllUses(TII, TRI, RBI);
1763 }
1764 case Intrinsic::spv_cmpxchg:
1765 return selectAtomicCmpXchg(ResVReg, ResType, I);
1766 case Intrinsic::spv_unreachable:
1767 BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUnreachable));
1768 break;
1769 case Intrinsic::spv_alloca:
1770 return selectFrameIndex(ResVReg, ResType, I);
1771 case Intrinsic::spv_alloca_array:
1772 return selectAllocaArray(ResVReg, ResType, I);
1773 case Intrinsic::spv_assume:
1774 if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
1775 BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpAssumeTrueKHR))
1776 .addUse(I.getOperand(1).getReg());
1777 break;
1778 case Intrinsic::spv_expect:
1779 if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
1780 BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExpectKHR))
1781 .addDef(ResVReg)
1782 .addUse(GR.getSPIRVTypeID(ResType))
1783 .addUse(I.getOperand(2).getReg())
1784 .addUse(I.getOperand(3).getReg());
1785 break;
1786 case Intrinsic::spv_thread_id:
1787 return selectSpvThreadId(ResVReg, ResType, I);
1788 case Intrinsic::spv_lifetime_start:
1789 case Intrinsic::spv_lifetime_end: {
1790 unsigned Op = IID == Intrinsic::spv_lifetime_start ? SPIRV::OpLifetimeStart
1791 : SPIRV::OpLifetimeStop;
1792 int64_t Size = I.getOperand(I.getNumExplicitDefs() + 1).getImm();
1793 Register PtrReg = I.getOperand(I.getNumExplicitDefs() + 2).getReg();
1794     unsigned PointeeOpType = GR.getPointeeTypeOp(PtrReg);
1795     bool IsNonvoidPtr = PointeeOpType != 0 && PointeeOpType != SPIRV::OpTypeVoid;
1796 if (Size == -1 || IsNonvoidPtr)
1797 Size = 0;
1798 BuildMI(BB, I, I.getDebugLoc(), TII.get(Op)).addUse(PtrReg).addImm(Size);
1799 } break;
1800 default: {
1801 std::string DiagMsg;
1802 raw_string_ostream OS(DiagMsg);
1803 I.print(OS);
1804 DiagMsg = "Intrinsic selection not implemented: " + DiagMsg;
1805 report_fatal_error(DiagMsg.c_str(), false);
1806 }
1807 }
1808 return true;
1809}
1810
1811bool SPIRVInstructionSelector::selectAllocaArray(Register ResVReg,
1812 const SPIRVType *ResType,
1813 MachineInstr &I) const {
1814   // The alloca instruction had an allocation-size operand, and that operand
1815   // is not the constant 1.
1816 MachineBasicBlock &BB = *I.getParent();
1817 return BuildMI(BB, I, I.getDebugLoc(),
1818 TII.get(SPIRV::OpVariableLengthArrayINTEL))
1819 .addDef(ResVReg)
1820 .addUse(GR.getSPIRVTypeID(ResType))
1821 .addUse(I.getOperand(2).getReg())
1822 .constrainAllUses(TII, TRI, RBI);
1823}
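// Illustrative sketch (names made up): a dynamically sized alloca such as
//   %a = alloca i32, i64 %n
// is lowered through this path to
//   %a = OpVariableLengthArrayINTEL %ptr_fn %n
// which relies on the SPV_INTEL_variable_length_array extension.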
1824
1825bool SPIRVInstructionSelector::selectFrameIndex(Register ResVReg,
1826 const SPIRVType *ResType,
1827 MachineInstr &I) const {
1828 return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
1829 .addDef(ResVReg)
1830 .addUse(GR.getSPIRVTypeID(ResType))
1831 .addImm(static_cast<uint32_t>(SPIRV::StorageClass::Function))
1832 .constrainAllUses(TII, TRI, RBI);
1833}
1834
1835bool SPIRVInstructionSelector::selectBranch(MachineInstr &I) const {
1836 // InstructionSelector walks backwards through the instructions. We can use
1837 // both a G_BR and a G_BRCOND to create an OpBranchConditional. We hit G_BR
1838 // first, so we can generate an OpBranchConditional here. If there is no
1839 // G_BRCOND, we just use OpBranch for a regular unconditional branch.
1840 const MachineInstr *PrevI = I.getPrevNode();
1841 MachineBasicBlock &MBB = *I.getParent();
1842 if (PrevI != nullptr && PrevI->getOpcode() == TargetOpcode::G_BRCOND) {
1843 return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
1844 .addUse(PrevI->getOperand(0).getReg())
1845 .addMBB(PrevI->getOperand(1).getMBB())
1846 .addMBB(I.getOperand(0).getMBB())
1847 .constrainAllUses(TII, TRI, RBI);
1848 }
1849 return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranch))
1850 .addMBB(I.getOperand(0).getMBB())
1851 .constrainAllUses(TII, TRI, RBI);
1852}
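// Illustrative sketch: the generic pair
//   G_BRCOND %cond, %bb.true
//   G_BR %bb.false
// is folded here (the backwards walk reaches G_BR first) into a single
//   OpBranchConditional %cond %true_label %false_label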
1853
1854bool SPIRVInstructionSelector::selectBranchCond(MachineInstr &I) const {
1855 // InstructionSelector walks backwards through the instructions. For an
1856 // explicit conditional branch with no fallthrough, we use both a G_BR and a
1857 // G_BRCOND to create an OpBranchConditional. We should hit G_BR first, and
1858 // generate the OpBranchConditional in selectBranch above.
1859 //
1860 // If an OpBranchConditional has been generated, we simply return, as the work
1861 // is already done. If there is no OpBranchConditional, LLVM must be relying on
1862 // implicit fallthrough to the next basic block, so we need to create an
1863 // OpBranchConditional with an explicit "false" argument pointing to the next
1864 // basic block that LLVM would fall through to.
1865 const MachineInstr *NextI = I.getNextNode();
1866 // Check if this has already been successfully selected.
1867 if (NextI != nullptr && NextI->getOpcode() == SPIRV::OpBranchConditional)
1868 return true;
1869 // Must be relying on implicit block fallthrough, so generate an
1870 // OpBranchConditional with the "next" basic block as the "false" target.
1871 MachineBasicBlock &MBB = *I.getParent();
1872 unsigned NextMBBNum = MBB.getNextNode()->getNumber();
1873 MachineBasicBlock *NextMBB = I.getMF()->getBlockNumbered(NextMBBNum);
1874 return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
1875 .addUse(I.getOperand(0).getReg())
1876 .addMBB(I.getOperand(1).getMBB())
1877 .addMBB(NextMBB)
1878 .constrainAllUses(TII, TRI, RBI);
1879}
1880
1881bool SPIRVInstructionSelector::selectPhi(Register ResVReg,
1882 const SPIRVType *ResType,
1883 MachineInstr &I) const {
1884 auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpPhi))
1885 .addDef(ResVReg)
1886 .addUse(GR.getSPIRVTypeID(ResType));
1887 const unsigned NumOps = I.getNumOperands();
1888 for (unsigned i = 1; i < NumOps; i += 2) {
1889 MIB.addUse(I.getOperand(i + 0).getReg());
1890 MIB.addMBB(I.getOperand(i + 1).getMBB());
1891 }
1892 return MIB.constrainAllUses(TII, TRI, RBI);
1893}
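// Illustrative sketch (names made up): a G_PHI with two predecessors maps
// directly onto
//   %res = OpPhi %ty %val0 %bb0 %val1 %bb1
// with one (value, parent-block) pair per incoming edge.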
1894
1895bool SPIRVInstructionSelector::selectGlobalValue(
1896 Register ResVReg, MachineInstr &I, const MachineInstr *Init) const {
1897 // FIXME: don't use MachineIRBuilder here, replace it with BuildMI.
1898 MachineIRBuilder MIRBuilder(I);
1899 const GlobalValue *GV = I.getOperand(1).getGlobal();
1900 Type *GVType = GV->getValueType();
1901 SPIRVType *PointerBaseType;
1902 if (GVType->isArrayTy()) {
1903 SPIRVType *ArrayElementType =
1904 GR.getOrCreateSPIRVType(GVType->getArrayElementType(), MIRBuilder,
1905 SPIRV::AccessQualifier::ReadWrite, false);
1906 PointerBaseType = GR.getOrCreateSPIRVArrayType(
1907 ArrayElementType, GVType->getArrayNumElements(), I, TII);
1908 } else {
1909 PointerBaseType = GR.getOrCreateSPIRVType(
1910 GVType, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, false);
1911 }
1912 SPIRVType *ResType = GR.getOrCreateSPIRVPointerType(
1913 PointerBaseType, I, TII,
1914       addressSpaceToStorageClass(GV->getAddressSpace(), STI));
1915
1916 std::string GlobalIdent;
1917 if (!GV->hasName()) {
1918 unsigned &ID = UnnamedGlobalIDs[GV];
1919 if (ID == 0)
1920 ID = UnnamedGlobalIDs.size();
1921 GlobalIdent = "__unnamed_" + Twine(ID).str();
1922 } else {
1923 GlobalIdent = GV->getGlobalIdentifier();
1924 }
1925
1926 // Behaviour of functions as operands depends on availability of the
1927 // corresponding extension (SPV_INTEL_function_pointers):
1928 // - If there is an extension to operate with functions as operands:
1929 // We create a proper constant operand and evaluate a correct type for a
1930 // function pointer.
1931 // - Without the required extension:
1932 // We have functions as operands in tests with blocks of instructions, e.g. in
1933 // transcoding/global_block.ll. These operands are not used and should be
1934 // substituted by zero constants. Their type is expected to be always
1935 // OpTypePointer Function %uchar.
1936 if (isa<Function>(GV)) {
1937 const Constant *ConstVal = GV;
1938 MachineBasicBlock &BB = *I.getParent();
1939 Register NewReg = GR.find(ConstVal, GR.CurMF);
1940 if (!NewReg.isValid()) {
1941 Register NewReg = ResVReg;
1942 GR.add(ConstVal, GR.CurMF, NewReg);
1943 const Function *GVFun =
1944 STI.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers)
1945 ? dyn_cast<Function>(GV)
1946 : nullptr;
1947 if (GVFun) {
1948 // References to a function via function pointers generate virtual
1949         // registers without a definition. We will resolve them later, during
1950         // the module analysis stage.
1951 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1952 Register FuncVReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
1953 MRI->setRegClass(FuncVReg, &SPIRV::IDRegClass);
1954         MachineInstrBuilder MB =
1955             BuildMI(BB, I, I.getDebugLoc(),
1956 TII.get(SPIRV::OpConstantFunctionPointerINTEL))
1957 .addDef(NewReg)
1958 .addUse(GR.getSPIRVTypeID(ResType))
1959 .addUse(FuncVReg);
1960 // mapping the function pointer to the used Function
1961 GR.recordFunctionPointer(&MB.getInstr()->getOperand(2), GVFun);
1962 return MB.constrainAllUses(TII, TRI, RBI);
1963 }
1964 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
1965 .addDef(NewReg)
1966 .addUse(GR.getSPIRVTypeID(ResType))
1967 .constrainAllUses(TII, TRI, RBI);
1968 }
1969 assert(NewReg != ResVReg);
1970 return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
1971 .addDef(ResVReg)
1972 .addUse(NewReg)
1973 .constrainAllUses(TII, TRI, RBI);
1974 }
1975 auto GlobalVar = cast<GlobalVariable>(GV);
1976 assert(GlobalVar->getName() != "llvm.global.annotations");
1977
1978 bool HasInit = GlobalVar->hasInitializer() &&
1979 !isa<UndefValue>(GlobalVar->getInitializer());
1980   // Skip the empty declaration for GVs with initializers until we get the decl
1981   // with the passed initializer.
1982 if (HasInit && !Init)
1983 return true;
1984
1985 unsigned AddrSpace = GV->getAddressSpace();
1986 SPIRV::StorageClass::StorageClass Storage =
1987 addressSpaceToStorageClass(AddrSpace, STI);
1988 bool HasLnkTy = GV->getLinkage() != GlobalValue::InternalLinkage &&
1989 Storage != SPIRV::StorageClass::Function;
1990 SPIRV::LinkageType::LinkageType LnkType =
1991       (GV->isDeclaration() || GV->hasAvailableExternallyLinkage())
1992           ? SPIRV::LinkageType::Import
1993           : (GV->getLinkage() == GlobalValue::LinkOnceODRLinkage &&
1994                  STI.canUseExtension(SPIRV::Extension::SPV_KHR_linkonce_odr)
1995 ? SPIRV::LinkageType::LinkOnceODR
1996 : SPIRV::LinkageType::Export);
1997
1998 Register Reg = GR.buildGlobalVariable(ResVReg, ResType, GlobalIdent, GV,
1999 Storage, Init, GlobalVar->isConstant(),
2000 HasLnkTy, LnkType, MIRBuilder, true);
2001 return Reg.isValid();
2002}
2003
2004bool SPIRVInstructionSelector::selectLog10(Register ResVReg,
2005 const SPIRVType *ResType,
2006 MachineInstr &I) const {
2007 if (STI.canUseExtInstSet(SPIRV::InstructionSet::OpenCL_std)) {
2008 return selectExtInst(ResVReg, ResType, I, CL::log10);
2009 }
2010
2011 // There is no log10 instruction in the GLSL Extended Instruction set, so it
2012 // is implemented as:
2013 // log10(x) = log2(x) * (1 / log2(10))
2014 // = log2(x) * 0.30103
2015
2016 MachineIRBuilder MIRBuilder(I);
2017 MachineBasicBlock &BB = *I.getParent();
2018
2019 // Build log2(x).
2020 Register VarReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
2021 bool Result =
2022 BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
2023 .addDef(VarReg)
2024 .addUse(GR.getSPIRVTypeID(ResType))
2025 .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
2026 .addImm(GL::Log2)
2027 .add(I.getOperand(1))
2028 .constrainAllUses(TII, TRI, RBI);
2029
2030 // Build 0.30103.
2031 assert(ResType->getOpcode() == SPIRV::OpTypeVector ||
2032 ResType->getOpcode() == SPIRV::OpTypeFloat);
2033 // TODO: Add matrix implementation once supported by the HLSL frontend.
2034 const SPIRVType *SpirvScalarType =
2035 ResType->getOpcode() == SPIRV::OpTypeVector
2036 ? GR.getSPIRVTypeForVReg(ResType->getOperand(1).getReg())
2037 : ResType;
2038 Register ScaleReg =
2039 GR.buildConstantFP(APFloat(0.30103f), MIRBuilder, SpirvScalarType);
2040
2041 // Multiply log2(x) by 0.30103 to get log10(x) result.
2042 auto Opcode = ResType->getOpcode() == SPIRV::OpTypeVector
2043 ? SPIRV::OpVectorTimesScalar
2044 : SPIRV::OpFMulS;
2045 Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
2046 .addDef(ResVReg)
2047 .addUse(GR.getSPIRVTypeID(ResType))
2048 .addUse(VarReg)
2049 .addUse(ScaleReg)
2050 .constrainAllUses(TII, TRI, RBI);
2051
2052 return Result;
2053}
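// The 0.30103 scale factor follows from the change-of-base identity:
//   log10(x) = log2(x) / log2(10), where 1 / log2(10) = log10(2) ≈ 0.30103.
// Quick check (illustrative): log2(100) ≈ 6.6439 and 6.6439 * 0.30103 ≈ 2.0.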
2054
2055bool SPIRVInstructionSelector::selectSpvThreadId(Register ResVReg,
2056 const SPIRVType *ResType,
2057 MachineInstr &I) const {
2058 // DX intrinsic: @llvm.dx.thread.id(i32)
2059 // ID Name Description
2060 // 93 ThreadId reads the thread ID
2061
2062 MachineIRBuilder MIRBuilder(I);
2063 const SPIRVType *U32Type = GR.getOrCreateSPIRVIntegerType(32, MIRBuilder);
2064 const SPIRVType *Vec3Ty =
2065 GR.getOrCreateSPIRVVectorType(U32Type, 3, MIRBuilder);
2066 const SPIRVType *PtrType = GR.getOrCreateSPIRVPointerType(
2067 Vec3Ty, MIRBuilder, SPIRV::StorageClass::Input);
2068
2069 // Create new register for GlobalInvocationID builtin variable.
2070 Register NewRegister =
2071 MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::IDRegClass);
2072 MIRBuilder.getMRI()->setType(NewRegister, LLT::pointer(0, 32));
2073 GR.assignSPIRVTypeToVReg(PtrType, NewRegister, MIRBuilder.getMF());
2074
2075 // Build GlobalInvocationID global variable with the necessary decorations.
2076 Register Variable = GR.buildGlobalVariable(
2077 NewRegister, PtrType,
2078 getLinkStringForBuiltIn(SPIRV::BuiltIn::GlobalInvocationId), nullptr,
2079 SPIRV::StorageClass::Input, nullptr, true, true,
2080 SPIRV::LinkageType::Import, MIRBuilder, false);
2081
2082 // Create new register for loading value.
2083 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2084 Register LoadedRegister = MRI->createVirtualRegister(&SPIRV::IDRegClass);
2085 MIRBuilder.getMRI()->setType(LoadedRegister, LLT::pointer(0, 32));
2086 GR.assignSPIRVTypeToVReg(Vec3Ty, LoadedRegister, MIRBuilder.getMF());
2087
2088 // Load v3uint value from the global variable.
2089 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
2090 .addDef(LoadedRegister)
2091 .addUse(GR.getSPIRVTypeID(Vec3Ty))
2092 .addUse(Variable);
2093
2094   // Get the thread ID index. The operand is expected to be a constant
2095   // immediate value, wrapped in a type assignment.
2096 assert(I.getOperand(2).isReg());
2097 Register ThreadIdReg = I.getOperand(2).getReg();
2098 SPIRVType *ConstTy = this->MRI->getVRegDef(ThreadIdReg);
2099 assert(ConstTy && ConstTy->getOpcode() == SPIRV::ASSIGN_TYPE &&
2100 ConstTy->getOperand(1).isReg());
2101 Register ConstReg = ConstTy->getOperand(1).getReg();
2102 const MachineInstr *Const = this->MRI->getVRegDef(ConstReg);
2103 assert(Const && Const->getOpcode() == TargetOpcode::G_CONSTANT);
2104 const llvm::APInt &Val = Const->getOperand(1).getCImm()->getValue();
2105 const uint32_t ThreadId = Val.getZExtValue();
2106
2107 // Extract the thread ID from the loaded vector value.
2108 MachineBasicBlock &BB = *I.getParent();
2109 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
2110 .addDef(ResVReg)
2111 .addUse(GR.getSPIRVTypeID(ResType))
2112 .addUse(LoadedRegister)
2113 .addImm(ThreadId);
2114 return MIB.constrainAllUses(TII, TRI, RBI);
2115}
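// Illustrative sketch (names made up): @llvm.dx.thread.id(i32 0) becomes a
// read of component 0 of the GlobalInvocationId builtin variable:
//   %gid = OpLoad %v3uint %global_invocation_id
//   %res = OpCompositeExtract %uint %gid 0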
2116
2117namespace llvm {
2118 InstructionSelector *
2119 createSPIRVInstructionSelector(const SPIRVTargetMachine &TM,
2120                                const SPIRVSubtarget &Subtarget,
2121 const RegisterBankInfo &RBI) {
2122 return new SPIRVInstructionSelector(TM, Subtarget, RBI);
2123}
2124} // namespace llvm