//===- SPIRVInstructionSelector.cpp ------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the targeting of the InstructionSelector class for
// SPIRV.
// TODO: This should be generated by TableGen.
//
//===----------------------------------------------------------------------===//

#include "SPIRV.h"
#include "SPIRVGlobalRegistry.h"
#include "SPIRVInstrInfo.h"
#include "SPIRVRegisterBankInfo.h"
#include "SPIRVRegisterInfo.h"
#include "SPIRVTargetMachine.h"
#include "SPIRVUtils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/IntrinsicsSPIRV.h"
#include "llvm/Support/Debug.h"

namespace llvm {

class SPIRVMachineModuleInfo : public MachineModuleInfoImpl {
public:
  SyncScope::ID Work_ItemSSID;
  SyncScope::ID WorkGroupSSID;
  SyncScope::ID DeviceSSID;
  SyncScope::ID AllSVMDevicesSSID;
  SyncScope::ID SubGroupSSID;

  SPIRVMachineModuleInfo(const MachineModuleInfo &MMI) {
    LLVMContext &CTX = MMI.getModule()->getContext();
    Work_ItemSSID = CTX.getOrInsertSyncScopeID("work_item");
    WorkGroupSSID = CTX.getOrInsertSyncScopeID("workgroup");
    DeviceSSID = CTX.getOrInsertSyncScopeID("device");
    AllSVMDevicesSSID = CTX.getOrInsertSyncScopeID("all_svm_devices");
    SubGroupSSID = CTX.getOrInsertSyncScopeID("sub_group");
  }
};

} // end namespace llvm

#define DEBUG_TYPE "spirv-isel"

using namespace llvm;
namespace CL = SPIRV::OpenCLExtInst;
namespace GL = SPIRV::GLSLExtInst;

using ExtInstList =
    std::vector<std::pair<SPIRV::InstructionSet::InstructionSet, uint32_t>>;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class SPIRVInstructionSelector : public InstructionSelector {
  const SPIRVSubtarget &STI;
  const SPIRVInstrInfo &TII;
  const SPIRVRegisterInfo &TRI;
  const RegisterBankInfo &RBI;
  SPIRVGlobalRegistry &GR;
  MachineRegisterInfo *MRI;
  SPIRVMachineModuleInfo *MMI = nullptr;

  /// We need to keep track of the number we give to anonymous global values to
  /// generate the same name every time when this is needed.
  mutable DenseMap<const GlobalValue *, unsigned> UnnamedGlobalIDs;

public:
  SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                           const SPIRVSubtarget &ST,
                           const RegisterBankInfo &RBI);
  void setupMF(MachineFunction &MF, GISelKnownBits *KB,
               CodeGenCoverage *CoverageInfo, ProfileSummaryInfo *PSI,
               BlockFrequencyInfo *BFI) override;
  // Common selection code. Instruction-specific selection occurs in spvSelect.
  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

#define GET_GLOBALISEL_PREDICATES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL

private:
  // tblgen-erated 'select' implementation, used as the initial selector for
  // the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // All instruction-specific selection that didn't happen in "select()".
  // It is basically a large switch/case that delegates to all the other
  // select methods.
  bool spvSelect(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectGlobalValue(Register ResVReg, MachineInstr &I,
                         const MachineInstr *Init = nullptr) const;

  bool selectUnOpWithSrc(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I, Register SrcReg,
                         unsigned Opcode) const;
  bool selectUnOp(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  unsigned Opcode) const;

  bool selectBitcast(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I) const;

  bool selectLoad(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectStore(MachineInstr &I) const;

  bool selectStackSave(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectStackRestore(MachineInstr &I) const;

  bool selectMemOperation(Register ResVReg, MachineInstr &I) const;

  bool selectAtomicRMW(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I, unsigned NewOpcode,
                       unsigned NegateOpcode = 0) const;

  bool selectAtomicCmpXchg(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectFence(MachineInstr &I) const;

  bool selectAddrSpaceCast(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectAnyOrAll(Register ResVReg, const SPIRVType *ResType,
                      MachineInstr &I, unsigned OpType) const;

  bool selectAll(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectAny(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectBitreverse(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;

  bool selectConstVector(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;
  bool selectSplatVector(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectCmp(Register ResVReg, const SPIRVType *ResType,
                 unsigned comparisonOpcode, MachineInstr &I) const;

  bool selectICmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectFCmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;

  bool selectFmix(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;

  bool selectFrac(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;

  bool selectRsqrt(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;

  void renderImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
                   int OpIdx) const;
  void renderFImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
                    int OpIdx) const;

  bool selectConst(Register ResVReg, const SPIRVType *ResType, const APInt &Imm,
                   MachineInstr &I) const;

  bool selectSelect(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                    bool IsSigned) const;
  bool selectIToF(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  bool IsSigned, unsigned Opcode) const;
  bool selectExt(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                 bool IsSigned) const;

  bool selectTrunc(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;

  bool selectIntToBool(Register IntReg, Register ResVReg, MachineInstr &I,
                       const SPIRVType *intTy, const SPIRVType *boolTy) const;

  bool selectOpUndef(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I) const;
  bool selectFreeze(Register ResVReg, const SPIRVType *ResType,
                    MachineInstr &I) const;
  bool selectIntrinsic(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractVal(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertVal(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractElt(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertElt(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectGEP(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectFrameIndex(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectAllocaArray(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectBranch(MachineInstr &I) const;
  bool selectBranchCond(MachineInstr &I) const;

  bool selectPhi(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, CL::OpenCLExtInst CLInst) const;
  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, CL::OpenCLExtInst CLInst,
                     GL::GLSLExtInst GLInst) const;
  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, const ExtInstList &ExtInsts) const;

  bool selectLog10(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;

  bool selectSpvThreadId(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectUnmergeValues(MachineInstr &I) const;

  Register buildI32Constant(uint32_t Val, MachineInstr &I,
                            const SPIRVType *ResType = nullptr) const;

  Register buildZerosVal(const SPIRVType *ResType, MachineInstr &I) const;
  Register buildZerosValF(const SPIRVType *ResType, MachineInstr &I) const;
  Register buildOnesVal(bool AllOnes, const SPIRVType *ResType,
                        MachineInstr &I) const;

  bool wrapIntoSpecConstantOp(MachineInstr &I,
                              SmallVector<Register> &CompositeArgs) const;
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

SPIRVInstructionSelector::SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                                                   const SPIRVSubtarget &ST,
                                                   const RegisterBankInfo &RBI)
    : InstructionSelector(), STI(ST), TII(*ST.getInstrInfo()),
      TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

void SPIRVInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
                                       CodeGenCoverage *CoverageInfo,
                                       ProfileSummaryInfo *PSI,
                                       BlockFrequencyInfo *BFI) {
  MMI = &MF.getMMI().getObjFileInfo<SPIRVMachineModuleInfo>();
  MRI = &MF.getRegInfo();
  GR.setCurrentFunc(MF);
  InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
}

static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI);

// Defined in SPIRVLegalizerInfo.cpp.
extern bool isTypeFoldingSupported(unsigned Opcode);

bool SPIRVInstructionSelector::select(MachineInstr &I) {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  unsigned Opcode = I.getOpcode();
  // If it's not a GMIR instruction, we've selected it already.
  if (!isPreISelGenericOpcode(Opcode)) {
    if (Opcode == SPIRV::ASSIGN_TYPE) { // These pseudos aren't needed any more.
      Register DstReg = I.getOperand(0).getReg();
      Register SrcReg = I.getOperand(1).getReg();
      auto *Def = MRI->getVRegDef(SrcReg);
      if (isTypeFoldingSupported(Def->getOpcode())) {
        if (MRI->getType(DstReg).isPointer())
          MRI->setType(DstReg, LLT::scalar(32));
        bool Res = selectImpl(I, *CoverageInfo);
        assert(Res || Def->getOpcode() == TargetOpcode::G_CONSTANT);
        if (Res)
          return Res;
      }
      MRI->replaceRegWith(SrcReg, DstReg);
      I.removeFromParent();
      return true;
    } else if (I.getNumDefs() == 1) {
      // Make all vregs 32 bits (for SPIR-V IDs).
      MRI->setType(I.getOperand(0).getReg(), LLT::scalar(32));
    }
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  if (I.getNumOperands() != I.getNumExplicitOperands()) {
    LLVM_DEBUG(errs() << "Generic instr has unexpected implicit operands\n");
    return false;
  }

  // Common code for getting return reg+type, and removing selected instr
  // from parent occurs here. Instr-specific selection happens in spvSelect().
  bool HasDefs = I.getNumDefs() > 0;
  Register ResVReg = HasDefs ? I.getOperand(0).getReg() : Register(0);
  SPIRVType *ResType = HasDefs ? GR.getSPIRVTypeForVReg(ResVReg) : nullptr;
  assert(!HasDefs || ResType || I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
  if (spvSelect(ResVReg, ResType, I)) {
    if (HasDefs) // Make all vregs 32 bits (for SPIR-V IDs).
      for (unsigned i = 0; i < I.getNumDefs(); ++i)
        MRI->setType(I.getOperand(i).getReg(), LLT::scalar(32));
    I.removeFromParent();
    return true;
  }
  return false;
}

bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  const unsigned Opcode = I.getOpcode();
  if (isTypeFoldingSupported(Opcode) && Opcode != TargetOpcode::G_CONSTANT)
    return selectImpl(I, *CoverageInfo);
  switch (Opcode) {
  case TargetOpcode::G_CONSTANT:
    return selectConst(ResVReg, ResType, I.getOperand(1).getCImm()->getValue(),
                       I);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(ResVReg, I);
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectOpUndef(ResVReg, ResType, I);
  case TargetOpcode::G_FREEZE:
    return selectFreeze(ResVReg, ResType, I);

  case TargetOpcode::G_INTRINSIC:
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
  case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
    return selectIntrinsic(ResVReg, ResType, I);
  case TargetOpcode::G_BITREVERSE:
    return selectBitreverse(ResVReg, ResType, I);

  case TargetOpcode::G_BUILD_VECTOR:
    return selectConstVector(ResVReg, ResType, I);
  case TargetOpcode::G_SPLAT_VECTOR:
    return selectSplatVector(ResVReg, ResType, I);

  case TargetOpcode::G_SHUFFLE_VECTOR: {
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorShuffle))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addUse(I.getOperand(1).getReg())
                   .addUse(I.getOperand(2).getReg());
    for (auto V : I.getOperand(3).getShuffleMask())
      MIB.addImm(V);
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case TargetOpcode::G_MEMMOVE:
  case TargetOpcode::G_MEMCPY:
  case TargetOpcode::G_MEMSET:
    return selectMemOperation(ResVReg, I);

  case TargetOpcode::G_ICMP:
    return selectICmp(ResVReg, ResType, I);
  case TargetOpcode::G_FCMP:
    return selectFCmp(ResVReg, ResType, I);

  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndex(ResVReg, ResType, I);

  case TargetOpcode::G_LOAD:
    return selectLoad(ResVReg, ResType, I);
  case TargetOpcode::G_STORE:
    return selectStore(I);

  case TargetOpcode::G_BR:
    return selectBranch(I);
  case TargetOpcode::G_BRCOND:
    return selectBranchCond(I);

  case TargetOpcode::G_PHI:
    return selectPhi(ResVReg, ResType, I);

  case TargetOpcode::G_FPTOSI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToS);
  case TargetOpcode::G_FPTOUI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToU);

  case TargetOpcode::G_SITOFP:
    return selectIToF(ResVReg, ResType, I, true, SPIRV::OpConvertSToF);
  case TargetOpcode::G_UITOFP:
    return selectIToF(ResVReg, ResType, I, false, SPIRV::OpConvertUToF);

  case TargetOpcode::G_CTPOP:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitCount);
  case TargetOpcode::G_SMIN:
    return selectExtInst(ResVReg, ResType, I, CL::s_min, GL::SMin);
  case TargetOpcode::G_UMIN:
    return selectExtInst(ResVReg, ResType, I, CL::u_min, GL::UMin);

  case TargetOpcode::G_SMAX:
    return selectExtInst(ResVReg, ResType, I, CL::s_max, GL::SMax);
  case TargetOpcode::G_UMAX:
    return selectExtInst(ResVReg, ResType, I, CL::u_max, GL::UMax);

  case TargetOpcode::G_FMA:
    return selectExtInst(ResVReg, ResType, I, CL::fma, GL::Fma);

  case TargetOpcode::G_FPOW:
    return selectExtInst(ResVReg, ResType, I, CL::pow, GL::Pow);
  case TargetOpcode::G_FPOWI:
    return selectExtInst(ResVReg, ResType, I, CL::pown);

  case TargetOpcode::G_FEXP:
    return selectExtInst(ResVReg, ResType, I, CL::exp, GL::Exp);
  case TargetOpcode::G_FEXP2:
    return selectExtInst(ResVReg, ResType, I, CL::exp2, GL::Exp2);

  case TargetOpcode::G_FLOG:
    return selectExtInst(ResVReg, ResType, I, CL::log, GL::Log);
  case TargetOpcode::G_FLOG2:
    return selectExtInst(ResVReg, ResType, I, CL::log2, GL::Log2);
  case TargetOpcode::G_FLOG10:
    return selectLog10(ResVReg, ResType, I);

  case TargetOpcode::G_FABS:
    return selectExtInst(ResVReg, ResType, I, CL::fabs, GL::FAbs);
  case TargetOpcode::G_ABS:
    return selectExtInst(ResVReg, ResType, I, CL::s_abs, GL::SAbs);

  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMINIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmin, GL::NMin);
  case TargetOpcode::G_FMAXNUM:
  case TargetOpcode::G_FMAXIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmax, GL::NMax);

  case TargetOpcode::G_FCOPYSIGN:
    return selectExtInst(ResVReg, ResType, I, CL::copysign);

  case TargetOpcode::G_FCEIL:
    return selectExtInst(ResVReg, ResType, I, CL::ceil, GL::Ceil);
  case TargetOpcode::G_FFLOOR:
    return selectExtInst(ResVReg, ResType, I, CL::floor, GL::Floor);

  case TargetOpcode::G_FCOS:
    return selectExtInst(ResVReg, ResType, I, CL::cos, GL::Cos);
  case TargetOpcode::G_FSIN:
    return selectExtInst(ResVReg, ResType, I, CL::sin, GL::Sin);
  case TargetOpcode::G_FTAN:
    return selectExtInst(ResVReg, ResType, I, CL::tan, GL::Tan);
  case TargetOpcode::G_FACOS:
    return selectExtInst(ResVReg, ResType, I, CL::acos, GL::Acos);
  case TargetOpcode::G_FASIN:
    return selectExtInst(ResVReg, ResType, I, CL::asin, GL::Asin);
  case TargetOpcode::G_FATAN:
    return selectExtInst(ResVReg, ResType, I, CL::atan, GL::Atan);
  case TargetOpcode::G_FCOSH:
    return selectExtInst(ResVReg, ResType, I, CL::cosh, GL::Cosh);
  case TargetOpcode::G_FSINH:
    return selectExtInst(ResVReg, ResType, I, CL::sinh, GL::Sinh);
  case TargetOpcode::G_FTANH:
    return selectExtInst(ResVReg, ResType, I, CL::tanh, GL::Tanh);

  case TargetOpcode::G_FSQRT:
    return selectExtInst(ResVReg, ResType, I, CL::sqrt, GL::Sqrt);

  case TargetOpcode::G_CTTZ:
  case TargetOpcode::G_CTTZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::ctz);
  case TargetOpcode::G_CTLZ:
  case TargetOpcode::G_CTLZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::clz);

  case TargetOpcode::G_INTRINSIC_ROUND:
    return selectExtInst(ResVReg, ResType, I, CL::round, GL::Round);
  case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);
  case TargetOpcode::G_INTRINSIC_TRUNC:
    return selectExtInst(ResVReg, ResType, I, CL::trunc, GL::Trunc);
  case TargetOpcode::G_FRINT:
  case TargetOpcode::G_FNEARBYINT:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);

  case TargetOpcode::G_SMULH:
    return selectExtInst(ResVReg, ResType, I, CL::s_mul_hi);
  case TargetOpcode::G_UMULH:
    return selectExtInst(ResVReg, ResType, I, CL::u_mul_hi);

  case TargetOpcode::G_SADDSAT:
    return selectExtInst(ResVReg, ResType, I, CL::s_add_sat);
  case TargetOpcode::G_UADDSAT:
    return selectExtInst(ResVReg, ResType, I, CL::u_add_sat);
  case TargetOpcode::G_SSUBSAT:
    return selectExtInst(ResVReg, ResType, I, CL::s_sub_sat);
  case TargetOpcode::G_USUBSAT:
    return selectExtInst(ResVReg, ResType, I, CL::u_sub_sat);

  case TargetOpcode::G_SEXT:
    return selectExt(ResVReg, ResType, I, true);
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_ZEXT:
    return selectExt(ResVReg, ResType, I, false);
  case TargetOpcode::G_TRUNC:
    return selectTrunc(ResVReg, ResType, I);
  case TargetOpcode::G_FPTRUNC:
  case TargetOpcode::G_FPEXT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpFConvert);

  case TargetOpcode::G_PTRTOINT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertPtrToU);
  case TargetOpcode::G_INTTOPTR:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertUToPtr);
  case TargetOpcode::G_BITCAST:
    return selectBitcast(ResVReg, ResType, I);
  case TargetOpcode::G_ADDRSPACE_CAST:
    return selectAddrSpaceCast(ResVReg, ResType, I);
  case TargetOpcode::G_PTR_ADD: {
    // Currently, we get G_PTR_ADD only as a result of translating
    // global variables, initialized with constant expressions like GV + Const
    // (see test opencl/basic/progvar_prog_scope_init.ll).
    // TODO: extend the handler once we have other cases.
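    // Illustrative sketch (the IR below is not from this file): a global
    // initializer such as
    //   @p = addrspace(1) global i8* getelementptr (i8, i8* @g, i64 4)
    // reaches selection as G_PTR_ADD %gv, %imm and is folded into a single
    //   %res = OpSpecConstantOp %ptrTy InBoundsPtrAccessChain %gv %zero %imm
    // so the whole expression remains a constant at the SPIR-V level.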
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    Register GV = I.getOperand(1).getReg();
    MachineRegisterInfo::def_instr_iterator II = MRI->def_instr_begin(GV);
    (void)II;
    assert(((*II).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
            (*II).getOpcode() == TargetOpcode::COPY ||
            (*II).getOpcode() == SPIRV::OpVariable) &&
           isImm(I.getOperand(2), MRI));
    Register Idx = buildZerosVal(GR.getOrCreateSPIRVIntegerType(32, I, TII), I);
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addImm(static_cast<uint32_t>(
                       SPIRV::Opcode::InBoundsPtrAccessChain))
                   .addUse(GV)
                   .addUse(Idx)
                   .addUse(I.getOperand(2).getReg());
    return MIB.constrainAllUses(TII, TRI, RBI);
  }

  case TargetOpcode::G_ATOMICRMW_OR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicOr);
  case TargetOpcode::G_ATOMICRMW_ADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicIAdd);
  case TargetOpcode::G_ATOMICRMW_AND:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicAnd);
  case TargetOpcode::G_ATOMICRMW_MAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMax);
  case TargetOpcode::G_ATOMICRMW_MIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMin);
  case TargetOpcode::G_ATOMICRMW_SUB:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicISub);
  case TargetOpcode::G_ATOMICRMW_XOR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicXor);
  case TargetOpcode::G_ATOMICRMW_UMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMax);
  case TargetOpcode::G_ATOMICRMW_UMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMin);
  case TargetOpcode::G_ATOMICRMW_XCHG:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicExchange);
  case TargetOpcode::G_ATOMIC_CMPXCHG:
    return selectAtomicCmpXchg(ResVReg, ResType, I);

  case TargetOpcode::G_ATOMICRMW_FADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFAddEXT);
  case TargetOpcode::G_ATOMICRMW_FSUB:
    // Translate G_ATOMICRMW_FSUB to OpAtomicFAddEXT with a negated value
    // operand.
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFAddEXT,
                           SPIRV::OpFNegate);
  case TargetOpcode::G_ATOMICRMW_FMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFMinEXT);
  case TargetOpcode::G_ATOMICRMW_FMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFMaxEXT);

  case TargetOpcode::G_FENCE:
    return selectFence(I);

  case TargetOpcode::G_STACKSAVE:
    return selectStackSave(ResVReg, ResType, I);
  case TargetOpcode::G_STACKRESTORE:
    return selectStackRestore(I);

  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(I);

  default:
    return false;
  }
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst) const {
  return selectExtInst(ResVReg, ResType, I,
                       {{SPIRV::InstructionSet::OpenCL_std, CLInst}});
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst,
                                             GL::GLSLExtInst GLInst) const {
  ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
                          {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
  return selectExtInst(ResVReg, ResType, I, ExtInsts);
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             const ExtInstList &Insts) const {

  for (const auto &Ex : Insts) {
    SPIRV::InstructionSet::InstructionSet Set = Ex.first;
    uint32_t Opcode = Ex.second;
    if (STI.canUseExtInstSet(Set)) {
      MachineBasicBlock &BB = *I.getParent();
      auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
                     .addDef(ResVReg)
                     .addUse(GR.getSPIRVTypeID(ResType))
                     .addImm(static_cast<uint32_t>(Set))
                     .addImm(Opcode);
      const unsigned NumOps = I.getNumOperands();
      for (unsigned i = 1; i < NumOps; ++i)
        MIB.add(I.getOperand(i));
      return MIB.constrainAllUses(TII, TRI, RBI);
    }
  }
  return false;
}

bool SPIRVInstructionSelector::selectUnOpWithSrc(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I,
                                                 Register SrcReg,
                                                 unsigned Opcode) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(SrcReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectUnOp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I,
                                          unsigned Opcode) const {
  if (STI.isOpenCLEnv() && I.getOperand(1).isReg()) {
    Register SrcReg = I.getOperand(1).getReg();
    bool IsGV = false;
    for (MachineRegisterInfo::def_instr_iterator DefIt =
             MRI->def_instr_begin(SrcReg);
         DefIt != MRI->def_instr_end(); DefIt = std::next(DefIt)) {
      if ((*DefIt).getOpcode() == TargetOpcode::G_GLOBAL_VALUE) {
        IsGV = true;
        break;
      }
    }
    if (IsGV) {
      uint32_t SpecOpcode = 0;
      switch (Opcode) {
      case SPIRV::OpConvertPtrToU:
        SpecOpcode = static_cast<uint32_t>(SPIRV::Opcode::ConvertPtrToU);
        break;
      case SPIRV::OpConvertUToPtr:
        SpecOpcode = static_cast<uint32_t>(SPIRV::Opcode::ConvertUToPtr);
        break;
      }
      if (SpecOpcode)
        return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                       TII.get(SPIRV::OpSpecConstantOp))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addImm(SpecOpcode)
            .addUse(SrcReg)
            .constrainAllUses(TII, TRI, RBI);
    }
  }
  return selectUnOpWithSrc(ResVReg, ResType, I, I.getOperand(1).getReg(),
                           Opcode);
}

bool SPIRVInstructionSelector::selectBitcast(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I) const {
  Register OpReg = I.getOperand(1).getReg();
  SPIRVType *OpType = OpReg.isValid() ? GR.getSPIRVTypeForVReg(OpReg) : nullptr;
  if (!GR.isBitcastCompatible(ResType, OpType))
    report_fatal_error("incompatible result and operand types in a bitcast");
  return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
}

static SPIRV::Scope::Scope getScope(SyncScope::ID Ord,
                                    SPIRVMachineModuleInfo *MMI) {
  if (Ord == SyncScope::SingleThread || Ord == MMI->Work_ItemSSID)
    return SPIRV::Scope::Invocation;
  else if (Ord == SyncScope::System || Ord == MMI->DeviceSSID)
    return SPIRV::Scope::Device;
  else if (Ord == MMI->WorkGroupSSID)
    return SPIRV::Scope::Workgroup;
  else if (Ord == MMI->AllSVMDevicesSSID)
    return SPIRV::Scope::CrossDevice;
  else if (Ord == MMI->SubGroupSSID)
    return SPIRV::Scope::Subgroup;
  else
    // The OpenCL approach is: "The functions that do not have memory_scope
    // argument have the same semantics as the corresponding functions with
    // the memory_scope argument set to memory_scope_device." See ref.:
    // https://registry.khronos.org/OpenCL/specs/3.0-unified/html/OpenCL_C.html#atomic-functions
    // In our case, if the scope is unknown, assuming that SPIR-V code is to be
    // consumed in an OpenCL environment, we use the same approach and set the
    // scope to memory_scope_device.
    return SPIRV::Scope::Device;
}
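
// A worked example of the mapping above (illustrative, not from this file):
// an atomic marked `syncscope("workgroup")` in LLVM IR resolves to
// MMI->WorkGroupSSID and therefore to SPIRV::Scope::Workgroup, while an
// atomic with no explicit sync scope carries SyncScope::System and falls
// back to SPIRV::Scope::Device, matching the OpenCL default quoted above.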

static void addMemoryOperands(MachineMemOperand *MemOp,
                              MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (MemOp->isVolatile())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (MemOp->isNonTemporal())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
  if (MemOp->getAlign().value())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None)) {
    MIB.addImm(SpvMemOp);
    if (SpvMemOp & static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
      MIB.addImm(MemOp->getAlign().value());
  }
}
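
// For instance (a sketch, not from this file): for a volatile load with
// 4-byte alignment the helper above appends the Volatile|Aligned mask
// followed by the alignment value, so the final instruction prints roughly
// as:
//   %res = OpLoad %type %ptr Volatile|Aligned 4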

static void addMemoryOperands(uint64_t Flags, MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (Flags & MachineMemOperand::Flags::MOVolatile)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (Flags & MachineMemOperand::Flags::MONonTemporal)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None))
    MIB.addImm(SpvMemOp);
}

bool SPIRVInstructionSelector::selectLoad(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(Ptr);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
           I.getOpcode() ==
               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStore(MachineInstr &I) const {
  unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
  Register StoreVal = I.getOperand(0 + OpOffset).getReg();
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpStore))
                 .addUse(Ptr)
                 .addUse(StoreVal);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
           I.getOpcode() ==
               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStackSave(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
    report_fatal_error(
        "llvm.stacksave intrinsic: this instruction requires the following "
        "SPIR-V extension: SPV_INTEL_variable_length_array",
        false);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSaveMemoryINTEL))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStackRestore(MachineInstr &I) const {
  if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
    report_fatal_error(
        "llvm.stackrestore intrinsic: this instruction requires the following "
        "SPIR-V extension: SPV_INTEL_variable_length_array",
        false);
  if (!I.getOperand(0).isReg())
    return false;
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpRestoreMemoryINTEL))
      .addUse(I.getOperand(0).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg,
                                                  MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  Register SrcReg = I.getOperand(1).getReg();
  if (I.getOpcode() == TargetOpcode::G_MEMSET) {
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    unsigned Val = getIConstVal(I.getOperand(1).getReg(), MRI);
    unsigned Num = getIConstVal(I.getOperand(2).getReg(), MRI);
    SPIRVType *ValTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
    SPIRVType *ArrTy = GR.getOrCreateSPIRVArrayType(ValTy, Num, I, TII);
    Register Const = GR.getOrCreateConstIntArray(Val, Num, I, ArrTy, TII);
    SPIRVType *VarTy = GR.getOrCreateSPIRVPointerType(
        ArrTy, I, TII, SPIRV::StorageClass::UniformConstant);
    // TODO: check if we have such GV, add init, use buildGlobalVariable.
    Function &CurFunction = GR.CurMF->getFunction();
    Type *LLVMArrTy =
        ArrayType::get(IntegerType::get(CurFunction.getContext(), 8), Num);
    // Module takes ownership of the global var.
    GlobalVariable *GV = new GlobalVariable(*CurFunction.getParent(), LLVMArrTy,
                                            true, GlobalValue::InternalLinkage,
                                            Constant::getNullValue(LLVMArrTy));
    Register VarReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
    GR.add(GV, GR.CurMF, VarReg);

    buildOpDecorate(VarReg, I, TII, SPIRV::Decoration::Constant, {});
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
        .addDef(VarReg)
        .addUse(GR.getSPIRVTypeID(VarTy))
        .addImm(SPIRV::StorageClass::UniformConstant)
        .addUse(Const)
        .constrainAllUses(TII, TRI, RBI);
    SPIRVType *SourceTy = GR.getOrCreateSPIRVPointerType(
        ValTy, I, TII, SPIRV::StorageClass::UniformConstant);
    SrcReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
    selectUnOpWithSrc(SrcReg, SourceTy, I, VarReg, SPIRV::OpBitcast);
  }
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCopyMemorySized))
                 .addUse(I.getOperand(0).getReg())
                 .addUse(SrcReg)
                 .addUse(I.getOperand(2).getReg());
  if (I.getNumMemOperands())
    addMemoryOperands(*I.memoperands_begin(), MIB);
  bool Result = MIB.constrainAllUses(TII, TRI, RBI);
  if (ResVReg.isValid() && ResVReg != MIB->getOperand(0).getReg())
    BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY), ResVReg)
        .addUse(MIB->getOperand(0).getReg());
  return Result;
}

bool SPIRVInstructionSelector::selectAtomicRMW(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I,
                                               unsigned NewOpcode,
                                               unsigned NegateOpcode) const {
  assert(I.hasOneMemOperand());
  const MachineMemOperand *MemOp = *I.memoperands_begin();
  uint32_t Scope =
      static_cast<uint32_t>(getScope(MemOp->getSyncScopeID(), MMI));
  Register ScopeReg = buildI32Constant(Scope, I);

  Register Ptr = I.getOperand(1).getReg();
  // TODO: Changed as it's implemented in the translator. See test/atomicrmw.ll
  // auto ScSem =
  // getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr));
  AtomicOrdering AO = MemOp->getSuccessOrdering();
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem /*| ScSem*/, I);

  bool Result = false;
  Register ValueReg = I.getOperand(2).getReg();
  if (NegateOpcode != 0) {
    // Translation with a negated value operand is requested.
    Register TmpReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    Result |= selectUnOpWithSrc(TmpReg, ResType, I, ValueReg, NegateOpcode);
    ValueReg = TmpReg;
  }

  Result |= BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(NewOpcode))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(Ptr)
                .addUse(ScopeReg)
                .addUse(MemSemReg)
                .addUse(ValueReg)
                .constrainAllUses(TII, TRI, RBI);
  return Result;
}

bool SPIRVInstructionSelector::selectUnmergeValues(MachineInstr &I) const {
  unsigned ArgI = I.getNumOperands() - 1;
  Register SrcReg =
      I.getOperand(ArgI).isReg() ? I.getOperand(ArgI).getReg() : Register(0);
  SPIRVType *DefType =
      SrcReg.isValid() ? GR.getSPIRVTypeForVReg(SrcReg) : nullptr;
  if (!DefType || DefType->getOpcode() != SPIRV::OpTypeVector)
    report_fatal_error(
        "cannot select G_UNMERGE_VALUES with a non-vector argument");

  SPIRVType *ScalarType =
      GR.getSPIRVTypeForVReg(DefType->getOperand(1).getReg());
  MachineBasicBlock &BB = *I.getParent();
  bool Res = false;
  for (unsigned i = 0; i < I.getNumDefs(); ++i) {
    Register ResVReg = I.getOperand(i).getReg();
    SPIRVType *ResType = GR.getSPIRVTypeForVReg(ResVReg);
    if (!ResType) {
      // There were no "assign type" actions; let's fix this now.
      ResType = ScalarType;
      MRI->setRegClass(ResVReg, &SPIRV::IDRegClass);
      MRI->setType(ResVReg, LLT::scalar(GR.getScalarOrVectorBitWidth(ResType)));
      GR.assignSPIRVTypeToVReg(ResType, ResVReg, *GR.CurMF);
    }
    auto MIB =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addUse(SrcReg)
            .addImm(static_cast<int64_t>(i));
    Res |= MIB.constrainAllUses(TII, TRI, RBI);
  }
  return Res;
}

bool SPIRVInstructionSelector::selectFence(MachineInstr &I) const {
  AtomicOrdering AO = AtomicOrdering(I.getOperand(0).getImm());
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem, I);
  SyncScope::ID Ord = SyncScope::ID(I.getOperand(1).getImm());
  uint32_t Scope = static_cast<uint32_t>(getScope(Ord, MMI));
  Register ScopeReg = buildI32Constant(Scope, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemoryBarrier))
      .addUse(ScopeReg)
      .addUse(MemSemReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  Register ScopeReg;
  Register MemSemEqReg;
  Register MemSemNeqReg;
  Register Ptr = I.getOperand(2).getReg();
  if (!isa<GIntrinsic>(I)) {
    assert(I.hasOneMemOperand());
    const MachineMemOperand *MemOp = *I.memoperands_begin();
    unsigned Scope =
        static_cast<uint32_t>(getScope(MemOp->getSyncScopeID(), MMI));
    ScopeReg = buildI32Constant(Scope, I);

    unsigned ScSem = static_cast<uint32_t>(
        getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr)));
    AtomicOrdering AO = MemOp->getSuccessOrdering();
    unsigned MemSemEq = static_cast<uint32_t>(getMemSemantics(AO)) | ScSem;
    MemSemEqReg = buildI32Constant(MemSemEq, I);
    AtomicOrdering FO = MemOp->getFailureOrdering();
    unsigned MemSemNeq = static_cast<uint32_t>(getMemSemantics(FO)) | ScSem;
    MemSemNeqReg =
        MemSemEq == MemSemNeq ? MemSemEqReg : buildI32Constant(MemSemNeq, I);
  } else {
    ScopeReg = I.getOperand(5).getReg();
    MemSemEqReg = I.getOperand(6).getReg();
    MemSemNeqReg = I.getOperand(7).getReg();
  }

  Register Cmp = I.getOperand(3).getReg();
  Register Val = I.getOperand(4).getReg();
  SPIRVType *SpvValTy = GR.getSPIRVTypeForVReg(Val);
  Register ACmpRes = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  const DebugLoc &DL = I.getDebugLoc();
  bool Result =
      BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpAtomicCompareExchange))
          .addDef(ACmpRes)
          .addUse(GR.getSPIRVTypeID(SpvValTy))
          .addUse(Ptr)
          .addUse(ScopeReg)
          .addUse(MemSemEqReg)
          .addUse(MemSemNeqReg)
          .addUse(Val)
          .addUse(Cmp)
          .constrainAllUses(TII, TRI, RBI);
  Register CmpSuccReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  SPIRVType *BoolTy = GR.getOrCreateSPIRVBoolType(I, TII);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpIEqual))
                .addDef(CmpSuccReg)
                .addUse(GR.getSPIRVTypeID(BoolTy))
                .addUse(ACmpRes)
                .addUse(Cmp)
                .constrainAllUses(TII, TRI, RBI);
  Register TmpReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(TmpReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(ACmpRes)
                .addUse(GR.getOrCreateUndef(I, ResType, TII))
                .addImm(0)
                .constrainAllUses(TII, TRI, RBI);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(CmpSuccReg)
                .addUse(TmpReg)
                .addImm(1)
                .constrainAllUses(TII, TRI, RBI);
  return Result;
}

static bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::Workgroup:
  case SPIRV::StorageClass::CrossWorkgroup:
  case SPIRV::StorageClass::Function:
    return true;
  default:
    return false;
  }
}

static bool isUSMStorageClass(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::DeviceOnlyINTEL:
  case SPIRV::StorageClass::HostOnlyINTEL:
    return true;
  default:
    return false;
  }
}

// In SPIR-V, address space casting can only happen to and from the Generic
// storage class. We can also only cast Workgroup, CrossWorkgroup, or Function
// pointers to and from Generic pointers. As such, we can convert e.g. from
// Workgroup to Function by going via a Generic pointer as an intermediary. All
// other combinations can only be done by a bitcast, and are probably not safe.
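// For example (illustrative only), a Workgroup-to-Function cast is emitted
// as the two-step sequence built further down in this function:
//   %tmp = OpPtrCastToGeneric %genericPtrTy %srcPtr
//   %res = OpGenericCastToPtr %resPtrTy %tmp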
bool SPIRVInstructionSelector::selectAddrSpaceCast(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  // If the AddrSpaceCast has a single user that is an OpConstantComposite, an
  // OpVariable, or an spv_init_global intrinsic, we should select
  // OpSpecConstantOp instead.
  auto UIs = MRI->use_instructions(ResVReg);
  if (!UIs.empty() && ++UIs.begin() == UIs.end() &&
      (UIs.begin()->getOpcode() == SPIRV::OpConstantComposite ||
       UIs.begin()->getOpcode() == SPIRV::OpVariable ||
       isSpvIntrinsic(*UIs.begin(), Intrinsic::spv_init_global))) {
    Register NewReg = I.getOperand(1).getReg();
    MachineBasicBlock &BB = *I.getParent();
    SPIRVType *SpvBaseTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
    ResType = GR.getOrCreateSPIRVPointerType(SpvBaseTy, I, TII,
                                             SPIRV::StorageClass::Generic);
    bool Result =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addImm(static_cast<uint32_t>(SPIRV::Opcode::PtrCastToGeneric))
            .addUse(NewReg)
            .constrainAllUses(TII, TRI, RBI);
    return Result;
  }
  Register SrcPtr = I.getOperand(1).getReg();
  SPIRVType *SrcPtrTy = GR.getSPIRVTypeForVReg(SrcPtr);
  SPIRV::StorageClass::StorageClass SrcSC = GR.getPointerStorageClass(SrcPtr);
  SPIRV::StorageClass::StorageClass DstSC = GR.getPointerStorageClass(ResVReg);

  // Don't generate a cast between identical storage classes.
  if (SrcSC == DstSC)
    return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                   TII.get(TargetOpcode::COPY))
        .addDef(ResVReg)
        .addUse(SrcPtr)
        .constrainAllUses(TII, TRI, RBI);

  // Casting from an eligible pointer to Generic.
  if (DstSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(SrcSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
  // Casting from Generic to an eligible pointer.
  if (SrcSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(DstSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);
  // Casting between 2 eligible pointers using Generic as an intermediary.
  if (isGenericCastablePtr(SrcSC) && isGenericCastablePtr(DstSC)) {
    Register Tmp = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
        GR.getPointeeType(SrcPtrTy), I, TII, SPIRV::StorageClass::Generic);
    MachineBasicBlock &BB = *I.getParent();
    const DebugLoc &DL = I.getDebugLoc();
    bool Success = BuildMI(BB, I, DL, TII.get(SPIRV::OpPtrCastToGeneric))
                       .addDef(Tmp)
                       .addUse(GR.getSPIRVTypeID(GenericPtrTy))
                       .addUse(SrcPtr)
                       .constrainAllUses(TII, TRI, RBI);
    return Success && BuildMI(BB, I, DL, TII.get(SPIRV::OpGenericCastToPtr))
                          .addDef(ResVReg)
                          .addUse(GR.getSPIRVTypeID(ResType))
                          .addUse(Tmp)
                          .constrainAllUses(TII, TRI, RBI);
  }

  // Check if instructions from the SPV_INTEL_usm_storage_classes extension may
  // be applied.
  if (isUSMStorageClass(SrcSC) && DstSC == SPIRV::StorageClass::CrossWorkgroup)
    return selectUnOp(ResVReg, ResType, I,
                      SPIRV::OpPtrCastToCrossWorkgroupINTEL);
  if (SrcSC == SPIRV::StorageClass::CrossWorkgroup && isUSMStorageClass(DstSC))
    return selectUnOp(ResVReg, ResType, I,
                      SPIRV::OpCrossWorkgroupCastToPtrINTEL);
  if (isUSMStorageClass(SrcSC) && DstSC == SPIRV::StorageClass::Generic)
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
  if (SrcSC == SPIRV::StorageClass::Generic && isUSMStorageClass(DstSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);

  // A bitcast between pointers requires that their address spaces match.
  return false;
}

static unsigned getFCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::FCMP_OEQ:
    return SPIRV::OpFOrdEqual;
  case CmpInst::FCMP_OGE:
    return SPIRV::OpFOrdGreaterThanEqual;
  case CmpInst::FCMP_OGT:
    return SPIRV::OpFOrdGreaterThan;
  case CmpInst::FCMP_OLE:
    return SPIRV::OpFOrdLessThanEqual;
  case CmpInst::FCMP_OLT:
    return SPIRV::OpFOrdLessThan;
  case CmpInst::FCMP_ONE:
    return SPIRV::OpFOrdNotEqual;
  case CmpInst::FCMP_ORD:
    return SPIRV::OpOrdered;
  case CmpInst::FCMP_UEQ:
    return SPIRV::OpFUnordEqual;
  case CmpInst::FCMP_UGE:
    return SPIRV::OpFUnordGreaterThanEqual;
  case CmpInst::FCMP_UGT:
    return SPIRV::OpFUnordGreaterThan;
  case CmpInst::FCMP_ULE:
    return SPIRV::OpFUnordLessThanEqual;
  case CmpInst::FCMP_ULT:
    return SPIRV::OpFUnordLessThan;
  case CmpInst::FCMP_UNE:
    return SPIRV::OpFUnordNotEqual;
  case CmpInst::FCMP_UNO:
    return SPIRV::OpUnordered;
  default:
    llvm_unreachable("Unknown predicate type for FCmp");
  }
}

static unsigned getICmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpIEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpINotEqual;
  case CmpInst::ICMP_SGE:
    return SPIRV::OpSGreaterThanEqual;
  case CmpInst::ICMP_SGT:
    return SPIRV::OpSGreaterThan;
  case CmpInst::ICMP_SLE:
    return SPIRV::OpSLessThanEqual;
  case CmpInst::ICMP_SLT:
    return SPIRV::OpSLessThan;
  case CmpInst::ICMP_UGE:
    return SPIRV::OpUGreaterThanEqual;
  case CmpInst::ICMP_UGT:
    return SPIRV::OpUGreaterThan;
  case CmpInst::ICMP_ULE:
    return SPIRV::OpULessThanEqual;
  case CmpInst::ICMP_ULT:
    return SPIRV::OpULessThan;
  default:
    llvm_unreachable("Unknown predicate type for ICmp");
  }
}

static unsigned getPtrCmpOpcode(unsigned Pred) {
  switch (static_cast<CmpInst::Predicate>(Pred)) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpPtrEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpPtrNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for pointer comparison");
  }
}

// Return the logical operation, or abort if none exists.
static unsigned getBoolCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpLogicalEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpLogicalNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for Bool comparison");
  }
}

bool SPIRVInstructionSelector::selectAnyOrAll(Register ResVReg,
                                              const SPIRVType *ResType,
                                              MachineInstr &I,
                                              unsigned OpAnyOrAll) const {
  assert(I.getNumOperands() == 3);
  assert(I.getOperand(2).isReg());
  MachineBasicBlock &BB = *I.getParent();
  Register InputRegister = I.getOperand(2).getReg();
  SPIRVType *InputType = GR.getSPIRVTypeForVReg(InputRegister);

  if (!InputType)
    report_fatal_error("Input Type could not be determined.");

  bool IsBoolTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeBool);
  bool IsVectorTy = InputType->getOpcode() == SPIRV::OpTypeVector;
  if (IsBoolTy && !IsVectorTy) {
    assert(ResVReg == I.getOperand(0).getReg());
    return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                   TII.get(TargetOpcode::COPY))
        .addDef(ResVReg)
        .addUse(InputRegister)
        .constrainAllUses(TII, TRI, RBI);
  }

  bool IsFloatTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeFloat);
  unsigned SpirvNotEqualId =
      IsFloatTy ? SPIRV::OpFOrdNotEqual : SPIRV::OpINotEqual;
  SPIRVType *SpvBoolScalarTy = GR.getOrCreateSPIRVBoolType(I, TII);
  SPIRVType *SpvBoolTy = SpvBoolScalarTy;
  Register NotEqualReg = ResVReg;

  if (IsVectorTy) {
    NotEqualReg = IsBoolTy ? InputRegister
                           : MRI->createVirtualRegister(&SPIRV::IDRegClass);
    const unsigned NumElts = InputType->getOperand(2).getImm();
    SpvBoolTy = GR.getOrCreateSPIRVVectorType(SpvBoolTy, NumElts, I, TII);
  }

  if (!IsBoolTy) {
    Register ConstZeroReg =
        IsFloatTy ? buildZerosValF(InputType, I) : buildZerosVal(InputType, I);

    BuildMI(BB, I, I.getDebugLoc(), TII.get(SpirvNotEqualId))
        .addDef(NotEqualReg)
        .addUse(GR.getSPIRVTypeID(SpvBoolTy))
        .addUse(InputRegister)
        .addUse(ConstZeroReg)
        .constrainAllUses(TII, TRI, RBI);
  }

  if (!IsVectorTy)
    return true;

  return BuildMI(BB, I, I.getDebugLoc(), TII.get(OpAnyOrAll))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(SpvBoolScalarTy))
      .addUse(NotEqualReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectAll(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  return selectAnyOrAll(ResVReg, ResType, I, SPIRV::OpAll);
}

bool SPIRVInstructionSelector::selectAny(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  return selectAnyOrAll(ResVReg, ResType, I, SPIRV::OpAny);
}

bool SPIRVInstructionSelector::selectFmix(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  assert(I.getNumOperands() == 5);
  assert(I.getOperand(2).isReg());
  assert(I.getOperand(3).isReg());
  assert(I.getOperand(4).isReg());
  MachineBasicBlock &BB = *I.getParent();

  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
      .addImm(GL::FMix)
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())
      .addUse(I.getOperand(4).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectFrac(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  assert(I.getNumOperands() == 3);
  assert(I.getOperand(2).isReg());
  MachineBasicBlock &BB = *I.getParent();

  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
      .addImm(GL::Fract)
      .addUse(I.getOperand(2).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectRsqrt(Register ResVReg,
                                           const SPIRVType *ResType,
                                           MachineInstr &I) const {
  assert(I.getNumOperands() == 3);
  assert(I.getOperand(2).isReg());
  MachineBasicBlock &BB = *I.getParent();

  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
      .addImm(GL::InverseSqrt)
      .addUse(I.getOperand(2).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectBitreverse(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpBitReverse))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(1).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectFreeze(Register ResVReg,
                                            const SPIRVType *ResType,
                                            MachineInstr &I) const {
  // There is no way to implement `freeze` correctly without support on the
  // SPIR-V standard side, but we may at least address a simple (static) case
  // when undef/poison value presence is obvious. The main benefit of even
  // incomplete `freeze` support is preventing translation from crashing due
  // to lack of support on the legalization and instruction selection steps.
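  // A minimal sketch of the static case handled below (not from this file):
  //   %y = freeze i32 undef
  // becomes `%y = OpConstantNull %i32`, while `freeze` of an ordinary value
  // degenerates into a plain COPY of its operand.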
  if (!I.getOperand(0).isReg() || !I.getOperand(1).isReg())
    return false;
  Register OpReg = I.getOperand(1).getReg();
  if (MachineInstr *Def = MRI->getVRegDef(OpReg)) {
    Register Reg;
    switch (Def->getOpcode()) {
    case SPIRV::ASSIGN_TYPE:
      if (MachineInstr *AssignToDef =
              MRI->getVRegDef(Def->getOperand(1).getReg())) {
        if (AssignToDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
          Reg = Def->getOperand(2).getReg();
      }
      break;
    case SPIRV::OpUndef:
      Reg = Def->getOperand(1).getReg();
      break;
    }
    unsigned DestOpCode;
    if (Reg.isValid()) {
      DestOpCode = SPIRV::OpConstantNull;
    } else {
      DestOpCode = TargetOpcode::COPY;
      Reg = OpReg;
    }
    return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(DestOpCode))
        .addDef(I.getOperand(0).getReg())
        .addUse(Reg)
        .constrainAllUses(TII, TRI, RBI);
  }
  return false;
}

bool SPIRVInstructionSelector::selectConstVector(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  // TODO: only const case is supported for now.
  assert(std::all_of(
      I.operands_begin(), I.operands_end(), [this](const MachineOperand &MO) {
        if (MO.isDef())
          return true;
        if (!MO.isReg())
          return false;
        SPIRVType *ConstTy = this->MRI->getVRegDef(MO.getReg());
        assert(ConstTy && ConstTy->getOpcode() == SPIRV::ASSIGN_TYPE &&
               ConstTy->getOperand(1).isReg());
        Register ConstReg = ConstTy->getOperand(1).getReg();
        const MachineInstr *Const = this->MRI->getVRegDef(ConstReg);
        assert(Const);
        return (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
                Const->getOpcode() == TargetOpcode::G_FCONSTANT);
      }));

  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                     TII.get(SPIRV::OpConstantComposite))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  for (unsigned i = I.getNumExplicitDefs(); i < I.getNumExplicitOperands(); ++i)
    MIB.addUse(I.getOperand(i).getReg());
  return MIB.constrainAllUses(TII, TRI, RBI);
}

static unsigned getArrayComponentCount(MachineRegisterInfo *MRI,
                                       const SPIRVType *ResType) {
  Register OpReg = ResType->getOperand(2).getReg();
  SPIRVType *OpDef = MRI->getVRegDef(OpReg);
  if (!OpDef)
    return 0;
  if (OpDef->getOpcode() == SPIRV::ASSIGN_TYPE &&
      OpDef->getOperand(1).isReg()) {
    if (SPIRVType *RefDef = MRI->getVRegDef(OpDef->getOperand(1).getReg()))
      OpDef = RefDef;
  }
  unsigned N = OpDef->getOpcode() == TargetOpcode::G_CONSTANT
                   ? OpDef->getOperand(1).getCImm()->getValue().getZExtValue()
                   : 0;
  return N;
}

// Return true if the type represents a constant register.
static bool isConstReg(MachineRegisterInfo *MRI, SPIRVType *OpDef,
                       SmallPtrSet<SPIRVType *, 4> &Visited) {
  if (OpDef->getOpcode() == SPIRV::ASSIGN_TYPE &&
      OpDef->getOperand(1).isReg()) {
    if (SPIRVType *RefDef = MRI->getVRegDef(OpDef->getOperand(1).getReg()))
      OpDef = RefDef;
  }

  if (Visited.contains(OpDef))
    return true;
  Visited.insert(OpDef);

  unsigned Opcode = OpDef->getOpcode();
  switch (Opcode) {
  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT:
    return true;
  case TargetOpcode::G_INTRINSIC:
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
  case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
    return cast<GIntrinsic>(*OpDef).getIntrinsicID() ==
           Intrinsic::spv_const_composite;
  case TargetOpcode::G_BUILD_VECTOR:
  case TargetOpcode::G_SPLAT_VECTOR: {
    for (unsigned i = OpDef->getNumExplicitDefs(); i < OpDef->getNumOperands();
         i++) {
      SPIRVType *OpNestedDef =
          OpDef->getOperand(i).isReg()
              ? MRI->getVRegDef(OpDef->getOperand(i).getReg())
              : nullptr;
      if (OpNestedDef && !isConstReg(MRI, OpNestedDef, Visited))
        return false;
    }
    return true;
  }
  }
  return false;
}

// Return true if the virtual register represents a constant.
static bool isConstReg(MachineRegisterInfo *MRI, Register OpReg) {
  SmallPtrSet<SPIRVType *, 4> Visited;
  if (SPIRVType *OpDef = MRI->getVRegDef(OpReg))
    return isConstReg(MRI, OpDef, Visited);
  return false;
}
1514
1515bool SPIRVInstructionSelector::selectSplatVector(Register ResVReg,
1516 const SPIRVType *ResType,
1517 MachineInstr &I) const {
1518 unsigned N = 0;
1519 if (ResType->getOpcode() == SPIRV::OpTypeVector)
1520 N = GR.getScalarOrVectorComponentCount(ResType);
1521 else if (ResType->getOpcode() == SPIRV::OpTypeArray)
1522 N = getArrayComponentCount(MRI, ResType);
1523 else
1524 report_fatal_error("Cannot select G_SPLAT_VECTOR with a non-vector result");
1525
1526 unsigned OpIdx = I.getNumExplicitDefs();
1527 if (!I.getOperand(OpIdx).isReg())
1528 report_fatal_error("Unexpected argument in G_SPLAT_VECTOR");
1529
1530 // Check whether we may construct a constant vector.
1531 Register OpReg = I.getOperand(OpIdx).getReg();
1532 bool IsConst = isConstReg(MRI, OpReg);
1533
1534 if (!IsConst && N < 2)
1536 "There must be at least two constituent operands in a vector");
1537
1538 auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
1539 TII.get(IsConst ? SPIRV::OpConstantComposite
1540 : SPIRV::OpCompositeConstruct))
1541 .addDef(ResVReg)
1542 .addUse(GR.getSPIRVTypeID(ResType));
1543 for (unsigned i = 0; i < N; ++i)
1544 MIB.addUse(OpReg);
1545 return MIB.constrainAllUses(TII, TRI, RBI);
1546}
1547
1548bool SPIRVInstructionSelector::selectCmp(Register ResVReg,
1549 const SPIRVType *ResType,
1550 unsigned CmpOpc,
1551 MachineInstr &I) const {
1552 Register Cmp0 = I.getOperand(2).getReg();
1553 Register Cmp1 = I.getOperand(3).getReg();
1554 assert(GR.getSPIRVTypeForVReg(Cmp0)->getOpcode() ==
1555 GR.getSPIRVTypeForVReg(Cmp1)->getOpcode() &&
1556 "CMP operands should have the same type");
1557 return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CmpOpc))
1558 .addDef(ResVReg)
1559 .addUse(GR.getSPIRVTypeID(ResType))
1560 .addUse(Cmp0)
1561 .addUse(Cmp1)
1562 .constrainAllUses(TII, TRI, RBI);
1563}
1564
1565bool SPIRVInstructionSelector::selectICmp(Register ResVReg,
1566 const SPIRVType *ResType,
1567 MachineInstr &I) const {
1568 auto Pred = I.getOperand(1).getPredicate();
1569 unsigned CmpOpc;
1570
1571 Register CmpOperand = I.getOperand(2).getReg();
1572 if (GR.isScalarOfType(CmpOperand, SPIRV::OpTypePointer))
1573 CmpOpc = getPtrCmpOpcode(Pred);
1574 else if (GR.isScalarOrVectorOfType(CmpOperand, SPIRV::OpTypeBool))
1575 CmpOpc = getBoolCmpOpcode(Pred);
1576 else
1577 CmpOpc = getICmpOpcode(Pred);
1578 return selectCmp(ResVReg, ResType, CmpOpc, I);
1579}
1580
1581void SPIRVInstructionSelector::renderFImm32(MachineInstrBuilder &MIB,
1582 const MachineInstr &I,
1583 int OpIdx) const {
1584 assert(I.getOpcode() == TargetOpcode::G_FCONSTANT && OpIdx == -1 &&
1585 "Expected G_FCONSTANT");
1586 const ConstantFP *FPImm = I.getOperand(1).getFPImm();
1587 addNumImm(FPImm->getValueAPF().bitcastToAPInt(), MIB);
1588}
1589
1590void SPIRVInstructionSelector::renderImm32(MachineInstrBuilder &MIB,
1591 const MachineInstr &I,
1592 int OpIdx) const {
1593 assert(I.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1594 "Expected G_CONSTANT");
1595 addNumImm(I.getOperand(1).getCImm()->getValue(), MIB);
1596}
1597
1598Register
1599SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &I,
1600 const SPIRVType *ResType) const {
1601 Type *LLVMTy = IntegerType::get(GR.CurMF->getFunction().getContext(), 32);
1602 const SPIRVType *SpvI32Ty =
1603 ResType ? ResType : GR.getOrCreateSPIRVIntegerType(32, I, TII);
1604 // Find a constant in DT or build a new one.
1605 auto ConstInt = ConstantInt::get(LLVMTy, Val);
1606 Register NewReg = GR.find(ConstInt, GR.CurMF);
1607 if (!NewReg.isValid()) {
1608 NewReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
1609 GR.add(ConstInt, GR.CurMF, NewReg);
1610 MachineInstr *MI;
1611 MachineBasicBlock &BB = *I.getParent();
1612 if (Val == 0) {
1613 MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
1614 .addDef(NewReg)
1615 .addUse(GR.getSPIRVTypeID(SpvI32Ty));
1616 } else {
1617 MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
1618 .addDef(NewReg)
1619 .addUse(GR.getSPIRVTypeID(SpvI32Ty))
1620 .addImm(APInt(32, Val).getZExtValue());
1621 }
1622 constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
1623 }
1624 return NewReg;
1625}
1626
1627bool SPIRVInstructionSelector::selectFCmp(Register ResVReg,
1628 const SPIRVType *ResType,
1629 MachineInstr &I) const {
1630 unsigned CmpOp = getFCmpOpcode(I.getOperand(1).getPredicate());
1631 return selectCmp(ResVReg, ResType, CmpOp, I);
1632}
1633
1634Register SPIRVInstructionSelector::buildZerosVal(const SPIRVType *ResType,
1635 MachineInstr &I) const {
1636 // OpenCL uses nulls for Zero. In HLSL we don't use null constants.
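// Illustratively (a sketch, not verbatim output): a v4int zero becomes
//   %z = OpConstantNull %v4int                                  ; OpenCL
//   %z = OpConstantComposite %v4int %int_0 %int_0 %int_0 %int_0 ; HLSL/Vulkan
// where %int_0 is a placeholder name for the scalar zero constant.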
1637 bool ZeroAsNull = STI.isOpenCLEnv();
1638 if (ResType->getOpcode() == SPIRV::OpTypeVector)
1639 return GR.getOrCreateConstVector(0UL, I, ResType, TII, ZeroAsNull);
1640 return GR.getOrCreateConstInt(0, I, ResType, TII, ZeroAsNull);
1641}
1642
1643static APFloat getZeroFP(const Type *LLVMFloatTy) {
1644 if (!LLVMFloatTy)
1645 return APFloat::getZero(APFloat::IEEEsingle());
1646 switch (LLVMFloatTy->getScalarType()->getTypeID()) {
1647 case Type::HalfTyID:
1648 return APFloat::getZero(APFloat::IEEEhalf());
1649 default:
1650 case Type::FloatTyID:
1651 return APFloat::getZero(APFloat::IEEEsingle());
1652 case Type::DoubleTyID:
1653 return APFloat::getZero(APFloat::IEEEdouble());
1654 }
1655}
1656
1657Register SPIRVInstructionSelector::buildZerosValF(const SPIRVType *ResType,
1658 MachineInstr &I) const {
1659 // OpenCL uses nulls for Zero. In HLSL we don't use null constants.
1660 bool ZeroAsNull = STI.isOpenCLEnv();
1661 APFloat VZero = getZeroFP(GR.getTypeForSPIRVType(ResType));
1662 if (ResType->getOpcode() == SPIRV::OpTypeVector)
1663 return GR.getOrCreateConstVector(VZero, I, ResType, TII, ZeroAsNull);
1664 return GR.getOrCreateConstFP(VZero, I, ResType, TII, ZeroAsNull);
1665}
1666
1667Register SPIRVInstructionSelector::buildOnesVal(bool AllOnes,
1668 const SPIRVType *ResType,
1669 MachineInstr &I) const {
1670 unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
1671 APInt One =
1672 AllOnes ? APInt::getAllOnes(BitWidth) : APInt::getOneBitSet(BitWidth, 0);
1673 if (ResType->getOpcode() == SPIRV::OpTypeVector)
1674 return GR.getOrCreateConstVector(One.getZExtValue(), I, ResType, TII);
1675 return GR.getOrCreateConstInt(One.getZExtValue(), I, ResType, TII);
1676}
1677
1678bool SPIRVInstructionSelector::selectSelect(Register ResVReg,
1679 const SPIRVType *ResType,
1680 MachineInstr &I,
1681 bool IsSigned) const {
1682 // To extend a bool, we need to use OpSelect between constants.
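// A rough sketch of the pattern emitted below (names like %int_1 are
// placeholders for the constants built by buildOnesVal/buildZerosVal):
//   zext i1 %b to i32  ->  %r = OpSelect %int %b %int_1  %int_0
//   sext i1 %b to i32  ->  %r = OpSelect %int %b %int_-1 %int_0
// with OpSelectSISCond for scalar conditions and OpSelectSIVCond for vectors.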
1683 Register ZeroReg = buildZerosVal(ResType, I);
1684 Register OneReg = buildOnesVal(IsSigned, ResType, I);
1685 bool IsScalarBool =
1686 GR.isScalarOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool);
1687 unsigned Opcode =
1688 IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectSIVCond;
1689 return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
1690 .addDef(ResVReg)
1691 .addUse(GR.getSPIRVTypeID(ResType))
1692 .addUse(I.getOperand(1).getReg())
1693 .addUse(OneReg)
1694 .addUse(ZeroReg)
1695 .constrainAllUses(TII, TRI, RBI);
1696}
1697
1698bool SPIRVInstructionSelector::selectIToF(Register ResVReg,
1699 const SPIRVType *ResType,
1700 MachineInstr &I, bool IsSigned,
1701 unsigned Opcode) const {
1702 Register SrcReg = I.getOperand(1).getReg();
1703 // We could convert a bool value directly to a float type without OpConvert*ToF;
1704 // however, the translator generates OpSelect+OpConvert*ToF, so we do the same.
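// Sketch of the resulting two-step lowering (illustrative): for
//   %f = uitofp i1 %b to float
// the bool is first widened through the OpSelect path above,
//   %tmp = OpSelect %int %b %int_1 %int_0
// and then converted by the unary op, e.g. %f = OpConvertUToF %float %tmp.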
1705 if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool)) {
1706 unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
1707 SPIRVType *TmpType = GR.getOrCreateSPIRVIntegerType(BitWidth, I, TII);
1708 if (ResType->getOpcode() == SPIRV::OpTypeVector) {
1709 const unsigned NumElts = ResType->getOperand(2).getImm();
1710 TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts, I, TII);
1711 }
1712 SrcReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1713 selectSelect(SrcReg, TmpType, I, false);
1714 }
1715 return selectUnOpWithSrc(ResVReg, ResType, I, SrcReg, Opcode);
1716}
1717
1718bool SPIRVInstructionSelector::selectExt(Register ResVReg,
1719 const SPIRVType *ResType,
1720 MachineInstr &I, bool IsSigned) const {
1721 Register SrcReg = I.getOperand(1).getReg();
1722 if (GR.isScalarOrVectorOfType(SrcReg, SPIRV::OpTypeBool))
1723 return selectSelect(ResVReg, ResType, I, IsSigned);
1724
1725 SPIRVType *SrcType = GR.getSPIRVTypeForVReg(SrcReg);
1726 if (SrcType == ResType)
1727 return BuildMI(*I.getParent(), I, I.getDebugLoc(),
1728 TII.get(TargetOpcode::COPY))
1729 .addDef(ResVReg)
1730 .addUse(SrcReg)
1731 .constrainAllUses(TII, TRI, RBI);
1732
1733 unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
1734 return selectUnOp(ResVReg, ResType, I, Opcode);
1735}
1736
1737bool SPIRVInstructionSelector::selectIntToBool(Register IntReg,
1738 Register ResVReg,
1739 MachineInstr &I,
1740 const SPIRVType *IntTy,
1741 const SPIRVType *BoolTy) const {
1742 // To truncate to a bool, we use OpBitwiseAnd 1 and OpINotEqual to zero.
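// Sketch of the emitted pair (illustrative; %int_1/%int_0 stand for the
// constants built below):
//   %bit = OpBitwiseAnd[S|V] %int %x %int_1
//   %res = OpINotEqual %bool %bit %int_0
// i.e. only the lowest bit of the integer decides the resulting bool.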
1743 Register BitIntReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1744 bool IsVectorTy = IntTy->getOpcode() == SPIRV::OpTypeVector;
1745 unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
1746 Register Zero = buildZerosVal(IntTy, I);
1747 Register One = buildOnesVal(false, IntTy, I);
1748 MachineBasicBlock &BB = *I.getParent();
1749 BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
1750 .addDef(BitIntReg)
1751 .addUse(GR.getSPIRVTypeID(IntTy))
1752 .addUse(IntReg)
1753 .addUse(One)
1754 .constrainAllUses(TII, TRI, RBI);
1755 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
1756 .addDef(ResVReg)
1757 .addUse(GR.getSPIRVTypeID(BoolTy))
1758 .addUse(BitIntReg)
1759 .addUse(Zero)
1760 .constrainAllUses(TII, TRI, RBI);
1761}
1762
1763bool SPIRVInstructionSelector::selectTrunc(Register ResVReg,
1764 const SPIRVType *ResType,
1765 MachineInstr &I) const {
1766 Register IntReg = I.getOperand(1).getReg();
1767 const SPIRVType *ArgType = GR.getSPIRVTypeForVReg(IntReg);
1768 if (GR.isScalarOrVectorOfType(ResVReg, SPIRV::OpTypeBool))
1769 return selectIntToBool(IntReg, ResVReg, I, ArgType, ResType);
1770 if (ArgType == ResType)
1771 return BuildMI(*I.getParent(), I, I.getDebugLoc(),
1772 TII.get(TargetOpcode::COPY))
1773 .addDef(ResVReg)
1774 .addUse(IntReg)
1775 .constrainAllUses(TII, TRI, RBI);
1776 bool IsSigned = GR.isScalarOrVectorSigned(ResType);
1777 unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
1778 return selectUnOp(ResVReg, ResType, I, Opcode);
1779}
1780
1781bool SPIRVInstructionSelector::selectConst(Register ResVReg,
1782 const SPIRVType *ResType,
1783 const APInt &Imm,
1784 MachineInstr &I) const {
1785 unsigned TyOpcode = ResType->getOpcode();
1786 assert(TyOpcode != SPIRV::OpTypePointer || Imm.isZero());
1787 MachineBasicBlock &BB = *I.getParent();
1788 if ((TyOpcode == SPIRV::OpTypePointer || TyOpcode == SPIRV::OpTypeEvent) &&
1789 Imm.isZero())
1790 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
1791 .addDef(ResVReg)
1792 .addUse(GR.getSPIRVTypeID(ResType))
1793 .constrainAllUses(TII, TRI, RBI);
1794 if (TyOpcode == SPIRV::OpTypeInt) {
1795 assert(Imm.getBitWidth() <= 64 && "Unsupported integer width!");
1796 Register Reg = GR.getOrCreateConstInt(Imm.getZExtValue(), I, ResType, TII);
1797 if (Reg == ResVReg)
1798 return true;
1799 return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
1800 .addDef(ResVReg)
1801 .addUse(Reg)
1802 .constrainAllUses(TII, TRI, RBI);
1803 }
1804 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
1805 .addDef(ResVReg)
1806 .addUse(GR.getSPIRVTypeID(ResType));
1807 // <=32-bit integers should be caught by the sdag pattern.
1808 assert(Imm.getBitWidth() > 32);
1809 addNumImm(Imm, MIB);
1810 return MIB.constrainAllUses(TII, TRI, RBI);
1811}
1812
1813bool SPIRVInstructionSelector::selectOpUndef(Register ResVReg,
1814 const SPIRVType *ResType,
1815 MachineInstr &I) const {
1816 return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
1817 .addDef(ResVReg)
1818 .addUse(GR.getSPIRVTypeID(ResType))
1819 .constrainAllUses(TII, TRI, RBI);
1820}
1821
1822static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
1823 assert(MO.isReg());
1824 const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
1825 if (TypeInst->getOpcode() == SPIRV::ASSIGN_TYPE) {
1826 assert(TypeInst->getOperand(1).isReg());
1827 MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
1828 return ImmInst->getOpcode() == TargetOpcode::G_CONSTANT;
1829 }
1830 return TypeInst->getOpcode() == SPIRV::OpConstantI;
1831}
1832
1833static int64_t foldImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
1834 const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
1835 if (TypeInst->getOpcode() == SPIRV::OpConstantI)
1836 return TypeInst->getOperand(2).getImm();
1837 MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
1838 assert(ImmInst->getOpcode() == TargetOpcode::G_CONSTANT);
1839 return ImmInst->getOperand(1).getCImm()->getZExtValue();
1840}
1841
1842bool SPIRVInstructionSelector::selectInsertVal(Register ResVReg,
1843 const SPIRVType *ResType,
1844 MachineInstr &I) const {
1845 MachineBasicBlock &BB = *I.getParent();
1846 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeInsert))
1847 .addDef(ResVReg)
1848 .addUse(GR.getSPIRVTypeID(ResType))
1849 // object to insert
1850 .addUse(I.getOperand(3).getReg())
1851 // composite to insert into
1852 .addUse(I.getOperand(2).getReg());
1853 for (unsigned i = 4; i < I.getNumOperands(); i++)
1854 MIB.addImm(foldImm(I.getOperand(i), MRI));
1855 return MIB.constrainAllUses(TII, TRI, RBI);
1856}
1857
1858bool SPIRVInstructionSelector::selectExtractVal(Register ResVReg,
1859 const SPIRVType *ResType,
1860 MachineInstr &I) const {
1861 MachineBasicBlock &BB = *I.getParent();
1862 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
1863 .addDef(ResVReg)
1864 .addUse(GR.getSPIRVTypeID(ResType))
1865 .addUse(I.getOperand(2).getReg());
1866 for (unsigned i = 3; i < I.getNumOperands(); i++)
1867 MIB.addImm(foldImm(I.getOperand(i), MRI));
1868 return MIB.constrainAllUses(TII, TRI, RBI);
1869}
1870
1871bool SPIRVInstructionSelector::selectInsertElt(Register ResVReg,
1872 const SPIRVType *ResType,
1873 MachineInstr &I) const {
1874 if (isImm(I.getOperand(4), MRI))
1875 return selectInsertVal(ResVReg, ResType, I);
1876 MachineBasicBlock &BB = *I.getParent();
1877 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorInsertDynamic))
1878 .addDef(ResVReg)
1879 .addUse(GR.getSPIRVTypeID(ResType))
1880 .addUse(I.getOperand(2).getReg())
1881 .addUse(I.getOperand(3).getReg())
1882 .addUse(I.getOperand(4).getReg())
1883 .constrainAllUses(TII, TRI, RBI);
1884}
1885
1886bool SPIRVInstructionSelector::selectExtractElt(Register ResVReg,
1887 const SPIRVType *ResType,
1888 MachineInstr &I) const {
1889 if (isImm(I.getOperand(3), MRI))
1890 return selectExtractVal(ResVReg, ResType, I);
1891 MachineBasicBlock &BB = *I.getParent();
1892 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorExtractDynamic))
1893 .addDef(ResVReg)
1894 .addUse(GR.getSPIRVTypeID(ResType))
1895 .addUse(I.getOperand(2).getReg())
1896 .addUse(I.getOperand(3).getReg())
1897 .constrainAllUses(TII, TRI, RBI);
1898}
1899
1900bool SPIRVInstructionSelector::selectGEP(Register ResVReg,
1901 const SPIRVType *ResType,
1902 MachineInstr &I) const {
1903 const bool IsGEPInBounds = I.getOperand(2).getImm();
1904
1905 // OpAccessChain could be used for OpenCL, but the SPIRV-LLVM Translator only
1906 // relies on PtrAccessChain, so we'll try not to deviate. For Vulkan however,
1907 // we have to use Op[InBounds]AccessChain.
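// Summarizing the opcode choice below: Vulkan uses Op[InBounds]AccessChain,
// everything else Op[InBounds]PtrAccessChain. The Ptr* forms take an extra
// leading "Element" index, which is why StartingIndex is 4 for them and 5
// for the plain AccessChain forms.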
1908 const unsigned Opcode = STI.isVulkanEnv()
1909 ? (IsGEPInBounds ? SPIRV::OpInBoundsAccessChain
1910 : SPIRV::OpAccessChain)
1911 : (IsGEPInBounds ? SPIRV::OpInBoundsPtrAccessChain
1912 : SPIRV::OpPtrAccessChain);
1913
1914 auto Res = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
1915 .addDef(ResVReg)
1916 .addUse(GR.getSPIRVTypeID(ResType))
1917 // Object to get a pointer to.
1918 .addUse(I.getOperand(3).getReg());
1919 // Adding indices.
1920 const unsigned StartingIndex =
1921 (Opcode == SPIRV::OpAccessChain || Opcode == SPIRV::OpInBoundsAccessChain)
1922 ? 5
1923 : 4;
1924 for (unsigned i = StartingIndex; i < I.getNumExplicitOperands(); ++i)
1925 Res.addUse(I.getOperand(i).getReg());
1926 return Res.constrainAllUses(TII, TRI, RBI);
1927}
1928
1929// Maybe wrap a value into OpSpecConstantOp
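// Sketch of the wrapping performed below (illustrative): a non-constant
// operand %op of spv_const_composite is replaced by
//   %wrap = OpSpecConstantOp %ty Bitcast %op
// and %wrap is recorded in CompositeArgs; constants, aggregates, and
// addrspace casts are passed through unchanged.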
1930bool SPIRVInstructionSelector::wrapIntoSpecConstantOp(
1931 MachineInstr &I, SmallVector<Register> &CompositeArgs) const {
1932 bool Result = true;
1933 unsigned Lim = I.getNumExplicitOperands();
1934 for (unsigned i = I.getNumExplicitDefs() + 1; i < Lim; ++i) {
1935 Register OpReg = I.getOperand(i).getReg();
1936 SPIRVType *OpDefine = MRI->getVRegDef(OpReg);
1937 SPIRVType *OpType = GR.getSPIRVTypeForVReg(OpReg);
1938 SmallPtrSet<SPIRVType *, 4> Visited;
1939 if (!OpDefine || !OpType || isConstReg(MRI, OpDefine, Visited) ||
1940 OpDefine->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST ||
1941 GR.isAggregateType(OpType)) {
1942 // The case of G_ADDRSPACE_CAST inside spv_const_composite() is processed
1943 // by selectAddrSpaceCast()
1944 CompositeArgs.push_back(OpReg);
1945 continue;
1946 }
1947 MachineFunction *MF = I.getMF();
1948 Register WrapReg = GR.find(OpDefine, MF);
1949 if (WrapReg.isValid()) {
1950 CompositeArgs.push_back(WrapReg);
1951 continue;
1952 }
1953 // Create a new register for the wrapper
1954 WrapReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1955 GR.add(OpDefine, MF, WrapReg);
1956 CompositeArgs.push_back(WrapReg);
1957 // Decorate the wrapper register and generate a new instruction
1958 MRI->setType(WrapReg, LLT::pointer(0, 32));
1959 GR.assignSPIRVTypeToVReg(OpType, WrapReg, *MF);
1960 MachineBasicBlock &BB = *I.getParent();
1961 Result = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
1962 .addDef(WrapReg)
1963 .addUse(GR.getSPIRVTypeID(OpType))
1964 .addImm(static_cast<uint32_t>(SPIRV::Opcode::Bitcast))
1965 .addUse(OpReg)
1966 .constrainAllUses(TII, TRI, RBI);
1967 if (!Result)
1968 break;
1969 }
1970 return Result;
1971}
1972
1973bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
1974 const SPIRVType *ResType,
1975 MachineInstr &I) const {
1976 MachineBasicBlock &BB = *I.getParent();
1977 Intrinsic::ID IID = cast<GIntrinsic>(I).getIntrinsicID();
1978 switch (IID) {
1979 case Intrinsic::spv_load:
1980 return selectLoad(ResVReg, ResType, I);
1981 case Intrinsic::spv_store:
1982 return selectStore(I);
1983 case Intrinsic::spv_extractv:
1984 return selectExtractVal(ResVReg, ResType, I);
1985 case Intrinsic::spv_insertv:
1986 return selectInsertVal(ResVReg, ResType, I);
1987 case Intrinsic::spv_extractelt:
1988 return selectExtractElt(ResVReg, ResType, I);
1989 case Intrinsic::spv_insertelt:
1990 return selectInsertElt(ResVReg, ResType, I);
1991 case Intrinsic::spv_gep:
1992 return selectGEP(ResVReg, ResType, I);
1993 case Intrinsic::spv_unref_global:
1994 case Intrinsic::spv_init_global: {
1995 MachineInstr *MI = MRI->getVRegDef(I.getOperand(1).getReg());
1996 MachineInstr *Init = I.getNumExplicitOperands() > 2
1997 ? MRI->getVRegDef(I.getOperand(2).getReg())
1998 : nullptr;
1999 assert(MI);
2000 return selectGlobalValue(MI->getOperand(0).getReg(), *MI, Init);
2001 }
2002 case Intrinsic::spv_undef: {
2003 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
2004 .addDef(ResVReg)
2005 .addUse(GR.getSPIRVTypeID(ResType));
2006 return MIB.constrainAllUses(TII, TRI, RBI);
2007 }
2008 case Intrinsic::spv_const_composite: {
2009 // If no values are attached, the composite is null constant.
2010 bool IsNull = I.getNumExplicitDefs() + 1 == I.getNumExplicitOperands();
2011 // Select a proper instruction.
2012 unsigned Opcode = SPIRV::OpConstantNull;
2013 SmallVector<Register> CompositeArgs;
2014 if (!IsNull) {
2015 Opcode = SPIRV::OpConstantComposite;
2016 if (!wrapIntoSpecConstantOp(I, CompositeArgs))
2017 return false;
2018 }
2019 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
2020 .addDef(ResVReg)
2021 .addUse(GR.getSPIRVTypeID(ResType));
2022 // Skip the type MD node; we already used it when generating assign.type for this.
2023 if (!IsNull) {
2024 for (Register OpReg : CompositeArgs)
2025 MIB.addUse(OpReg);
2026 }
2027 return MIB.constrainAllUses(TII, TRI, RBI);
2028 }
2029 case Intrinsic::spv_assign_name: {
2030 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpName));
2031 MIB.addUse(I.getOperand(I.getNumExplicitDefs() + 1).getReg());
2032 for (unsigned i = I.getNumExplicitDefs() + 2;
2033 i < I.getNumExplicitOperands(); ++i) {
2034 MIB.addImm(I.getOperand(i).getImm());
2035 }
2036 return MIB.constrainAllUses(TII, TRI, RBI);
2037 }
2038 case Intrinsic::spv_switch: {
2039 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSwitch));
2040 for (unsigned i = 1; i < I.getNumExplicitOperands(); ++i) {
2041 if (I.getOperand(i).isReg())
2042 MIB.addReg(I.getOperand(i).getReg());
2043 else if (I.getOperand(i).isCImm())
2044 addNumImm(I.getOperand(i).getCImm()->getValue(), MIB);
2045 else if (I.getOperand(i).isMBB())
2046 MIB.addMBB(I.getOperand(i).getMBB());
2047 else
2048 llvm_unreachable("Unexpected OpSwitch operand");
2049 }
2050 return MIB.constrainAllUses(TII, TRI, RBI);
2051 }
2052 case Intrinsic::spv_cmpxchg:
2053 return selectAtomicCmpXchg(ResVReg, ResType, I);
2054 case Intrinsic::spv_unreachable:
2055 BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUnreachable));
2056 break;
2057 case Intrinsic::spv_alloca:
2058 return selectFrameIndex(ResVReg, ResType, I);
2059 case Intrinsic::spv_alloca_array:
2060 return selectAllocaArray(ResVReg, ResType, I);
2061 case Intrinsic::spv_assume:
2062 if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
2063 BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpAssumeTrueKHR))
2064 .addUse(I.getOperand(1).getReg());
2065 break;
2066 case Intrinsic::spv_expect:
2067 if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
2068 BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExpectKHR))
2069 .addDef(ResVReg)
2070 .addUse(GR.getSPIRVTypeID(ResType))
2071 .addUse(I.getOperand(2).getReg())
2072 .addUse(I.getOperand(3).getReg());
2073 break;
2074 case Intrinsic::spv_thread_id:
2075 return selectSpvThreadId(ResVReg, ResType, I);
2076 case Intrinsic::spv_all:
2077 return selectAll(ResVReg, ResType, I);
2078 case Intrinsic::spv_any:
2079 return selectAny(ResVReg, ResType, I);
2080 case Intrinsic::spv_lerp:
2081 return selectFmix(ResVReg, ResType, I);
2082 case Intrinsic::spv_frac:
2083 return selectFrac(ResVReg, ResType, I);
2084 case Intrinsic::spv_rsqrt:
2085 return selectRsqrt(ResVReg, ResType, I);
2086 case Intrinsic::spv_lifetime_start:
2087 case Intrinsic::spv_lifetime_end: {
2088 unsigned Op = IID == Intrinsic::spv_lifetime_start ? SPIRV::OpLifetimeStart
2089 : SPIRV::OpLifetimeStop;
2090 int64_t Size = I.getOperand(I.getNumExplicitDefs() + 1).getImm();
2091 Register PtrReg = I.getOperand(I.getNumExplicitDefs() + 2).getReg();
2092 unsigned PointeeOpType = GR.getPointeeTypeOp(PtrReg);
2093 bool IsNonvoidPtr = PointeeOpType != 0 && PointeeOpType != SPIRV::OpTypeVoid;
2094 if (Size == -1 || IsNonvoidPtr)
2095 Size = 0;
2096 BuildMI(BB, I, I.getDebugLoc(), TII.get(Op)).addUse(PtrReg).addImm(Size);
2097 } break;
2098 default: {
2099 std::string DiagMsg;
2100 raw_string_ostream OS(DiagMsg);
2101 I.print(OS);
2102 DiagMsg = "Intrinsic selection not implemented: " + DiagMsg;
2103 report_fatal_error(DiagMsg.c_str(), false);
2104 }
2105 }
2106 return true;
2107}
2108
2109bool SPIRVInstructionSelector::selectAllocaArray(Register ResVReg,
2110 const SPIRVType *ResType,
2111 MachineInstr &I) const {
2112 // There was an allocation size parameter to the allocation instruction,
2113 // and it is not 1.
2114 MachineBasicBlock &BB = *I.getParent();
2115 return BuildMI(BB, I, I.getDebugLoc(),
2116 TII.get(SPIRV::OpVariableLengthArrayINTEL))
2117 .addDef(ResVReg)
2118 .addUse(GR.getSPIRVTypeID(ResType))
2119 .addUse(I.getOperand(2).getReg())
2120 .constrainAllUses(TII, TRI, RBI);
2121}
2122
2123bool SPIRVInstructionSelector::selectFrameIndex(Register ResVReg,
2124 const SPIRVType *ResType,
2125 MachineInstr &I) const {
2126 // Change order of instructions if needed: all OpVariable instructions in a
2127 // function must be the first instructions in the first block
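// Schematically, the header that the loop below skips over is:
//   OpFunction ... OpFunctionParameter* ... OpLabel ...
// and the OpVariable is then inserted right after it, at the start of the
// first block's body.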
2128 MachineFunction *MF = I.getParent()->getParent();
2129 MachineBasicBlock *MBB = &MF->front();
2130 auto It = MBB->SkipPHIsAndLabels(MBB->begin()), E = MBB->end();
2131 bool IsHeader = false;
2132 unsigned Opcode;
2133 for (; It != E && It != I; ++It) {
2134 Opcode = It->getOpcode();
2135 if (Opcode == SPIRV::OpFunction || Opcode == SPIRV::OpFunctionParameter) {
2136 IsHeader = true;
2137 } else if (IsHeader &&
2138 !(Opcode == SPIRV::ASSIGN_TYPE || Opcode == SPIRV::OpLabel)) {
2139 ++It;
2140 break;
2141 }
2142 }
2143 return BuildMI(*MBB, It, It->getDebugLoc(), TII.get(SPIRV::OpVariable))
2144 .addDef(ResVReg)
2145 .addUse(GR.getSPIRVTypeID(ResType))
2146 .addImm(static_cast<uint32_t>(SPIRV::StorageClass::Function))
2147 .constrainAllUses(TII, TRI, RBI);
2148}
2149
2150bool SPIRVInstructionSelector::selectBranch(MachineInstr &I) const {
2151 // InstructionSelector walks backwards through the instructions. We can use
2152 // both a G_BR and a G_BRCOND to create an OpBranchConditional. We hit G_BR
2153 // first, so we can generate an OpBranchConditional here. If there is no
2154 // G_BRCOND, we just use OpBranch for a regular unconditional branch.
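// Sketch of the MIR being matched (illustrative):
//   G_BRCOND %cond, %bb.true
//   G_BR %bb.false
// collapses into "OpBranchConditional %cond %bb.true %bb.false", while a
// lone G_BR becomes "OpBranch %bb.target".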
2155 const MachineInstr *PrevI = I.getPrevNode();
2156 MachineBasicBlock &MBB = *I.getParent();
2157 if (PrevI != nullptr && PrevI->getOpcode() == TargetOpcode::G_BRCOND) {
2158 return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
2159 .addUse(PrevI->getOperand(0).getReg())
2160 .addMBB(PrevI->getOperand(1).getMBB())
2161 .addMBB(I.getOperand(0).getMBB())
2162 .constrainAllUses(TII, TRI, RBI);
2163 }
2164 return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranch))
2165 .addMBB(I.getOperand(0).getMBB())
2166 .constrainAllUses(TII, TRI, RBI);
2167}
2168
2169bool SPIRVInstructionSelector::selectBranchCond(MachineInstr &I) const {
2170 // InstructionSelector walks backwards through the instructions. For an
2171 // explicit conditional branch with no fallthrough, we use both a G_BR and a
2172 // G_BRCOND to create an OpBranchConditional. We should hit G_BR first, and
2173 // generate the OpBranchConditional in selectBranch above.
2174 //
2175 // If an OpBranchConditional has been generated, we simply return, as the work
2176 // is already done. If there is no OpBranchConditional, LLVM must be relying on
2177 // implicit fallthrough to the next basic block, so we need to create an
2178 // OpBranchConditional with an explicit "false" argument pointing to the next
2179 // basic block that LLVM would fall through to.
2180 const MachineInstr *NextI = I.getNextNode();
2181 // Check if this has already been successfully selected.
2182 if (NextI != nullptr && NextI->getOpcode() == SPIRV::OpBranchConditional)
2183 return true;
2184 // Must be relying on implicit block fallthrough, so generate an
2185 // OpBranchConditional with the "next" basic block as the "false" target.
2186 MachineBasicBlock &MBB = *I.getParent();
2187 unsigned NextMBBNum = MBB.getNextNode()->getNumber();
2188 MachineBasicBlock *NextMBB = I.getMF()->getBlockNumbered(NextMBBNum);
2189 return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
2190 .addUse(I.getOperand(0).getReg())
2191 .addMBB(I.getOperand(1).getMBB())
2192 .addMBB(NextMBB)
2193 .constrainAllUses(TII, TRI, RBI);
2194}
2195
2196bool SPIRVInstructionSelector::selectPhi(Register ResVReg,
2197 const SPIRVType *ResType,
2198 MachineInstr &I) const {
2199 auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpPhi))
2200 .addDef(ResVReg)
2201 .addUse(GR.getSPIRVTypeID(ResType));
2202 const unsigned NumOps = I.getNumOperands();
2203 for (unsigned i = 1; i < NumOps; i += 2) {
2204 MIB.addUse(I.getOperand(i + 0).getReg());
2205 MIB.addMBB(I.getOperand(i + 1).getMBB());
2206 }
2207 return MIB.constrainAllUses(TII, TRI, RBI);
2208}
2209
2210bool SPIRVInstructionSelector::selectGlobalValue(
2211 Register ResVReg, MachineInstr &I, const MachineInstr *Init) const {
2212 // FIXME: don't use MachineIRBuilder here, replace it with BuildMI.
2213 MachineIRBuilder MIRBuilder(I);
2214 const GlobalValue *GV = I.getOperand(1).getGlobal();
2215 Type *GVType = toTypedPointer(GR.getDeducedGlobalValueType(GV));
2216 SPIRVType *PointerBaseType;
2217 if (GVType->isArrayTy()) {
2218 SPIRVType *ArrayElementType =
2219 GR.getOrCreateSPIRVType(GVType->getArrayElementType(), MIRBuilder,
2220 SPIRV::AccessQualifier::ReadWrite, false);
2221 PointerBaseType = GR.getOrCreateSPIRVArrayType(
2222 ArrayElementType, GVType->getArrayNumElements(), I, TII);
2223 } else {
2224 PointerBaseType = GR.getOrCreateSPIRVType(
2225 GVType, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, false);
2226 }
2227 SPIRVType *ResType = GR.getOrCreateSPIRVPointerType(
2228 PointerBaseType, I, TII,
2229 addressSpaceToStorageClass(GV->getAddressSpace(), STI));
2230
2231 std::string GlobalIdent;
2232 if (!GV->hasName()) {
2233 unsigned &ID = UnnamedGlobalIDs[GV];
2234 if (ID == 0)
2235 ID = UnnamedGlobalIDs.size();
2236 GlobalIdent = "__unnamed_" + Twine(ID).str();
2237 } else {
2238 GlobalIdent = GV->getGlobalIdentifier();
2239 }
2240
2241 // Behaviour of functions as operands depends on availability of the
2242 // corresponding extension (SPV_INTEL_function_pointers):
2243 // - If there is an extension to operate with functions as operands:
2244 // We create a proper constant operand and evaluate a correct type for a
2245 // function pointer.
2246 // - Without the required extension:
2247 // We have functions as operands in tests with blocks of instructions, e.g. in
2248 // transcoding/global_block.ll. These operands are not used and should be
2249 // substituted by zero constants. Their type is expected to be always
2250 // OpTypePointer Function %uchar.
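// Sketch (illustrative): with SPV_INTEL_function_pointers a reference to @f
// becomes
//   %c = OpConstantFunctionPointerINTEL %fnptr_ty %f
// while without the extension it degenerates to
//   %c = OpConstantNull %ptr_Function_uchar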
2251 if (isa<Function>(GV)) {
2252 const Constant *ConstVal = GV;
2253 MachineBasicBlock &BB = *I.getParent();
2254 Register NewReg = GR.find(ConstVal, GR.CurMF);
2255 if (!NewReg.isValid()) {
2256 Register NewReg = ResVReg;
2257 GR.add(ConstVal, GR.CurMF, NewReg);
2258 const Function *GVFun =
2259 STI.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers)
2260 ? dyn_cast<Function>(GV)
2261 : nullptr;
2262 if (GVFun) {
2263 // References to a function via function pointers generate virtual
2264 // registers without a definition. We will resolve it later, during
2265 // module analysis stage.
2266 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2267 Register FuncVReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
2268 MRI->setRegClass(FuncVReg, &SPIRV::IDRegClass);
2269 MachineInstrBuilder MB =
2270 BuildMI(BB, I, I.getDebugLoc(),
2271 TII.get(SPIRV::OpConstantFunctionPointerINTEL))
2272 .addDef(NewReg)
2273 .addUse(GR.getSPIRVTypeID(ResType))
2274 .addUse(FuncVReg);
2275 // mapping the function pointer to the used Function
2276 GR.recordFunctionPointer(&MB.getInstr()->getOperand(2), GVFun);
2277 return MB.constrainAllUses(TII, TRI, RBI);
2278 }
2279 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
2280 .addDef(NewReg)
2281 .addUse(GR.getSPIRVTypeID(ResType))
2282 .constrainAllUses(TII, TRI, RBI);
2283 }
2284 assert(NewReg != ResVReg);
2285 return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
2286 .addDef(ResVReg)
2287 .addUse(NewReg)
2288 .constrainAllUses(TII, TRI, RBI);
2289 }
2290 auto GlobalVar = cast<GlobalVariable>(GV);
2291 assert(GlobalVar->getName() != "llvm.global.annotations");
2292
2293 bool HasInit = GlobalVar->hasInitializer() &&
2294 !isa<UndefValue>(GlobalVar->getInitializer());
2295 // Skip the empty declaration for GVs with initializers until we get the decl with
2296 // passed initializer.
2297 if (HasInit && !Init)
2298 return true;
2299
2300 unsigned AddrSpace = GV->getAddressSpace();
2301 SPIRV::StorageClass::StorageClass Storage =
2302 addressSpaceToStorageClass(AddrSpace, STI);
2303 bool HasLnkTy = GV->getLinkage() != GlobalValue::InternalLinkage &&
2304 Storage != SPIRV::StorageClass::Function;
2305 SPIRV::LinkageType::LinkageType LnkType =
2306 (GV->isDeclaration() || GV->hasAvailableExternallyLinkage())
2307 ? SPIRV::LinkageType::Import
2308 : ((GV->getLinkage() == GlobalValue::LinkOnceODRLinkage &&
2309 STI.canUseExtension(SPIRV::Extension::SPV_KHR_linkonce_odr))
2310 ? SPIRV::LinkageType::LinkOnceODR
2311 : SPIRV::LinkageType::Export);
2312
2313 Register Reg = GR.buildGlobalVariable(ResVReg, ResType, GlobalIdent, GV,
2314 Storage, Init, GlobalVar->isConstant(),
2315 HasLnkTy, LnkType, MIRBuilder, true);
2316 return Reg.isValid();
2317}
2318
2319bool SPIRVInstructionSelector::selectLog10(Register ResVReg,
2320 const SPIRVType *ResType,
2321 MachineInstr &I) const {
2322 if (STI.canUseExtInstSet(SPIRV::InstructionSet::OpenCL_std)) {
2323 return selectExtInst(ResVReg, ResType, I, CL::log10);
2324 }
2325
2326 // There is no log10 instruction in the GLSL Extended Instruction set, so it
2327 // is implemented as:
2328 // log10(x) = log2(x) * (1 / log2(10))
2329 // = log2(x) * 0.30103
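// (Since log10(x) = log2(x) / log2(10) and 1 / log2(10) = log10(2) =
// 0.30102999..., the constant below is that factor rounded to 0.30103.)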
2330
2331 MachineIRBuilder MIRBuilder(I);
2332 MachineBasicBlock &BB = *I.getParent();
2333
2334 // Build log2(x).
2335 Register VarReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
2336 bool Result =
2337 BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
2338 .addDef(VarReg)
2339 .addUse(GR.getSPIRVTypeID(ResType))
2340 .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
2341 .addImm(GL::Log2)
2342 .add(I.getOperand(1))
2343 .constrainAllUses(TII, TRI, RBI);
2344
2345 // Build 0.30103.
2346 assert(ResType->getOpcode() == SPIRV::OpTypeVector ||
2347 ResType->getOpcode() == SPIRV::OpTypeFloat);
2348 // TODO: Add matrix implementation once supported by the HLSL frontend.
2349 const SPIRVType *SpirvScalarType =
2350 ResType->getOpcode() == SPIRV::OpTypeVector
2351 ? GR.getSPIRVTypeForVReg(ResType->getOperand(1).getReg())
2352 : ResType;
2353 Register ScaleReg =
2354 GR.buildConstantFP(APFloat(0.30103f), MIRBuilder, SpirvScalarType);
2355
2356 // Multiply log2(x) by 0.30103 to get log10(x) result.
2357 auto Opcode = ResType->getOpcode() == SPIRV::OpTypeVector
2358 ? SPIRV::OpVectorTimesScalar
2359 : SPIRV::OpFMulS;
2360 Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
2361 .addDef(ResVReg)
2362 .addUse(GR.getSPIRVTypeID(ResType))
2363 .addUse(VarReg)
2364 .addUse(ScaleReg)
2365 .constrainAllUses(TII, TRI, RBI);
2366
2367 return Result;
2368}
2369
2370bool SPIRVInstructionSelector::selectSpvThreadId(Register ResVReg,
2371 const SPIRVType *ResType,
2372 MachineInstr &I) const {
2373 // DX intrinsic: @llvm.dx.thread.id(i32)
2374 // ID Name Description
2375 // 93 ThreadId reads the thread ID
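// Roughly, the lowering built below is (a sketch, not verbatim output):
//   %gid = OpVariable %_ptr_Input_v3uint Input ; BuiltIn GlobalInvocationId
//   %vec = OpLoad %v3uint %gid
//   %res = OpCompositeExtract %uint %vec <idx> ; idx = the constant operand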
2376
2377 MachineIRBuilder MIRBuilder(I);
2378 const SPIRVType *U32Type = GR.getOrCreateSPIRVIntegerType(32, MIRBuilder);
2379 const SPIRVType *Vec3Ty =
2380 GR.getOrCreateSPIRVVectorType(U32Type, 3, MIRBuilder);
2381 const SPIRVType *PtrType = GR.getOrCreateSPIRVPointerType(
2382 Vec3Ty, MIRBuilder, SPIRV::StorageClass::Input);
2383
2384 // Create new register for GlobalInvocationID builtin variable.
2385 Register NewRegister =
2386 MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::IDRegClass);
2387 MIRBuilder.getMRI()->setType(NewRegister, LLT::pointer(0, 32));
2388 GR.assignSPIRVTypeToVReg(PtrType, NewRegister, MIRBuilder.getMF());
2389
2390 // Build GlobalInvocationID global variable with the necessary decorations.
2391 Register Variable = GR.buildGlobalVariable(
2392 NewRegister, PtrType,
2393 getLinkStringForBuiltIn(SPIRV::BuiltIn::GlobalInvocationId), nullptr,
2394 SPIRV::StorageClass::Input, nullptr, true, true,
2395 SPIRV::LinkageType::Import, MIRBuilder, false);
2396
2397 // Create new register for loading value.
2398 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2399 Register LoadedRegister = MRI->createVirtualRegister(&SPIRV::IDRegClass);
2400 MIRBuilder.getMRI()->setType(LoadedRegister, LLT::pointer(0, 32));
2401 GR.assignSPIRVTypeToVReg(Vec3Ty, LoadedRegister, MIRBuilder.getMF());
2402
2403 // Load v3uint value from the global variable.
2404 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
2405 .addDef(LoadedRegister)
2406 .addUse(GR.getSPIRVTypeID(Vec3Ty))
2407 .addUse(Variable);
2408
2409 // Get Thread ID index. Expecting operand is a constant immediate value,
2410 // wrapped in a type assignment.
2411 assert(I.getOperand(2).isReg());
2412 Register ThreadIdReg = I.getOperand(2).getReg();
2413 SPIRVType *ConstTy = this->MRI->getVRegDef(ThreadIdReg);
2414 assert(ConstTy && ConstTy->getOpcode() == SPIRV::ASSIGN_TYPE &&
2415 ConstTy->getOperand(1).isReg());
2416 Register ConstReg = ConstTy->getOperand(1).getReg();
2417 const MachineInstr *Const = this->MRI->getVRegDef(ConstReg);
2418 assert(Const && Const->getOpcode() == TargetOpcode::G_CONSTANT);
2419 const llvm::APInt &Val = Const->getOperand(1).getCImm()->getValue();
2420 const uint32_t ThreadId = Val.getZExtValue();
2421
2422 // Extract the thread ID from the loaded vector value.
2423 MachineBasicBlock &BB = *I.getParent();
2424 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
2425 .addDef(ResVReg)
2426 .addUse(GR.getSPIRVTypeID(ResType))
2427 .addUse(LoadedRegister)
2428 .addImm(ThreadId);
2429 return MIB.constrainAllUses(TII, TRI, RBI);
2430}
2431
2432namespace llvm {
2433InstructionSelector *
2434createSPIRVInstructionSelector(const SPIRVTargetMachine &TM,
2435 const SPIRVSubtarget &Subtarget,
2436 const RegisterBankInfo &RBI) {
2437 return new SPIRVInstructionSelector(TM, Subtarget, RBI);
2438}
2439} // namespace llvm