//===- SPIRVInstructionSelector.cpp ------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the targeting of the InstructionSelector class for
// SPIRV.
// TODO: This should be generated by TableGen.
//
//===----------------------------------------------------------------------===//

#include "SPIRV.h"
#include "SPIRVGlobalRegistry.h"
#include "SPIRVInstrInfo.h"
#include "SPIRVRegisterBankInfo.h"
#include "SPIRVRegisterInfo.h"
#include "SPIRVTargetMachine.h"
#include "SPIRVUtils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/IntrinsicsSPIRV.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "spirv-isel"

using namespace llvm;
namespace CL = SPIRV::OpenCLExtInst;
namespace GL = SPIRV::GLSLExtInst;

using ExtInstList =
    std::vector<std::pair<SPIRV::InstructionSet::InstructionSet, uint32_t>>;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class SPIRVInstructionSelector : public InstructionSelector {
  const SPIRVSubtarget &STI;
  const SPIRVInstrInfo &TII;
  const SPIRVRegisterInfo &TRI;
  const RegisterBankInfo &RBI;
  SPIRVGlobalRegistry &GR;
  MachineRegisterInfo *MRI;

public:
  SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                           const SPIRVSubtarget &ST,
                           const RegisterBankInfo &RBI);
  void setupMF(MachineFunction &MF, GISelKnownBits *KB,
               CodeGenCoverage &CoverageInfo, ProfileSummaryInfo *PSI,
               BlockFrequencyInfo *BFI) override;
  // Common selection code. Instruction-specific selection occurs in spvSelect.
  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

#define GET_GLOBALISEL_PREDICATES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL

private:
  // tblgen-erated 'select' implementation, used as the initial selector for
  // the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // All instruction-specific selection that didn't happen in "select()".
  // It is basically a large switch/case delegating to all the other select
  // methods.
  bool spvSelect(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectGlobalValue(Register ResVReg, MachineInstr &I,
                         const MachineInstr *Init = nullptr) const;

  bool selectUnOpWithSrc(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I, Register SrcReg,
                         unsigned Opcode) const;
  bool selectUnOp(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  unsigned Opcode) const;

  bool selectLoad(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectStore(MachineInstr &I) const;

  bool selectMemOperation(Register ResVReg, MachineInstr &I) const;

  bool selectAtomicRMW(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I, unsigned NewOpcode) const;

  bool selectAtomicCmpXchg(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectFence(MachineInstr &I) const;

  bool selectAddrSpaceCast(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectBitreverse(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;

  bool selectConstVector(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectCmp(Register ResVReg, const SPIRVType *ResType,
                 unsigned comparisonOpcode, MachineInstr &I) const;

  bool selectICmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectFCmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;

  void renderImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
                   int OpIdx) const;
  void renderFImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
                    int OpIdx) const;

  bool selectConst(Register ResVReg, const SPIRVType *ResType, const APInt &Imm,
                   MachineInstr &I) const;

  bool selectSelect(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                    bool IsSigned) const;
  bool selectIToF(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  bool IsSigned, unsigned Opcode) const;
  bool selectExt(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                 bool IsSigned) const;

  bool selectTrunc(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;

  bool selectIntToBool(Register IntReg, Register ResVReg, MachineInstr &I,
                       const SPIRVType *IntTy, const SPIRVType *BoolTy) const;

  bool selectOpUndef(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I) const;
  bool selectIntrinsic(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractVal(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertVal(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractElt(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertElt(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectGEP(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectFrameIndex(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;

  bool selectBranch(MachineInstr &I) const;
  bool selectBranchCond(MachineInstr &I) const;

  bool selectPhi(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, CL::OpenCLExtInst CLInst) const;
  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, CL::OpenCLExtInst CLInst,
                     GL::GLSLExtInst GLInst) const;
  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, const ExtInstList &ExtInsts) const;

  Register buildI32Constant(uint32_t Val, MachineInstr &I,
                            const SPIRVType *ResType = nullptr) const;

  Register buildZerosVal(const SPIRVType *ResType, MachineInstr &I) const;
  Register buildOnesVal(bool AllOnes, const SPIRVType *ResType,
                        MachineInstr &I) const;
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

SPIRVInstructionSelector::SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                                                   const SPIRVSubtarget &ST,
                                                   const RegisterBankInfo &RBI)
    : InstructionSelector(), STI(ST), TII(*ST.getInstrInfo()),
      TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

void SPIRVInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
                                       CodeGenCoverage &CoverageInfo,
                                       ProfileSummaryInfo *PSI,
                                       BlockFrequencyInfo *BFI) {
  MRI = &MF.getRegInfo();
  GR.setCurrentFunc(MF);
  InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
}

static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI);

// Defined in SPIRVLegalizerInfo.cpp.
extern bool isTypeFoldingSupported(unsigned Opcode);

bool SPIRVInstructionSelector::select(MachineInstr &I) {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  const unsigned Opcode = I.getOpcode();
  // If it's not a GMIR instruction, we've selected it already.
  if (!isPreISelGenericOpcode(Opcode)) {
    if (Opcode == SPIRV::ASSIGN_TYPE) { // These pseudos aren't needed anymore.
      auto *Def = MRI->getVRegDef(I.getOperand(1).getReg());
      if (isTypeFoldingSupported(Def->getOpcode())) {
        auto Res = selectImpl(I, *CoverageInfo);
        assert(Res || Def->getOpcode() == TargetOpcode::G_CONSTANT);
        if (Res)
          return Res;
      }
      MRI->replaceRegWith(I.getOperand(1).getReg(), I.getOperand(0).getReg());
      I.removeFromParent();
      return true;
    } else if (I.getNumDefs() == 1) {
      // Make all vregs 32 bits (for SPIR-V IDs).
      MRI->setType(I.getOperand(0).getReg(), LLT::scalar(32));
    }
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  if (I.getNumOperands() != I.getNumExplicitOperands()) {
    LLVM_DEBUG(errs() << "Generic instr has unexpected implicit operands\n");
    return false;
  }

  // Common code for getting return reg+type, and removing selected instr
  // from parent occurs here. Instr-specific selection happens in spvSelect().
  bool HasDefs = I.getNumDefs() > 0;
  Register ResVReg = HasDefs ? I.getOperand(0).getReg() : Register(0);
  SPIRVType *ResType = HasDefs ? GR.getSPIRVTypeForVReg(ResVReg) : nullptr;
  assert(!HasDefs || ResType || I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
  if (spvSelect(ResVReg, ResType, I)) {
    if (HasDefs) // Make all vregs 32 bits (for SPIR-V IDs).
      MRI->setType(ResVReg, LLT::scalar(32));
    I.removeFromParent();
    return true;
  }
  return false;
}

bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  assert(!isTypeFoldingSupported(I.getOpcode()) ||
         I.getOpcode() == TargetOpcode::G_CONSTANT);
  const unsigned Opcode = I.getOpcode();
  switch (Opcode) {
  case TargetOpcode::G_CONSTANT:
    return selectConst(ResVReg, ResType, I.getOperand(1).getCImm()->getValue(),
                       I);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(ResVReg, I);
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectOpUndef(ResVReg, ResType, I);

  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    return selectIntrinsic(ResVReg, ResType, I);
  case TargetOpcode::G_BITREVERSE:
    return selectBitreverse(ResVReg, ResType, I);

  case TargetOpcode::G_BUILD_VECTOR:
    return selectConstVector(ResVReg, ResType, I);

  case TargetOpcode::G_SHUFFLE_VECTOR: {
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorShuffle))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addUse(I.getOperand(1).getReg())
                   .addUse(I.getOperand(2).getReg());
    for (auto V : I.getOperand(3).getShuffleMask())
      MIB.addImm(V);
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case TargetOpcode::G_MEMMOVE:
  case TargetOpcode::G_MEMCPY:
  case TargetOpcode::G_MEMSET:
    return selectMemOperation(ResVReg, I);

  case TargetOpcode::G_ICMP:
    return selectICmp(ResVReg, ResType, I);
  case TargetOpcode::G_FCMP:
    return selectFCmp(ResVReg, ResType, I);

  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndex(ResVReg, ResType, I);

  case TargetOpcode::G_LOAD:
    return selectLoad(ResVReg, ResType, I);
  case TargetOpcode::G_STORE:
    return selectStore(I);

  case TargetOpcode::G_BR:
    return selectBranch(I);
  case TargetOpcode::G_BRCOND:
    return selectBranchCond(I);

  case TargetOpcode::G_PHI:
    return selectPhi(ResVReg, ResType, I);

  case TargetOpcode::G_FPTOSI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToS);
  case TargetOpcode::G_FPTOUI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToU);

  case TargetOpcode::G_SITOFP:
    return selectIToF(ResVReg, ResType, I, true, SPIRV::OpConvertSToF);
  case TargetOpcode::G_UITOFP:
    return selectIToF(ResVReg, ResType, I, false, SPIRV::OpConvertUToF);

  case TargetOpcode::G_CTPOP:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitCount);
  case TargetOpcode::G_SMIN:
    return selectExtInst(ResVReg, ResType, I, CL::s_min, GL::SMin);
  case TargetOpcode::G_UMIN:
    return selectExtInst(ResVReg, ResType, I, CL::u_min, GL::UMin);

  case TargetOpcode::G_SMAX:
    return selectExtInst(ResVReg, ResType, I, CL::s_max, GL::SMax);
  case TargetOpcode::G_UMAX:
    return selectExtInst(ResVReg, ResType, I, CL::u_max, GL::UMax);

  case TargetOpcode::G_FMA:
    return selectExtInst(ResVReg, ResType, I, CL::fma, GL::Fma);

  case TargetOpcode::G_FPOW:
    return selectExtInst(ResVReg, ResType, I, CL::pow, GL::Pow);
  case TargetOpcode::G_FPOWI:
    return selectExtInst(ResVReg, ResType, I, CL::pown);

  case TargetOpcode::G_FEXP:
    return selectExtInst(ResVReg, ResType, I, CL::exp, GL::Exp);
  case TargetOpcode::G_FEXP2:
    return selectExtInst(ResVReg, ResType, I, CL::exp2, GL::Exp2);

  case TargetOpcode::G_FLOG:
    return selectExtInst(ResVReg, ResType, I, CL::log, GL::Log);
  case TargetOpcode::G_FLOG2:
    return selectExtInst(ResVReg, ResType, I, CL::log2, GL::Log2);
  case TargetOpcode::G_FLOG10:
    return selectExtInst(ResVReg, ResType, I, CL::log10);

  case TargetOpcode::G_FABS:
    return selectExtInst(ResVReg, ResType, I, CL::fabs, GL::FAbs);
  case TargetOpcode::G_ABS:
    return selectExtInst(ResVReg, ResType, I, CL::s_abs, GL::SAbs);

  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMINIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmin, GL::FMin);
  case TargetOpcode::G_FMAXNUM:
  case TargetOpcode::G_FMAXIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmax, GL::FMax);

  case TargetOpcode::G_FCOPYSIGN:
    return selectExtInst(ResVReg, ResType, I, CL::copysign);

  case TargetOpcode::G_FCEIL:
    return selectExtInst(ResVReg, ResType, I, CL::ceil, GL::Ceil);
  case TargetOpcode::G_FFLOOR:
    return selectExtInst(ResVReg, ResType, I, CL::floor, GL::Floor);

  case TargetOpcode::G_FCOS:
    return selectExtInst(ResVReg, ResType, I, CL::cos, GL::Cos);
  case TargetOpcode::G_FSIN:
    return selectExtInst(ResVReg, ResType, I, CL::sin, GL::Sin);

  case TargetOpcode::G_FSQRT:
    return selectExtInst(ResVReg, ResType, I, CL::sqrt, GL::Sqrt);

  case TargetOpcode::G_CTTZ:
  case TargetOpcode::G_CTTZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::ctz);
  case TargetOpcode::G_CTLZ:
  case TargetOpcode::G_CTLZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::clz);

  case TargetOpcode::G_INTRINSIC_ROUND:
    return selectExtInst(ResVReg, ResType, I, CL::round, GL::Round);
  case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);
  case TargetOpcode::G_INTRINSIC_TRUNC:
    return selectExtInst(ResVReg, ResType, I, CL::trunc, GL::Trunc);
  case TargetOpcode::G_FRINT:
  case TargetOpcode::G_FNEARBYINT:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);

  case TargetOpcode::G_SMULH:
    return selectExtInst(ResVReg, ResType, I, CL::s_mul_hi);
  case TargetOpcode::G_UMULH:
    return selectExtInst(ResVReg, ResType, I, CL::u_mul_hi);

  case TargetOpcode::G_SEXT:
    return selectExt(ResVReg, ResType, I, true);
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_ZEXT:
    return selectExt(ResVReg, ResType, I, false);
  case TargetOpcode::G_TRUNC:
    return selectTrunc(ResVReg, ResType, I);
  case TargetOpcode::G_FPTRUNC:
  case TargetOpcode::G_FPEXT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpFConvert);

  case TargetOpcode::G_PTRTOINT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertPtrToU);
  case TargetOpcode::G_INTTOPTR:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertUToPtr);
  case TargetOpcode::G_BITCAST:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
  case TargetOpcode::G_ADDRSPACE_CAST:
    return selectAddrSpaceCast(ResVReg, ResType, I);
  case TargetOpcode::G_PTR_ADD: {
    // Currently, we get G_PTR_ADD only as a result of translating
    // global variables, initialized with constant expressions like GV + Const
    // (see test opencl/basic/progvar_prog_scope_init.ll).
    // TODO: extend the handler once we have other cases.
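    // Illustrative sketch (assumed IR shape, not quoted from the test): a
    // global initialized with a constant GEP expression such as
    //   @gv = addrspace(1) global i32 42
    //   @ptr = addrspace(1) global i32* getelementptr(i32, i32* @gv, i64 1)
    // reaches selection as G_PTR_ADD(%gv, %const_offset) and is folded below
    // into a single
    //   %res = OpSpecConstantOp %ptr_ty InBoundsPtrAccessChain %gv %zero %off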
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    Register GV = I.getOperand(1).getReg();
    MachineRegisterInfo::def_instr_iterator II = MRI->def_instr_begin(GV);
    assert(((*II).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
            (*II).getOpcode() == TargetOpcode::COPY ||
            (*II).getOpcode() == SPIRV::OpVariable) &&
           isImm(I.getOperand(2), MRI));
    Register Idx = buildZerosVal(GR.getOrCreateSPIRVIntegerType(32, I, TII), I);
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addImm(static_cast<uint32_t>(
                       SPIRV::Opcode::InBoundsPtrAccessChain))
                   .addUse(GV)
                   .addUse(Idx)
                   .addUse(I.getOperand(2).getReg());
    return MIB.constrainAllUses(TII, TRI, RBI);
  }

  case TargetOpcode::G_ATOMICRMW_OR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicOr);
  case TargetOpcode::G_ATOMICRMW_ADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicIAdd);
  case TargetOpcode::G_ATOMICRMW_AND:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicAnd);
  case TargetOpcode::G_ATOMICRMW_MAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMax);
  case TargetOpcode::G_ATOMICRMW_MIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMin);
  case TargetOpcode::G_ATOMICRMW_SUB:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicISub);
  case TargetOpcode::G_ATOMICRMW_XOR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicXor);
  case TargetOpcode::G_ATOMICRMW_UMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMax);
  case TargetOpcode::G_ATOMICRMW_UMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMin);
  case TargetOpcode::G_ATOMICRMW_XCHG:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicExchange);
  case TargetOpcode::G_ATOMIC_CMPXCHG:
    return selectAtomicCmpXchg(ResVReg, ResType, I);

  case TargetOpcode::G_FENCE:
    return selectFence(I);

  default:
    return false;
  }
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst) const {
  return selectExtInst(ResVReg, ResType, I,
                       {{SPIRV::InstructionSet::OpenCL_std, CLInst}});
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst,
                                             GL::GLSLExtInst GLInst) const {
  ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
                          {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
  return selectExtInst(ResVReg, ResType, I, ExtInsts);
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             const ExtInstList &Insts) const {
  for (const auto &Ex : Insts) {
    SPIRV::InstructionSet::InstructionSet Set = Ex.first;
    uint32_t Opcode = Ex.second;
    if (STI.canUseExtInstSet(Set)) {
      MachineBasicBlock &BB = *I.getParent();
      auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
                     .addDef(ResVReg)
                     .addUse(GR.getSPIRVTypeID(ResType))
                     .addImm(static_cast<uint32_t>(Set))
                     .addImm(Opcode);
      const unsigned NumOps = I.getNumOperands();
      for (unsigned i = 1; i < NumOps; ++i)
        MIB.add(I.getOperand(i));
      return MIB.constrainAllUses(TII, TRI, RBI);
    }
  }
  return false;
}

bool SPIRVInstructionSelector::selectUnOpWithSrc(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I,
                                                 Register SrcReg,
                                                 unsigned Opcode) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(SrcReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectUnOp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I,
                                          unsigned Opcode) const {
  return selectUnOpWithSrc(ResVReg, ResType, I, I.getOperand(1).getReg(),
                           Opcode);
}

static SPIRV::Scope::Scope getScope(SyncScope::ID Ord) {
  switch (Ord) {
  case SyncScope::SingleThread:
    return SPIRV::Scope::Invocation;
  case SyncScope::System:
    return SPIRV::Scope::Device;
  default:
    llvm_unreachable("Unsupported synchronization Scope ID.");
  }
}

static void addMemoryOperands(MachineMemOperand *MemOp,
                              MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (MemOp->isVolatile())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (MemOp->isNonTemporal())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
  if (MemOp->getAlign().value())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None)) {
    MIB.addImm(SpvMemOp);
    if (SpvMemOp & static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
      MIB.addImm(MemOp->getAlign().value());
  }
}

static void addMemoryOperands(uint64_t Flags, MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (Flags & MachineMemOperand::Flags::MOVolatile)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (Flags & MachineMemOperand::Flags::MONonTemporal)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None))
    MIB.addImm(SpvMemOp);
}

bool SPIRVInstructionSelector::selectLoad(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  unsigned OpOffset =
      I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ? 1 : 0;
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(Ptr);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStore(MachineInstr &I) const {
  unsigned OpOffset =
      I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ? 1 : 0;
  Register StoreVal = I.getOperand(0 + OpOffset).getReg();
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpStore))
                 .addUse(Ptr)
                 .addUse(StoreVal);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg,
                                                  MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  Register SrcReg = I.getOperand(1).getReg();
  if (I.getOpcode() == TargetOpcode::G_MEMSET) {
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    unsigned Val = getIConstVal(I.getOperand(1).getReg(), MRI);
    unsigned Num = getIConstVal(I.getOperand(2).getReg(), MRI);
    SPIRVType *ValTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
    SPIRVType *ArrTy = GR.getOrCreateSPIRVArrayType(ValTy, Num, I, TII);
    Register Const = GR.getOrCreateConsIntArray(Val, I, ArrTy, TII);
    SPIRVType *VarTy = GR.getOrCreateSPIRVPointerType(
        ArrTy, I, TII, SPIRV::StorageClass::UniformConstant);
    // TODO: check if we have such GV, add init, use buildGlobalVariable.
    Type *LLVMArrTy = ArrayType::get(
        IntegerType::get(GR.CurMF->getFunction().getContext(), 8), Num);
    GlobalVariable *GV =
        new GlobalVariable(LLVMArrTy, true, GlobalValue::InternalLinkage);
    Register VarReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
    GR.add(GV, GR.CurMF, VarReg);

    buildOpDecorate(VarReg, I, TII, SPIRV::Decoration::Constant, {});
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
        .addDef(VarReg)
        .addUse(GR.getSPIRVTypeID(VarTy))
        .addImm(SPIRV::StorageClass::UniformConstant)
        .addUse(Const)
        .constrainAllUses(TII, TRI, RBI);
    SPIRVType *SourceTy = GR.getOrCreateSPIRVPointerType(
        ValTy, I, TII, SPIRV::StorageClass::UniformConstant);
    SrcReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
    selectUnOpWithSrc(SrcReg, SourceTy, I, VarReg, SPIRV::OpBitcast);
  }
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCopyMemorySized))
                 .addUse(I.getOperand(0).getReg())
                 .addUse(SrcReg)
                 .addUse(I.getOperand(2).getReg());
  if (I.getNumMemOperands())
    addMemoryOperands(*I.memoperands_begin(), MIB);
  bool Result = MIB.constrainAllUses(TII, TRI, RBI);
  if (ResVReg.isValid() && ResVReg != MIB->getOperand(0).getReg())
    BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY), ResVReg)
        .addUse(MIB->getOperand(0).getReg());
  return Result;
}

bool SPIRVInstructionSelector::selectAtomicRMW(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I,
                                               unsigned NewOpcode) const {
  assert(I.hasOneMemOperand());
  const MachineMemOperand *MemOp = *I.memoperands_begin();
  uint32_t Scope = static_cast<uint32_t>(getScope(MemOp->getSyncScopeID()));
  Register ScopeReg = buildI32Constant(Scope, I);

  Register Ptr = I.getOperand(1).getReg();
  // TODO: Changed as it's implemented in the translator. See test/atomicrmw.ll
  // auto ScSem =
  // getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr));
  AtomicOrdering AO = MemOp->getSuccessOrdering();
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem /*| ScSem*/, I);

  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(NewOpcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(Ptr)
      .addUse(ScopeReg)
      .addUse(MemSemReg)
      .addUse(I.getOperand(2).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectFence(MachineInstr &I) const {
  AtomicOrdering AO = AtomicOrdering(I.getOperand(0).getImm());
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem, I);
  SyncScope::ID Ord = SyncScope::ID(I.getOperand(1).getImm());
  uint32_t Scope = static_cast<uint32_t>(getScope(Ord));
  Register ScopeReg = buildI32Constant(Scope, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemoryBarrier))
      .addUse(ScopeReg)
      .addUse(MemSemReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  Register ScopeReg;
  Register MemSemEqReg;
  Register MemSemNeqReg;
  Register Ptr = I.getOperand(2).getReg();
  if (I.getOpcode() != TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS) {
    assert(I.hasOneMemOperand());
    const MachineMemOperand *MemOp = *I.memoperands_begin();
    unsigned Scope = static_cast<uint32_t>(getScope(MemOp->getSyncScopeID()));
    ScopeReg = buildI32Constant(Scope, I);

    unsigned ScSem = static_cast<uint32_t>(
        getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr)));
    AtomicOrdering AO = MemOp->getSuccessOrdering();
    unsigned MemSemEq = static_cast<uint32_t>(getMemSemantics(AO)) | ScSem;
    MemSemEqReg = buildI32Constant(MemSemEq, I);
    AtomicOrdering FO = MemOp->getFailureOrdering();
    unsigned MemSemNeq = static_cast<uint32_t>(getMemSemantics(FO)) | ScSem;
    MemSemNeqReg =
        MemSemEq == MemSemNeq ? MemSemEqReg : buildI32Constant(MemSemNeq, I);
  } else {
    ScopeReg = I.getOperand(5).getReg();
    MemSemEqReg = I.getOperand(6).getReg();
    MemSemNeqReg = I.getOperand(7).getReg();
  }

  Register Cmp = I.getOperand(3).getReg();
  Register Val = I.getOperand(4).getReg();
  SPIRVType *SpvValTy = GR.getSPIRVTypeForVReg(Val);
  Register ACmpRes = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  const DebugLoc &DL = I.getDebugLoc();
  bool Result =
      BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpAtomicCompareExchange))
          .addDef(ACmpRes)
          .addUse(GR.getSPIRVTypeID(SpvValTy))
          .addUse(Ptr)
          .addUse(ScopeReg)
          .addUse(MemSemEqReg)
          .addUse(MemSemNeqReg)
          .addUse(Val)
          .addUse(Cmp)
          .constrainAllUses(TII, TRI, RBI);
  Register CmpSuccReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  SPIRVType *BoolTy = GR.getOrCreateSPIRVBoolType(I, TII);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpIEqual))
                .addDef(CmpSuccReg)
                .addUse(GR.getSPIRVTypeID(BoolTy))
                .addUse(ACmpRes)
                .addUse(Cmp)
                .constrainAllUses(TII, TRI, RBI);
  Register TmpReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(TmpReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(ACmpRes)
                .addUse(GR.getOrCreateUndef(I, ResType, TII))
                .addImm(0)
                .constrainAllUses(TII, TRI, RBI);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(CmpSuccReg)
                .addUse(TmpReg)
                .addImm(1)
                .constrainAllUses(TII, TRI, RBI);
  return Result;
}

static bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::Workgroup:
  case SPIRV::StorageClass::CrossWorkgroup:
  case SPIRV::StorageClass::Function:
    return true;
  default:
    return false;
  }
}

// In SPIR-V, address space casting can only happen to and from the Generic
// storage class. We can also only cast Workgroup, CrossWorkgroup, or Function
// pointers to and from Generic pointers. As such, we can convert e.g. from
// Workgroup to Function by going via a Generic pointer as an intermediary. All
// other combinations can only be done by a bitcast, and are probably not safe.
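// For example (illustrative only, assumed SPIR-V assembly spellings for the
// %_ptr_* type names): casting a Workgroup pointer to Function storage cannot
// be done directly, so the two-step path below produces
//   %gen = OpPtrCastToGeneric %_ptr_Generic_uchar %src
//   %dst = OpGenericCastToPtr %_ptr_Function_uchar %gen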
bool SPIRVInstructionSelector::selectAddrSpaceCast(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  // If the AddrSpaceCast user is single and in OpConstantComposite or
  // OpVariable, we should select OpSpecConstantOp.
  auto UIs = MRI->use_instructions(ResVReg);
  if (!UIs.empty() && ++UIs.begin() == UIs.end() &&
      (UIs.begin()->getOpcode() == SPIRV::OpConstantComposite ||
       UIs.begin()->getOpcode() == SPIRV::OpVariable ||
       isSpvIntrinsic(*UIs.begin(), Intrinsic::spv_init_global))) {
    Register NewReg = I.getOperand(1).getReg();
    MachineBasicBlock &BB = *I.getParent();
    SPIRVType *SpvBaseTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
    ResType = GR.getOrCreateSPIRVPointerType(SpvBaseTy, I, TII,
                                             SPIRV::StorageClass::Generic);
    bool Result =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addImm(static_cast<uint32_t>(SPIRV::Opcode::PtrCastToGeneric))
            .addUse(NewReg)
            .constrainAllUses(TII, TRI, RBI);
    return Result;
  }
  Register SrcPtr = I.getOperand(1).getReg();
  SPIRVType *SrcPtrTy = GR.getSPIRVTypeForVReg(SrcPtr);
  SPIRV::StorageClass::StorageClass SrcSC = GR.getPointerStorageClass(SrcPtr);
  SPIRV::StorageClass::StorageClass DstSC = GR.getPointerStorageClass(ResVReg);

  // Casting from an eligible pointer to Generic.
  if (DstSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(SrcSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
  // Casting from Generic to an eligible pointer.
  if (SrcSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(DstSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);
  // Casting between 2 eligible pointers using Generic as an intermediary.
  if (isGenericCastablePtr(SrcSC) && isGenericCastablePtr(DstSC)) {
    Register Tmp = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
        SrcPtrTy, I, TII, SPIRV::StorageClass::Generic);
    MachineBasicBlock &BB = *I.getParent();
    const DebugLoc &DL = I.getDebugLoc();
    bool Success = BuildMI(BB, I, DL, TII.get(SPIRV::OpPtrCastToGeneric))
                       .addDef(Tmp)
                       .addUse(GR.getSPIRVTypeID(GenericPtrTy))
                       .addUse(SrcPtr)
                       .constrainAllUses(TII, TRI, RBI);
    return Success && BuildMI(BB, I, DL, TII.get(SPIRV::OpGenericCastToPtr))
                          .addDef(ResVReg)
                          .addUse(GR.getSPIRVTypeID(ResType))
                          .addUse(Tmp)
                          .constrainAllUses(TII, TRI, RBI);
  }
  // TODO: Should this case just be disallowed completely?
  // We're casting 2 other arbitrary address spaces, so have to bitcast.
  return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
}

static unsigned getFCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::FCMP_OEQ:
    return SPIRV::OpFOrdEqual;
  case CmpInst::FCMP_OGE:
    return SPIRV::OpFOrdGreaterThanEqual;
  case CmpInst::FCMP_OGT:
    return SPIRV::OpFOrdGreaterThan;
  case CmpInst::FCMP_OLE:
    return SPIRV::OpFOrdLessThanEqual;
  case CmpInst::FCMP_OLT:
    return SPIRV::OpFOrdLessThan;
  case CmpInst::FCMP_ONE:
    return SPIRV::OpFOrdNotEqual;
  case CmpInst::FCMP_ORD:
    return SPIRV::OpOrdered;
  case CmpInst::FCMP_UEQ:
    return SPIRV::OpFUnordEqual;
  case CmpInst::FCMP_UGE:
    return SPIRV::OpFUnordGreaterThanEqual;
  case CmpInst::FCMP_UGT:
    return SPIRV::OpFUnordGreaterThan;
  case CmpInst::FCMP_ULE:
    return SPIRV::OpFUnordLessThanEqual;
  case CmpInst::FCMP_ULT:
    return SPIRV::OpFUnordLessThan;
  case CmpInst::FCMP_UNE:
    return SPIRV::OpFUnordNotEqual;
  case CmpInst::FCMP_UNO:
    return SPIRV::OpUnordered;
  default:
    llvm_unreachable("Unknown predicate type for FCmp");
  }
}

static unsigned getICmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpIEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpINotEqual;
  case CmpInst::ICMP_SGE:
    return SPIRV::OpSGreaterThanEqual;
  case CmpInst::ICMP_SGT:
    return SPIRV::OpSGreaterThan;
  case CmpInst::ICMP_SLE:
    return SPIRV::OpSLessThanEqual;
  case CmpInst::ICMP_SLT:
    return SPIRV::OpSLessThan;
  case CmpInst::ICMP_UGE:
    return SPIRV::OpUGreaterThanEqual;
  case CmpInst::ICMP_UGT:
    return SPIRV::OpUGreaterThan;
  case CmpInst::ICMP_ULE:
    return SPIRV::OpULessThanEqual;
  case CmpInst::ICMP_ULT:
    return SPIRV::OpULessThan;
  default:
    llvm_unreachable("Unknown predicate type for ICmp");
  }
}

static unsigned getPtrCmpOpcode(unsigned Pred) {
  switch (static_cast<CmpInst::Predicate>(Pred)) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpPtrEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpPtrNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for pointer comparison");
  }
}

// Return the logical operation, or abort if none exists.
static unsigned getBoolCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpLogicalEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpLogicalNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for Bool comparison");
  }
}

bool SPIRVInstructionSelector::selectBitreverse(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpBitReverse))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(1).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectConstVector(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  // TODO: only const case is supported for now.
  assert(std::all_of(
      I.operands_begin(), I.operands_end(), [this](const MachineOperand &MO) {
        if (MO.isDef())
          return true;
        if (!MO.isReg())
          return false;
        SPIRVType *ConstTy = this->MRI->getVRegDef(MO.getReg());
        assert(ConstTy && ConstTy->getOpcode() == SPIRV::ASSIGN_TYPE &&
               ConstTy->getOperand(1).isReg());
        Register ConstReg = ConstTy->getOperand(1).getReg();
        const MachineInstr *Const = this->MRI->getVRegDef(ConstReg);
        assert(Const);
        return (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
                Const->getOpcode() == TargetOpcode::G_FCONSTANT);
      }));

  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                     TII.get(SPIRV::OpConstantComposite))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  for (unsigned i = I.getNumExplicitDefs(); i < I.getNumExplicitOperands(); ++i)
    MIB.addUse(I.getOperand(i).getReg());
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectCmp(Register ResVReg,
                                         const SPIRVType *ResType,
                                         unsigned CmpOpc,
                                         MachineInstr &I) const {
  Register Cmp0 = I.getOperand(2).getReg();
  Register Cmp1 = I.getOperand(3).getReg();
  assert(GR.getSPIRVTypeForVReg(Cmp0)->getOpcode() ==
             GR.getSPIRVTypeForVReg(Cmp1)->getOpcode() &&
         "CMP operands should have the same type");
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CmpOpc))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(Cmp0)
      .addUse(Cmp1)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectICmp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  auto Pred = I.getOperand(1).getPredicate();
  unsigned CmpOpc;

  Register CmpOperand = I.getOperand(2).getReg();
  if (GR.isScalarOfType(CmpOperand, SPIRV::OpTypePointer))
    CmpOpc = getPtrCmpOpcode(Pred);
  else if (GR.isScalarOrVectorOfType(CmpOperand, SPIRV::OpTypeBool))
    CmpOpc = getBoolCmpOpcode(Pred);
  else
    CmpOpc = getICmpOpcode(Pred);
  return selectCmp(ResVReg, ResType, CmpOpc, I);
}

void SPIRVInstructionSelector::renderFImm32(MachineInstrBuilder &MIB,
                                            const MachineInstr &I,
                                            int OpIdx) const {
  assert(I.getOpcode() == TargetOpcode::G_FCONSTANT && OpIdx == -1 &&
         "Expected G_FCONSTANT");
  const ConstantFP *FPImm = I.getOperand(1).getFPImm();
  addNumImm(FPImm->getValueAPF().bitcastToAPInt(), MIB);
}

void SPIRVInstructionSelector::renderImm32(MachineInstrBuilder &MIB,
                                           const MachineInstr &I,
                                           int OpIdx) const {
  assert(I.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  addNumImm(I.getOperand(1).getCImm()->getValue(), MIB);
}

Register
SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &I,
                                           const SPIRVType *ResType) const {
  Type *LLVMTy = IntegerType::get(GR.CurMF->getFunction().getContext(), 32);
  const SPIRVType *SpvI32Ty =
      ResType ? ResType : GR.getOrCreateSPIRVIntegerType(32, I, TII);
  // Find a constant in DT or build a new one.
  auto ConstInt = ConstantInt::get(LLVMTy, Val);
  Register NewReg = GR.find(ConstInt, GR.CurMF);
  if (!NewReg.isValid()) {
    NewReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
    GR.add(ConstInt, GR.CurMF, NewReg);
    MachineInstr *MI;
    MachineBasicBlock &BB = *I.getParent();
    if (Val == 0) {
      MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
               .addDef(NewReg)
               .addUse(GR.getSPIRVTypeID(SpvI32Ty));
    } else {
      MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
               .addDef(NewReg)
               .addUse(GR.getSPIRVTypeID(SpvI32Ty))
               .addImm(APInt(32, Val).getZExtValue());
    }
    constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
  }
  return NewReg;
}

bool SPIRVInstructionSelector::selectFCmp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  unsigned CmpOp = getFCmpOpcode(I.getOperand(1).getPredicate());
  return selectCmp(ResVReg, ResType, CmpOp, I);
}

Register SPIRVInstructionSelector::buildZerosVal(const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    return GR.getOrCreateConsIntVector(0, I, ResType, TII);
  return GR.getOrCreateConstInt(0, I, ResType, TII);
}

Register SPIRVInstructionSelector::buildOnesVal(bool AllOnes,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
  APInt One =
      AllOnes ? APInt::getAllOnes(BitWidth) : APInt::getOneBitSet(BitWidth, 0);
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    return GR.getOrCreateConsIntVector(One.getZExtValue(), I, ResType, TII);
  return GR.getOrCreateConstInt(One.getZExtValue(), I, ResType, TII);
}

bool SPIRVInstructionSelector::selectSelect(Register ResVReg,
                                            const SPIRVType *ResType,
                                            MachineInstr &I,
                                            bool IsSigned) const {
  // To extend a bool, we need to use OpSelect between constants.
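  // E.g. (illustrative SPIR-V, assumed constant names): a signed extension of
  // %cond to i32 becomes
  //   %res = OpSelect %int %cond %int_minus_1 %int_0
  // while an unsigned extension selects %int_1 instead of %int_minus_1.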
  Register ZeroReg = buildZerosVal(ResType, I);
  Register OneReg = buildOnesVal(IsSigned, ResType, I);
  bool IsScalarBool =
      GR.isScalarOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool);
  unsigned Opcode =
      IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectSIVCond;
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(1).getReg())
      .addUse(OneReg)
      .addUse(ZeroReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectIToF(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I, bool IsSigned,
                                          unsigned Opcode) const {
  Register SrcReg = I.getOperand(1).getReg();
  // We can convert a bool value directly to float type without OpConvert*ToF,
  // however the translator generates OpSelect+OpConvert*ToF, so we do the same.
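  // E.g. (illustrative, assumed constant names): "uitofp i1 %c to float" is
  // emitted as the pair
  //   %sel = OpSelect %uint %c %uint_1 %uint_0
  //   %res = OpConvertUToF %float %sel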
  if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool)) {
    unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
    SPIRVType *TmpType = GR.getOrCreateSPIRVIntegerType(BitWidth, I, TII);
    if (ResType->getOpcode() == SPIRV::OpTypeVector) {
      const unsigned NumElts = ResType->getOperand(2).getImm();
      TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts, I, TII);
    }
    SrcReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    selectSelect(SrcReg, TmpType, I, false);
  }
  return selectUnOpWithSrc(ResVReg, ResType, I, SrcReg, Opcode);
}

bool SPIRVInstructionSelector::selectExt(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I, bool IsSigned) const {
  if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool))
    return selectSelect(ResVReg, ResType, I, IsSigned);
  unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
  return selectUnOp(ResVReg, ResType, I, Opcode);
}

bool SPIRVInstructionSelector::selectIntToBool(Register IntReg,
                                               Register ResVReg,
                                               MachineInstr &I,
                                               const SPIRVType *IntTy,
                                               const SPIRVType *BoolTy) const {
  // To truncate to a bool, we use OpBitwiseAnd 1 and OpINotEqual to zero.
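  // E.g. (illustrative, assumed constant names): "trunc i32 %x to i1" becomes
  //   %bit = OpBitwiseAnd %uint %x %uint_1
  //   %res = OpINotEqual %bool %bit %uint_0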
  Register BitIntReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  bool IsVectorTy = IntTy->getOpcode() == SPIRV::OpTypeVector;
  unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
  Register Zero = buildZerosVal(IntTy, I);
  Register One = buildOnesVal(false, IntTy, I);
  MachineBasicBlock &BB = *I.getParent();
  BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(BitIntReg)
      .addUse(GR.getSPIRVTypeID(IntTy))
      .addUse(IntReg)
      .addUse(One)
      .constrainAllUses(TII, TRI, RBI);
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(BoolTy))
      .addUse(BitIntReg)
      .addUse(Zero)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectTrunc(Register ResVReg,
                                           const SPIRVType *ResType,
                                           MachineInstr &I) const {
  if (GR.isScalarOrVectorOfType(ResVReg, SPIRV::OpTypeBool)) {
    Register IntReg = I.getOperand(1).getReg();
    const SPIRVType *ArgType = GR.getSPIRVTypeForVReg(IntReg);
    return selectIntToBool(IntReg, ResVReg, I, ArgType, ResType);
  }
  bool IsSigned = GR.isScalarOrVectorSigned(ResType);
  unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
  return selectUnOp(ResVReg, ResType, I, Opcode);
}

bool SPIRVInstructionSelector::selectConst(Register ResVReg,
                                           const SPIRVType *ResType,
                                           const APInt &Imm,
                                           MachineInstr &I) const {
  unsigned TyOpcode = ResType->getOpcode();
  assert(TyOpcode != SPIRV::OpTypePointer || Imm.isZero());
  MachineBasicBlock &BB = *I.getParent();
  if ((TyOpcode == SPIRV::OpTypePointer || TyOpcode == SPIRV::OpTypeEvent) &&
      Imm.isZero())
    return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
        .addDef(ResVReg)
        .addUse(GR.getSPIRVTypeID(ResType))
        .constrainAllUses(TII, TRI, RBI);
  if (TyOpcode == SPIRV::OpTypeInt) {
    assert(Imm.getBitWidth() <= 64 && "Unsupported integer width!");
    Register Reg = GR.getOrCreateConstInt(Imm.getZExtValue(), I, ResType, TII);
    if (Reg == ResVReg)
      return true;
    return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
        .addDef(ResVReg)
        .addUse(Reg)
        .constrainAllUses(TII, TRI, RBI);
  }
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  // <=32-bit integers should be caught by the sdag pattern.
  assert(Imm.getBitWidth() > 32);
  addNumImm(Imm, MIB);
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectOpUndef(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .constrainAllUses(TII, TRI, RBI);
}

static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
  assert(MO.isReg());
  const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
  if (TypeInst->getOpcode() != SPIRV::ASSIGN_TYPE)
    return false;
  assert(TypeInst->getOperand(1).isReg());
  MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
  return ImmInst->getOpcode() == TargetOpcode::G_CONSTANT;
}

static int64_t foldImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
  const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
  MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
  assert(ImmInst->getOpcode() == TargetOpcode::G_CONSTANT);
  return ImmInst->getOperand(1).getCImm()->getZExtValue();
}

bool SPIRVInstructionSelector::selectInsertVal(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeInsert))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 // Object to insert.
                 .addUse(I.getOperand(3).getReg())
                 // Composite to insert into.
                 .addUse(I.getOperand(2).getReg());
  for (unsigned i = 4; i < I.getNumOperands(); i++)
    MIB.addImm(foldImm(I.getOperand(i), MRI));
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectExtractVal(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(I.getOperand(2).getReg());
  for (unsigned i = 3; i < I.getNumOperands(); i++)
    MIB.addImm(foldImm(I.getOperand(i), MRI));
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectInsertElt(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  if (isImm(I.getOperand(4), MRI))
    return selectInsertVal(ResVReg, ResType, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorInsertDynamic))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())
      .addUse(I.getOperand(4).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectExtractElt(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  if (isImm(I.getOperand(3), MRI))
    return selectExtractVal(ResVReg, ResType, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorExtractDynamic))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectGEP(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  // In general we should also support OpAccessChain instrs here (i.e. not
  // PtrAccessChain), but the SPIRV-LLVM Translator doesn't emit them at all,
  // so neither do we, to stay compliant with its tests and, more importantly,
  // with consumers.
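  // E.g. (illustrative, assumed type name): an inbounds GEP such as
  // "getelementptr inbounds i32, i32* %p, i64 %i" becomes
  //   %res = OpInBoundsPtrAccessChain %ptr_ty %p %i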
  unsigned Opcode = I.getOperand(2).getImm() ? SPIRV::OpInBoundsPtrAccessChain
                                             : SPIRV::OpPtrAccessChain;
  auto Res = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 // Object to get a pointer to.
                 .addUse(I.getOperand(3).getReg());
  // Adding indices.
  for (unsigned i = 4; i < I.getNumExplicitOperands(); ++i)
    Res.addUse(I.getOperand(i).getReg());
  return Res.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  switch (I.getIntrinsicID()) {
  case Intrinsic::spv_load:
    return selectLoad(ResVReg, ResType, I);
  case Intrinsic::spv_store:
    return selectStore(I);
  case Intrinsic::spv_extractv:
    return selectExtractVal(ResVReg, ResType, I);
  case Intrinsic::spv_insertv:
    return selectInsertVal(ResVReg, ResType, I);
  case Intrinsic::spv_extractelt:
    return selectExtractElt(ResVReg, ResType, I);
  case Intrinsic::spv_insertelt:
    return selectInsertElt(ResVReg, ResType, I);
  case Intrinsic::spv_gep:
    return selectGEP(ResVReg, ResType, I);
  case Intrinsic::spv_unref_global:
  case Intrinsic::spv_init_global: {
    MachineInstr *MI = MRI->getVRegDef(I.getOperand(1).getReg());
    MachineInstr *Init = I.getNumExplicitOperands() > 2
                             ? MRI->getVRegDef(I.getOperand(2).getReg())
                             : nullptr;
    assert(MI);
    return selectGlobalValue(MI->getOperand(0).getReg(), *MI, Init);
  }
  case Intrinsic::spv_undef: {
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType));
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_const_composite: {
    // If no values are attached, the composite is a null constant.
    bool IsNull = I.getNumExplicitDefs() + 1 == I.getNumExplicitOperands();
    unsigned Opcode =
        IsNull ? SPIRV::OpConstantNull : SPIRV::OpConstantComposite;
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType));
    // Skip the type MD node that we already used when generating assign.type
    // for this composite.
    if (!IsNull) {
      for (unsigned i = I.getNumExplicitDefs() + 1;
           i < I.getNumExplicitOperands(); ++i) {
        MIB.addUse(I.getOperand(i).getReg());
      }
    }
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_assign_name: {
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpName));
    MIB.addUse(I.getOperand(I.getNumExplicitDefs() + 1).getReg());
    for (unsigned i = I.getNumExplicitDefs() + 2;
         i < I.getNumExplicitOperands(); ++i) {
      MIB.addImm(I.getOperand(i).getImm());
    }
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_switch: {
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSwitch));
    for (unsigned i = 1; i < I.getNumExplicitOperands(); ++i) {
      if (I.getOperand(i).isReg())
        MIB.addReg(I.getOperand(i).getReg());
      else if (I.getOperand(i).isCImm())
        addNumImm(I.getOperand(i).getCImm()->getValue(), MIB);
      else if (I.getOperand(i).isMBB())
        MIB.addMBB(I.getOperand(i).getMBB());
      else
        llvm_unreachable("Unexpected OpSwitch operand");
    }
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_cmpxchg:
    return selectAtomicCmpXchg(ResVReg, ResType, I);
  case Intrinsic::spv_unreachable:
    BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUnreachable));
    break;
  case Intrinsic::spv_alloca:
    return selectFrameIndex(ResVReg, ResType, I);
  default:
    llvm_unreachable("Intrinsic selection not implemented");
  }
  return true;
}

bool SPIRVInstructionSelector::selectFrameIndex(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addImm(static_cast<uint32_t>(SPIRV::StorageClass::Function))
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectBranch(MachineInstr &I) const {
  // InstructionSelector walks backwards through the instructions. We can use
  // both a G_BR and a G_BRCOND to create an OpBranchConditional. We hit G_BR
  // first, so we can generate an OpBranchConditional here. If there is no
  // G_BRCOND, we just use OpBranch for a regular unconditional branch.
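  // E.g. (illustrative gMIR):
  //   G_BRCOND %cond, %bb.then
  //   G_BR %bb.else
  // is selected here, when visiting the G_BR, into a single
  //   OpBranchConditional %cond %bb.then %bb.else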
  const MachineInstr *PrevI = I.getPrevNode();
  MachineBasicBlock &MBB = *I.getParent();
  if (PrevI != nullptr && PrevI->getOpcode() == TargetOpcode::G_BRCOND) {
    return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
        .addUse(PrevI->getOperand(0).getReg())
        .addMBB(PrevI->getOperand(1).getMBB())
        .addMBB(I.getOperand(0).getMBB())
        .constrainAllUses(TII, TRI, RBI);
  }
  return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranch))
      .addMBB(I.getOperand(0).getMBB())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectBranchCond(MachineInstr &I) const {
  // InstructionSelector walks backwards through the instructions. For an
  // explicit conditional branch with no fallthrough, we use both a G_BR and a
  // G_BRCOND to create an OpBranchConditional. We should hit G_BR first, and
  // generate the OpBranchConditional in selectBranch above.
  //
  // If an OpBranchConditional has been generated, we simply return, as the
  // work is already done. If there is no OpBranchConditional, LLVM must be
  // relying on implicit fallthrough to the next basic block, so we need to
  // create an OpBranchConditional with an explicit "false" argument pointing
  // to the next basic block that LLVM would fall through to.
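  // E.g. (illustrative): a lone "G_BRCOND %cond, %bb.then" that falls through
  // to %bb.next is selected into
  //   OpBranchConditional %cond %bb.then %bb.next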
  const MachineInstr *NextI = I.getNextNode();
  // Check if this has already been successfully selected.
  if (NextI != nullptr && NextI->getOpcode() == SPIRV::OpBranchConditional)
    return true;
  // Must be relying on implicit block fallthrough, so generate an
  // OpBranchConditional with the "next" basic block as the "false" target.
  MachineBasicBlock &MBB = *I.getParent();
  unsigned NextMBBNum = MBB.getNextNode()->getNumber();
  MachineBasicBlock *NextMBB = I.getMF()->getBlockNumbered(NextMBBNum);
  return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
      .addUse(I.getOperand(0).getReg())
      .addMBB(I.getOperand(1).getMBB())
      .addMBB(NextMBB)
      .constrainAllUses(TII, TRI, RBI);
}

1456bool SPIRVInstructionSelector::selectPhi(Register ResVReg,
1457 const SPIRVType *ResType,
1458 MachineInstr &I) const {
1459 auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpPhi))
1460 .addDef(ResVReg)
1461 .addUse(GR.getSPIRVTypeID(ResType));
1462 const unsigned NumOps = I.getNumOperands();
1463 for (unsigned i = 1; i < NumOps; i += 2) {
1464 MIB.addUse(I.getOperand(i + 0).getReg());
1465 MIB.addMBB(I.getOperand(i + 1).getMBB());
1466 }
1467 return MIB.constrainAllUses(TII, TRI, RBI);
1468}
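// Example (a sketch; names are illustrative): the (value, predecessor) operand
// pairs of
//   %res = G_PHI %a, %bb.0, %b, %bb.1
// are copied one-to-one, yielding
//   %res = OpPhi %type %a %label_bb0 %b %label_bb1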
1469
1470bool SPIRVInstructionSelector::selectGlobalValue(
1471 Register ResVReg, MachineInstr &I, const MachineInstr *Init) const {
1472 // FIXME: don't use MachineIRBuilder here, replace it with BuildMI.
1473 MachineIRBuilder MIRBuilder(I);
1474 const GlobalValue *GV = I.getOperand(1).getGlobal();
1475 SPIRVType *ResType = GR.getOrCreateSPIRVType(
1476 GV->getType(), MIRBuilder, SPIRV::AccessQualifier::ReadWrite, false);
1477
1478 std::string GlobalIdent = GV->getGlobalIdentifier();
1479 // Functions may appear as operands in tests with blocks of instructions, e.g.
1480 // in transcoding/global_block.ll. These operands are not used and should be
1481 // substituted by zero constants. Their type is always expected to be
1482 // OpTypePointer Function %uchar.
1483 if (isa<Function>(GV)) {
1484 const Constant *ConstVal = GV;
1485 MachineBasicBlock &BB = *I.getParent();
1486 Register NewReg = GR.find(ConstVal, GR.CurMF);
1487 if (!NewReg.isValid()) {
1488 SPIRVType *SpvBaseTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
1489 ResType = GR.getOrCreateSPIRVPointerType(SpvBaseTy, I, TII);
1490 Register NewReg = ResVReg;
1491 GR.add(ConstVal, GR.CurMF, NewReg);
1492 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
1493 .addDef(NewReg)
1494 .addUse(GR.getSPIRVTypeID(ResType))
1495 .constrainAllUses(TII, TRI, RBI);
1496 }
1497 assert(NewReg != ResVReg);
1498 return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
1499 .addDef(ResVReg)
1500 .addUse(NewReg)
1501 .constrainAllUses(TII, TRI, RBI);
1502 }
1503 auto GlobalVar = cast<GlobalVariable>(GV);
1504 assert(GlobalVar->getName() != "llvm.global.annotations");
1505
1506 bool HasInit = GlobalVar->hasInitializer() &&
1507 !isa<UndefValue>(GlobalVar->getInitializer());
1508 // Skip the empty declaration for GVs with an initializer until we reach the
1509 // declaration that has the initializer passed in.
1510 if (HasInit && !Init)
1511 return true;
1512
1513 unsigned AddrSpace = GV->getAddressSpace();
1514 SPIRV::StorageClass::StorageClass Storage =
1515 addressSpaceToStorageClass(AddrSpace);
1516 bool HasLnkTy = GV->getLinkage() != GlobalValue::InternalLinkage &&
1517 Storage != SPIRV::StorageClass::Function;
1518 SPIRV::LinkageType::LinkageType LnkType =
1519 (GV->isDeclaration() || GV->hasAvailableExternallyLinkage())
1520 ? SPIRV::LinkageType::Import
1521 : SPIRV::LinkageType::Export;
1522
1523 Register Reg = GR.buildGlobalVariable(ResVReg, ResType, GlobalIdent, GV,
1524 Storage, Init, GlobalVar->isConstant(),
1525 HasLnkTy, LnkType, MIRBuilder, true);
1526 return Reg.isValid();
1527}
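// Example (a sketch; names are illustrative): a non-internal global such as
//   @g = addrspace(1) global i32 0
// maps addrspace(1) to the CrossWorkgroup storage class and, via
// buildGlobalVariable, is emitted roughly as
//   %g = OpVariable %_ptr_CrossWorkgroup_int CrossWorkgroup %int_0
//   OpDecorate %g LinkageAttributes "g" Export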
1528
1529namespace llvm {
1530InstructionSelector *
1531createSPIRVInstructionSelector(const SPIRVTargetMachine &TM,
1532 const SPIRVSubtarget &Subtarget,
1533 const RegisterBankInfo &RBI) {
1534 return new SPIRVInstructionSelector(TM, Subtarget, RBI);
1535}
1536} // namespace llvm