//===- SPIRVInstructionSelector.cpp ------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the targeting of the InstructionSelector class for
// SPIRV.
// TODO: This should be generated by TableGen.
//
//===----------------------------------------------------------------------===//

#include "SPIRV.h"
#include "SPIRVGlobalRegistry.h"
#include "SPIRVInstrInfo.h"
#include "SPIRVRegisterBankInfo.h"
#include "SPIRVRegisterInfo.h"
#include "SPIRVTargetMachine.h"
#include "SPIRVUtils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/IntrinsicsSPIRV.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "spirv-isel"

using namespace llvm;
namespace CL = SPIRV::OpenCLExtInst;
namespace GL = SPIRV::GLSLExtInst;

using ExtInstList =
    std::vector<std::pair<SPIRV::InstructionSet::InstructionSet, uint32_t>>;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class SPIRVInstructionSelector : public InstructionSelector {
  const SPIRVSubtarget &STI;
  const SPIRVInstrInfo &TII;
  const SPIRVRegisterInfo &TRI;
  const RegisterBankInfo &RBI;
  SPIRVGlobalRegistry &GR;
  MachineRegisterInfo *MRI;

public:
  SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                           const SPIRVSubtarget &ST,
                           const RegisterBankInfo &RBI);
  void setupMF(MachineFunction &MF, GISelKnownBits *KB,
               CodeGenCoverage *CoverageInfo, ProfileSummaryInfo *PSI,
               BlockFrequencyInfo *BFI) override;
  // Common selection code. Instruction-specific selection occurs in spvSelect.
  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

#define GET_GLOBALISEL_PREDICATES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL

private:
  // tblgen-erated 'select' implementation, used as the initial selector for
  // the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // All instruction-specific selection that didn't happen in "select()".
  // This is basically a large switch/case delegating to the other select*
  // methods.
  bool spvSelect(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectGlobalValue(Register ResVReg, MachineInstr &I,
                         const MachineInstr *Init = nullptr) const;

  bool selectUnOpWithSrc(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I, Register SrcReg,
                         unsigned Opcode) const;
  bool selectUnOp(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  unsigned Opcode) const;

  bool selectLoad(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectStore(MachineInstr &I) const;

  bool selectMemOperation(Register ResVReg, MachineInstr &I) const;

  bool selectAtomicRMW(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I, unsigned NewOpcode) const;

  bool selectAtomicCmpXchg(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectFence(MachineInstr &I) const;

  bool selectAddrSpaceCast(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectBitreverse(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;

  bool selectConstVector(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectCmp(Register ResVReg, const SPIRVType *ResType,
                 unsigned comparisonOpcode, MachineInstr &I) const;

  bool selectICmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectFCmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;

  void renderImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
                   int OpIdx) const;
  void renderFImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
                    int OpIdx) const;

  bool selectConst(Register ResVReg, const SPIRVType *ResType, const APInt &Imm,
                   MachineInstr &I) const;

  bool selectSelect(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                    bool IsSigned) const;
  bool selectIToF(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  bool IsSigned, unsigned Opcode) const;
  bool selectExt(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                 bool IsSigned) const;

  bool selectTrunc(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;

  bool selectIntToBool(Register IntReg, Register ResVReg, MachineInstr &I,
                       const SPIRVType *intTy, const SPIRVType *boolTy) const;

  bool selectOpUndef(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I) const;
  bool selectIntrinsic(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractVal(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertVal(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractElt(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertElt(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectGEP(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectFrameIndex(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;

  bool selectBranch(MachineInstr &I) const;
  bool selectBranchCond(MachineInstr &I) const;

  bool selectPhi(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, CL::OpenCLExtInst CLInst) const;
  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, CL::OpenCLExtInst CLInst,
                     GL::GLSLExtInst GLInst) const;
  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, const ExtInstList &ExtInsts) const;

  Register buildI32Constant(uint32_t Val, MachineInstr &I,
                            const SPIRVType *ResType = nullptr) const;

  Register buildZerosVal(const SPIRVType *ResType, MachineInstr &I) const;
  Register buildOnesVal(bool AllOnes, const SPIRVType *ResType,
                        MachineInstr &I) const;
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

SPIRVInstructionSelector::SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                                                   const SPIRVSubtarget &ST,
                                                   const RegisterBankInfo &RBI)
    : InstructionSelector(), STI(ST), TII(*ST.getInstrInfo()),
      TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

void SPIRVInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
                                       CodeGenCoverage *CoverageInfo,
                                       ProfileSummaryInfo *PSI,
                                       BlockFrequencyInfo *BFI) {
  MRI = &MF.getRegInfo();
  GR.setCurrentFunc(MF);
  InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
}

static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI);

// Defined in SPIRVLegalizerInfo.cpp.
extern bool isTypeFoldingSupported(unsigned Opcode);

bool SPIRVInstructionSelector::select(MachineInstr &I) {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  unsigned Opcode = I.getOpcode();
  // If it's not a GMIR instruction, we've selected it already.
  if (!isPreISelGenericOpcode(Opcode)) {
    if (Opcode == SPIRV::ASSIGN_TYPE) { // These pseudos aren't needed any more.
      auto *Def = MRI->getVRegDef(I.getOperand(1).getReg());
      if (isTypeFoldingSupported(Def->getOpcode())) {
        auto Res = selectImpl(I, *CoverageInfo);
        assert(Res || Def->getOpcode() == TargetOpcode::G_CONSTANT);
        if (Res)
          return Res;
      }
      MRI->replaceRegWith(I.getOperand(1).getReg(), I.getOperand(0).getReg());
      I.removeFromParent();
      return true;
    } else if (I.getNumDefs() == 1) {
      // Make all vregs 32 bits (for SPIR-V IDs).
      MRI->setType(I.getOperand(0).getReg(), LLT::scalar(32));
    }
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  if (I.getNumOperands() != I.getNumExplicitOperands()) {
    LLVM_DEBUG(errs() << "Generic instr has unexpected implicit operands\n");
    return false;
  }

  // Common code for getting the return reg+type, and removing the selected
  // instr from its parent occurs here. Instr-specific selection happens in
  // spvSelect().
  bool HasDefs = I.getNumDefs() > 0;
  Register ResVReg = HasDefs ? I.getOperand(0).getReg() : Register(0);
  SPIRVType *ResType = HasDefs ? GR.getSPIRVTypeForVReg(ResVReg) : nullptr;
  assert(!HasDefs || ResType || I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
  if (spvSelect(ResVReg, ResType, I)) {
    if (HasDefs) // Make all vregs 32 bits (for SPIR-V IDs).
      MRI->setType(ResVReg, LLT::scalar(32));
    I.removeFromParent();
    return true;
  }
  return false;
}

bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  assert(!isTypeFoldingSupported(I.getOpcode()) ||
         I.getOpcode() == TargetOpcode::G_CONSTANT);
  const unsigned Opcode = I.getOpcode();
  switch (Opcode) {
  case TargetOpcode::G_CONSTANT:
    return selectConst(ResVReg, ResType, I.getOperand(1).getCImm()->getValue(),
                       I);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(ResVReg, I);
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectOpUndef(ResVReg, ResType, I);

  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
  case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
    return selectIntrinsic(ResVReg, ResType, I);
  case TargetOpcode::G_BITREVERSE:
    return selectBitreverse(ResVReg, ResType, I);

  case TargetOpcode::G_BUILD_VECTOR:
    return selectConstVector(ResVReg, ResType, I);

  case TargetOpcode::G_SHUFFLE_VECTOR: {
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorShuffle))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addUse(I.getOperand(1).getReg())
                   .addUse(I.getOperand(2).getReg());
    for (auto V : I.getOperand(3).getShuffleMask())
      MIB.addImm(V);
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case TargetOpcode::G_MEMMOVE:
  case TargetOpcode::G_MEMCPY:
  case TargetOpcode::G_MEMSET:
    return selectMemOperation(ResVReg, I);

  case TargetOpcode::G_ICMP:
    return selectICmp(ResVReg, ResType, I);
  case TargetOpcode::G_FCMP:
    return selectFCmp(ResVReg, ResType, I);

  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndex(ResVReg, ResType, I);

  case TargetOpcode::G_LOAD:
    return selectLoad(ResVReg, ResType, I);
  case TargetOpcode::G_STORE:
    return selectStore(I);

  case TargetOpcode::G_BR:
    return selectBranch(I);
  case TargetOpcode::G_BRCOND:
    return selectBranchCond(I);

  case TargetOpcode::G_PHI:
    return selectPhi(ResVReg, ResType, I);

  case TargetOpcode::G_FPTOSI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToS);
  case TargetOpcode::G_FPTOUI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToU);

  case TargetOpcode::G_SITOFP:
    return selectIToF(ResVReg, ResType, I, true, SPIRV::OpConvertSToF);
  case TargetOpcode::G_UITOFP:
    return selectIToF(ResVReg, ResType, I, false, SPIRV::OpConvertUToF);

  case TargetOpcode::G_CTPOP:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitCount);
  case TargetOpcode::G_SMIN:
    return selectExtInst(ResVReg, ResType, I, CL::s_min, GL::SMin);
  case TargetOpcode::G_UMIN:
    return selectExtInst(ResVReg, ResType, I, CL::u_min, GL::UMin);

  case TargetOpcode::G_SMAX:
    return selectExtInst(ResVReg, ResType, I, CL::s_max, GL::SMax);
  case TargetOpcode::G_UMAX:
    return selectExtInst(ResVReg, ResType, I, CL::u_max, GL::UMax);

  case TargetOpcode::G_FMA:
    return selectExtInst(ResVReg, ResType, I, CL::fma, GL::Fma);

  case TargetOpcode::G_FPOW:
    return selectExtInst(ResVReg, ResType, I, CL::pow, GL::Pow);
  case TargetOpcode::G_FPOWI:
    return selectExtInst(ResVReg, ResType, I, CL::pown);

  case TargetOpcode::G_FEXP:
    return selectExtInst(ResVReg, ResType, I, CL::exp, GL::Exp);
  case TargetOpcode::G_FEXP2:
    return selectExtInst(ResVReg, ResType, I, CL::exp2, GL::Exp2);

  case TargetOpcode::G_FLOG:
    return selectExtInst(ResVReg, ResType, I, CL::log, GL::Log);
  case TargetOpcode::G_FLOG2:
    return selectExtInst(ResVReg, ResType, I, CL::log2, GL::Log2);
  case TargetOpcode::G_FLOG10:
    return selectExtInst(ResVReg, ResType, I, CL::log10);

  case TargetOpcode::G_FABS:
    return selectExtInst(ResVReg, ResType, I, CL::fabs, GL::FAbs);
  case TargetOpcode::G_ABS:
    return selectExtInst(ResVReg, ResType, I, CL::s_abs, GL::SAbs);

  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMINIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmin, GL::FMin);
  case TargetOpcode::G_FMAXNUM:
  case TargetOpcode::G_FMAXIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmax, GL::FMax);

  case TargetOpcode::G_FCOPYSIGN:
    return selectExtInst(ResVReg, ResType, I, CL::copysign);

  case TargetOpcode::G_FCEIL:
    return selectExtInst(ResVReg, ResType, I, CL::ceil, GL::Ceil);
  case TargetOpcode::G_FFLOOR:
    return selectExtInst(ResVReg, ResType, I, CL::floor, GL::Floor);

  case TargetOpcode::G_FCOS:
    return selectExtInst(ResVReg, ResType, I, CL::cos, GL::Cos);
  case TargetOpcode::G_FSIN:
    return selectExtInst(ResVReg, ResType, I, CL::sin, GL::Sin);

  case TargetOpcode::G_FSQRT:
    return selectExtInst(ResVReg, ResType, I, CL::sqrt, GL::Sqrt);

  case TargetOpcode::G_CTTZ:
  case TargetOpcode::G_CTTZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::ctz);
  case TargetOpcode::G_CTLZ:
  case TargetOpcode::G_CTLZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::clz);

  case TargetOpcode::G_INTRINSIC_ROUND:
    return selectExtInst(ResVReg, ResType, I, CL::round, GL::Round);
  case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);
  case TargetOpcode::G_INTRINSIC_TRUNC:
    return selectExtInst(ResVReg, ResType, I, CL::trunc, GL::Trunc);
  case TargetOpcode::G_FRINT:
  case TargetOpcode::G_FNEARBYINT:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);

  case TargetOpcode::G_SMULH:
    return selectExtInst(ResVReg, ResType, I, CL::s_mul_hi);
  case TargetOpcode::G_UMULH:
    return selectExtInst(ResVReg, ResType, I, CL::u_mul_hi);

  case TargetOpcode::G_SEXT:
    return selectExt(ResVReg, ResType, I, true);
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_ZEXT:
    return selectExt(ResVReg, ResType, I, false);
  case TargetOpcode::G_TRUNC:
    return selectTrunc(ResVReg, ResType, I);
  case TargetOpcode::G_FPTRUNC:
  case TargetOpcode::G_FPEXT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpFConvert);

  case TargetOpcode::G_PTRTOINT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertPtrToU);
  case TargetOpcode::G_INTTOPTR:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertUToPtr);
  case TargetOpcode::G_BITCAST:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
  case TargetOpcode::G_ADDRSPACE_CAST:
    return selectAddrSpaceCast(ResVReg, ResType, I);
  case TargetOpcode::G_PTR_ADD: {
    // Currently, we get G_PTR_ADD only as a result of translating
    // global variables, initialized with constant expressions like GV + Const
    // (see test opencl/basic/progvar_prog_scope_init.ll).
    // TODO: extend the handler once we have other cases.
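    // Illustrative sketch (SPIR-V value names are hypothetical): an
    // initializer of the form `GV + Const` is folded into the single
    // instruction
    //   %res = OpSpecConstantOp %PtrTy InBoundsPtrAccessChain %GV %zero %const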
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    Register GV = I.getOperand(1).getReg();
    MachineRegisterInfo::def_instr_iterator II = MRI->def_instr_begin(GV);
    assert(((*II).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
            (*II).getOpcode() == TargetOpcode::COPY ||
            (*II).getOpcode() == SPIRV::OpVariable) &&
           isImm(I.getOperand(2), MRI));
    Register Idx = buildZerosVal(GR.getOrCreateSPIRVIntegerType(32, I, TII), I);
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addImm(static_cast<uint32_t>(
                       SPIRV::Opcode::InBoundsPtrAccessChain))
                   .addUse(GV)
                   .addUse(Idx)
                   .addUse(I.getOperand(2).getReg());
    return MIB.constrainAllUses(TII, TRI, RBI);
  }

  case TargetOpcode::G_ATOMICRMW_OR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicOr);
  case TargetOpcode::G_ATOMICRMW_ADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicIAdd);
  case TargetOpcode::G_ATOMICRMW_AND:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicAnd);
  case TargetOpcode::G_ATOMICRMW_MAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMax);
  case TargetOpcode::G_ATOMICRMW_MIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMin);
  case TargetOpcode::G_ATOMICRMW_SUB:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicISub);
  case TargetOpcode::G_ATOMICRMW_XOR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicXor);
  case TargetOpcode::G_ATOMICRMW_UMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMax);
  case TargetOpcode::G_ATOMICRMW_UMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMin);
  case TargetOpcode::G_ATOMICRMW_XCHG:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicExchange);
  case TargetOpcode::G_ATOMIC_CMPXCHG:
    return selectAtomicCmpXchg(ResVReg, ResType, I);

  case TargetOpcode::G_FENCE:
    return selectFence(I);

  default:
    return false;
  }
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst) const {
  return selectExtInst(ResVReg, ResType, I,
                       {{SPIRV::InstructionSet::OpenCL_std, CLInst}});
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst,
                                             GL::GLSLExtInst GLInst) const {
  ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
                          {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
  return selectExtInst(ResVReg, ResType, I, ExtInsts);
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             const ExtInstList &Insts) const {

  for (const auto &Ex : Insts) {
    SPIRV::InstructionSet::InstructionSet Set = Ex.first;
    uint32_t Opcode = Ex.second;
    if (STI.canUseExtInstSet(Set)) {
      MachineBasicBlock &BB = *I.getParent();
      auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
                     .addDef(ResVReg)
                     .addUse(GR.getSPIRVTypeID(ResType))
                     .addImm(static_cast<uint32_t>(Set))
                     .addImm(Opcode);
      const unsigned NumOps = I.getNumOperands();
      for (unsigned i = 1; i < NumOps; ++i)
        MIB.add(I.getOperand(i));
      return MIB.constrainAllUses(TII, TRI, RBI);
    }
  }
  return false;
}

bool SPIRVInstructionSelector::selectUnOpWithSrc(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I,
                                                 Register SrcReg,
                                                 unsigned Opcode) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(SrcReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectUnOp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I,
                                          unsigned Opcode) const {
  return selectUnOpWithSrc(ResVReg, ResType, I, I.getOperand(1).getReg(),
                           Opcode);
}

static SPIRV::Scope::Scope getScope(SyncScope::ID Ord) {
  switch (Ord) {
  case SyncScope::SingleThread:
    return SPIRV::Scope::Invocation;
  case SyncScope::System:
    return SPIRV::Scope::Device;
  default:
    llvm_unreachable("Unsupported synchronization Scope ID.");
  }
}

static void addMemoryOperands(MachineMemOperand *MemOp,
                              MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (MemOp->isVolatile())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (MemOp->isNonTemporal())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
  if (MemOp->getAlign().value())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None)) {
    MIB.addImm(SpvMemOp);
    if (SpvMemOp & static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
      MIB.addImm(MemOp->getAlign().value());
  }
}

static void addMemoryOperands(uint64_t Flags, MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (Flags & MachineMemOperand::Flags::MOVolatile)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (Flags & MachineMemOperand::Flags::MONonTemporal)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None))
    MIB.addImm(SpvMemOp);
}

bool SPIRVInstructionSelector::selectLoad(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(Ptr);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
           I.getOpcode() ==
               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStore(MachineInstr &I) const {
  unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
  Register StoreVal = I.getOperand(0 + OpOffset).getReg();
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpStore))
                 .addUse(Ptr)
                 .addUse(StoreVal);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
           I.getOpcode() ==
               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg,
                                                  MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  Register SrcReg = I.getOperand(1).getReg();
  if (I.getOpcode() == TargetOpcode::G_MEMSET) {
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    unsigned Val = getIConstVal(I.getOperand(1).getReg(), MRI);
    unsigned Num = getIConstVal(I.getOperand(2).getReg(), MRI);
    SPIRVType *ValTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
    SPIRVType *ArrTy = GR.getOrCreateSPIRVArrayType(ValTy, Num, I, TII);
    Register Const = GR.getOrCreateConsIntArray(Val, I, ArrTy, TII);
    SPIRVType *VarTy = GR.getOrCreateSPIRVPointerType(
        ArrTy, I, TII, SPIRV::StorageClass::UniformConstant);
    // TODO: check if we already have such a GV, add the initializer, and use
    // buildGlobalVariable.
    Type *LLVMArrTy = ArrayType::get(
        IntegerType::get(GR.CurMF->getFunction().getContext(), 8), Num);
    GlobalVariable *GV =
        new GlobalVariable(LLVMArrTy, true, GlobalValue::InternalLinkage);
    Register VarReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
    GR.add(GV, GR.CurMF, VarReg);

    buildOpDecorate(VarReg, I, TII, SPIRV::Decoration::Constant, {});
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
        .addDef(VarReg)
        .addUse(GR.getSPIRVTypeID(VarTy))
        .addImm(SPIRV::StorageClass::UniformConstant)
        .addUse(Const)
        .constrainAllUses(TII, TRI, RBI);
    SPIRVType *SourceTy = GR.getOrCreateSPIRVPointerType(
        ValTy, I, TII, SPIRV::StorageClass::UniformConstant);
    SrcReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
    selectUnOpWithSrc(SrcReg, SourceTy, I, VarReg, SPIRV::OpBitcast);
  }
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCopyMemorySized))
                 .addUse(I.getOperand(0).getReg())
                 .addUse(SrcReg)
                 .addUse(I.getOperand(2).getReg());
  if (I.getNumMemOperands())
    addMemoryOperands(*I.memoperands_begin(), MIB);
  bool Result = MIB.constrainAllUses(TII, TRI, RBI);
  if (ResVReg.isValid() && ResVReg != MIB->getOperand(0).getReg())
    BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY), ResVReg)
        .addUse(MIB->getOperand(0).getReg());
  return Result;
}

bool SPIRVInstructionSelector::selectAtomicRMW(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I,
                                               unsigned NewOpcode) const {
  assert(I.hasOneMemOperand());
  const MachineMemOperand *MemOp = *I.memoperands_begin();
  uint32_t Scope = static_cast<uint32_t>(getScope(MemOp->getSyncScopeID()));
  Register ScopeReg = buildI32Constant(Scope, I);

  Register Ptr = I.getOperand(1).getReg();
  // TODO: Changed as it's implemented in the translator. See test/atomicrmw.ll
  // auto ScSem =
  // getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr));
  AtomicOrdering AO = MemOp->getSuccessOrdering();
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem /*| ScSem*/, I);

  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(NewOpcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(Ptr)
      .addUse(ScopeReg)
      .addUse(MemSemReg)
      .addUse(I.getOperand(2).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectFence(MachineInstr &I) const {
  AtomicOrdering AO = AtomicOrdering(I.getOperand(0).getImm());
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem, I);
  SyncScope::ID Ord = SyncScope::ID(I.getOperand(1).getImm());
  uint32_t Scope = static_cast<uint32_t>(getScope(Ord));
  Register ScopeReg = buildI32Constant(Scope, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemoryBarrier))
      .addUse(ScopeReg)
      .addUse(MemSemReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  Register ScopeReg;
  Register MemSemEqReg;
  Register MemSemNeqReg;
  Register Ptr = I.getOperand(2).getReg();
  if (!isa<GIntrinsic>(I)) {
    assert(I.hasOneMemOperand());
    const MachineMemOperand *MemOp = *I.memoperands_begin();
    unsigned Scope = static_cast<uint32_t>(getScope(MemOp->getSyncScopeID()));
    ScopeReg = buildI32Constant(Scope, I);

    unsigned ScSem = static_cast<uint32_t>(
        getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr)));
    AtomicOrdering AO = MemOp->getSuccessOrdering();
    unsigned MemSemEq = static_cast<uint32_t>(getMemSemantics(AO)) | ScSem;
    MemSemEqReg = buildI32Constant(MemSemEq, I);
    AtomicOrdering FO = MemOp->getFailureOrdering();
    unsigned MemSemNeq = static_cast<uint32_t>(getMemSemantics(FO)) | ScSem;
    MemSemNeqReg =
        MemSemEq == MemSemNeq ? MemSemEqReg : buildI32Constant(MemSemNeq, I);
  } else {
    ScopeReg = I.getOperand(5).getReg();
    MemSemEqReg = I.getOperand(6).getReg();
    MemSemNeqReg = I.getOperand(7).getReg();
  }

  Register Cmp = I.getOperand(3).getReg();
  Register Val = I.getOperand(4).getReg();
  SPIRVType *SpvValTy = GR.getSPIRVTypeForVReg(Val);
  Register ACmpRes = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  const DebugLoc &DL = I.getDebugLoc();
  bool Result =
      BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpAtomicCompareExchange))
          .addDef(ACmpRes)
          .addUse(GR.getSPIRVTypeID(SpvValTy))
          .addUse(Ptr)
          .addUse(ScopeReg)
          .addUse(MemSemEqReg)
          .addUse(MemSemNeqReg)
          .addUse(Val)
          .addUse(Cmp)
          .constrainAllUses(TII, TRI, RBI);
  Register CmpSuccReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  SPIRVType *BoolTy = GR.getOrCreateSPIRVBoolType(I, TII);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpIEqual))
                .addDef(CmpSuccReg)
                .addUse(GR.getSPIRVTypeID(BoolTy))
                .addUse(ACmpRes)
                .addUse(Cmp)
                .constrainAllUses(TII, TRI, RBI);
  Register TmpReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(TmpReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(ACmpRes)
                .addUse(GR.getOrCreateUndef(I, ResType, TII))
                .addImm(0)
                .constrainAllUses(TII, TRI, RBI);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(CmpSuccReg)
                .addUse(TmpReg)
                .addImm(1)
                .constrainAllUses(TII, TRI, RBI);
  return Result;
}

static bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::Workgroup:
  case SPIRV::StorageClass::CrossWorkgroup:
  case SPIRV::StorageClass::Function:
    return true;
  default:
    return false;
  }
}

// In SPIR-V address space casting can only happen to and from the Generic
// storage class. We can also only cast Workgroup, CrossWorkgroup, or Function
// pointers to and from Generic pointers. As such, we can convert e.g. from
// Workgroup to Function by going via a Generic pointer as an intermediary. All
// other combinations can only be done by a bitcast, and are probably not safe.
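// For illustration (value names hypothetical), casting Workgroup -> Function
// is selected below as the two-instruction sequence
//   %tmp = OpPtrCastToGeneric %GenericPtrTy %src
//   %res = OpGenericCastToPtr %FunctionPtrTy %tmp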
bool SPIRVInstructionSelector::selectAddrSpaceCast(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  // If the AddrSpaceCast user is single and in OpConstantComposite or
  // OpVariable, we should select OpSpecConstantOp.
  auto UIs = MRI->use_instructions(ResVReg);
  if (!UIs.empty() && ++UIs.begin() == UIs.end() &&
      (UIs.begin()->getOpcode() == SPIRV::OpConstantComposite ||
       UIs.begin()->getOpcode() == SPIRV::OpVariable ||
       isSpvIntrinsic(*UIs.begin(), Intrinsic::spv_init_global))) {
    Register NewReg = I.getOperand(1).getReg();
    MachineBasicBlock &BB = *I.getParent();
    SPIRVType *SpvBaseTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
    ResType = GR.getOrCreateSPIRVPointerType(SpvBaseTy, I, TII,
                                             SPIRV::StorageClass::Generic);
    bool Result =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addImm(static_cast<uint32_t>(SPIRV::Opcode::PtrCastToGeneric))
            .addUse(NewReg)
            .constrainAllUses(TII, TRI, RBI);
    return Result;
  }
  Register SrcPtr = I.getOperand(1).getReg();
  SPIRVType *SrcPtrTy = GR.getSPIRVTypeForVReg(SrcPtr);
  SPIRV::StorageClass::StorageClass SrcSC = GR.getPointerStorageClass(SrcPtr);
  SPIRV::StorageClass::StorageClass DstSC = GR.getPointerStorageClass(ResVReg);

  // Casting from an eligible pointer to Generic.
  if (DstSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(SrcSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
  // Casting from Generic to an eligible pointer.
  if (SrcSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(DstSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);
  // Casting between 2 eligible pointers using Generic as an intermediary.
  if (isGenericCastablePtr(SrcSC) && isGenericCastablePtr(DstSC)) {
    Register Tmp = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
        SrcPtrTy, I, TII, SPIRV::StorageClass::Generic);
    MachineBasicBlock &BB = *I.getParent();
    const DebugLoc &DL = I.getDebugLoc();
    bool Success = BuildMI(BB, I, DL, TII.get(SPIRV::OpPtrCastToGeneric))
                       .addDef(Tmp)
                       .addUse(GR.getSPIRVTypeID(GenericPtrTy))
                       .addUse(SrcPtr)
                       .constrainAllUses(TII, TRI, RBI);
    return Success && BuildMI(BB, I, DL, TII.get(SPIRV::OpGenericCastToPtr))
                          .addDef(ResVReg)
                          .addUse(GR.getSPIRVTypeID(ResType))
                          .addUse(Tmp)
                          .constrainAllUses(TII, TRI, RBI);
  }
  // TODO: Should this case just be disallowed completely?
  // We're casting 2 other arbitrary address spaces, so we have to bitcast.
  return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
}

static unsigned getFCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::FCMP_OEQ:
    return SPIRV::OpFOrdEqual;
  case CmpInst::FCMP_OGE:
    return SPIRV::OpFOrdGreaterThanEqual;
  case CmpInst::FCMP_OGT:
    return SPIRV::OpFOrdGreaterThan;
  case CmpInst::FCMP_OLE:
    return SPIRV::OpFOrdLessThanEqual;
  case CmpInst::FCMP_OLT:
    return SPIRV::OpFOrdLessThan;
  case CmpInst::FCMP_ONE:
    return SPIRV::OpFOrdNotEqual;
  case CmpInst::FCMP_ORD:
    return SPIRV::OpOrdered;
  case CmpInst::FCMP_UEQ:
    return SPIRV::OpFUnordEqual;
  case CmpInst::FCMP_UGE:
    return SPIRV::OpFUnordGreaterThanEqual;
  case CmpInst::FCMP_UGT:
    return SPIRV::OpFUnordGreaterThan;
  case CmpInst::FCMP_ULE:
    return SPIRV::OpFUnordLessThanEqual;
  case CmpInst::FCMP_ULT:
    return SPIRV::OpFUnordLessThan;
  case CmpInst::FCMP_UNE:
    return SPIRV::OpFUnordNotEqual;
  case CmpInst::FCMP_UNO:
    return SPIRV::OpUnordered;
  default:
    llvm_unreachable("Unknown predicate type for FCmp");
  }
}

static unsigned getICmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpIEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpINotEqual;
  case CmpInst::ICMP_SGE:
    return SPIRV::OpSGreaterThanEqual;
  case CmpInst::ICMP_SGT:
    return SPIRV::OpSGreaterThan;
  case CmpInst::ICMP_SLE:
    return SPIRV::OpSLessThanEqual;
  case CmpInst::ICMP_SLT:
    return SPIRV::OpSLessThan;
  case CmpInst::ICMP_UGE:
    return SPIRV::OpUGreaterThanEqual;
  case CmpInst::ICMP_UGT:
    return SPIRV::OpUGreaterThan;
  case CmpInst::ICMP_ULE:
    return SPIRV::OpULessThanEqual;
  case CmpInst::ICMP_ULT:
    return SPIRV::OpULessThan;
  default:
    llvm_unreachable("Unknown predicate type for ICmp");
  }
}

static unsigned getPtrCmpOpcode(unsigned Pred) {
  switch (static_cast<CmpInst::Predicate>(Pred)) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpPtrEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpPtrNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for pointer comparison");
  }
}

// Return the logical operation, or abort if none exists.
static unsigned getBoolCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpLogicalEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpLogicalNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for Bool comparison");
  }
}

bool SPIRVInstructionSelector::selectBitreverse(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpBitReverse))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(1).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectConstVector(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  // TODO: only const case is supported for now.
  assert(std::all_of(
      I.operands_begin(), I.operands_end(), [this](const MachineOperand &MO) {
        if (MO.isDef())
          return true;
        if (!MO.isReg())
          return false;
        SPIRVType *ConstTy = this->MRI->getVRegDef(MO.getReg());
        assert(ConstTy && ConstTy->getOpcode() == SPIRV::ASSIGN_TYPE &&
               ConstTy->getOperand(1).isReg());
        Register ConstReg = ConstTy->getOperand(1).getReg();
        const MachineInstr *Const = this->MRI->getVRegDef(ConstReg);
        assert(Const);
        return (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
                Const->getOpcode() == TargetOpcode::G_FCONSTANT);
      }));

  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                     TII.get(SPIRV::OpConstantComposite))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  for (unsigned i = I.getNumExplicitDefs(); i < I.getNumExplicitOperands(); ++i)
    MIB.addUse(I.getOperand(i).getReg());
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectCmp(Register ResVReg,
                                         const SPIRVType *ResType,
                                         unsigned CmpOpc,
                                         MachineInstr &I) const {
  Register Cmp0 = I.getOperand(2).getReg();
  Register Cmp1 = I.getOperand(3).getReg();
  assert(GR.getSPIRVTypeForVReg(Cmp0)->getOpcode() ==
             GR.getSPIRVTypeForVReg(Cmp1)->getOpcode() &&
         "CMP operands should have the same type");
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CmpOpc))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(Cmp0)
      .addUse(Cmp1)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectICmp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  auto Pred = I.getOperand(1).getPredicate();
  unsigned CmpOpc;

  Register CmpOperand = I.getOperand(2).getReg();
  if (GR.isScalarOfType(CmpOperand, SPIRV::OpTypePointer))
    CmpOpc = getPtrCmpOpcode(Pred);
  else if (GR.isScalarOrVectorOfType(CmpOperand, SPIRV::OpTypeBool))
    CmpOpc = getBoolCmpOpcode(Pred);
  else
    CmpOpc = getICmpOpcode(Pred);
  return selectCmp(ResVReg, ResType, CmpOpc, I);
}

void SPIRVInstructionSelector::renderFImm32(MachineInstrBuilder &MIB,
                                            const MachineInstr &I,
                                            int OpIdx) const {
  assert(I.getOpcode() == TargetOpcode::G_FCONSTANT && OpIdx == -1 &&
         "Expected G_FCONSTANT");
  const ConstantFP *FPImm = I.getOperand(1).getFPImm();
  addNumImm(FPImm->getValueAPF().bitcastToAPInt(), MIB);
}

void SPIRVInstructionSelector::renderImm32(MachineInstrBuilder &MIB,
                                           const MachineInstr &I,
                                           int OpIdx) const {
  assert(I.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  addNumImm(I.getOperand(1).getCImm()->getValue(), MIB);
}

Register
SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &I,
                                           const SPIRVType *ResType) const {
  Type *LLVMTy = IntegerType::get(GR.CurMF->getFunction().getContext(), 32);
  const SPIRVType *SpvI32Ty =
      ResType ? ResType : GR.getOrCreateSPIRVIntegerType(32, I, TII);
  // Find a constant in DT or build a new one.
  auto ConstInt = ConstantInt::get(LLVMTy, Val);
  Register NewReg = GR.find(ConstInt, GR.CurMF);
  if (!NewReg.isValid()) {
    NewReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
    GR.add(ConstInt, GR.CurMF, NewReg);
    MachineInstr *MI;
    MachineBasicBlock &BB = *I.getParent();
    if (Val == 0) {
      MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
               .addDef(NewReg)
               .addUse(GR.getSPIRVTypeID(SpvI32Ty));
    } else {
      MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
               .addDef(NewReg)
               .addUse(GR.getSPIRVTypeID(SpvI32Ty))
               .addImm(APInt(32, Val).getZExtValue());
    }
    constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
  }
  return NewReg;
}

bool SPIRVInstructionSelector::selectFCmp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  unsigned CmpOp = getFCmpOpcode(I.getOperand(1).getPredicate());
  return selectCmp(ResVReg, ResType, CmpOp, I);
}

Register SPIRVInstructionSelector::buildZerosVal(const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    return GR.getOrCreateConsIntVector(0, I, ResType, TII);
  return GR.getOrCreateConstInt(0, I, ResType, TII);
}

Register SPIRVInstructionSelector::buildOnesVal(bool AllOnes,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
  APInt One =
      AllOnes ? APInt::getAllOnes(BitWidth) : APInt::getOneBitSet(BitWidth, 0);
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    return GR.getOrCreateConsIntVector(One.getZExtValue(), I, ResType, TII);
  return GR.getOrCreateConstInt(One.getZExtValue(), I, ResType, TII);
}

bool SPIRVInstructionSelector::selectSelect(Register ResVReg,
                                            const SPIRVType *ResType,
                                            MachineInstr &I,
                                            bool IsSigned) const {
  // To extend a bool, we need to use OpSelect between constants.
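  // Illustrative result (SPIR-V value names are hypothetical): for a scalar
  // bool %cond this selects
  //   %res = OpSelect %ResTy %cond %one_or_all_ones %zero
  // where the "true" operand is all-ones for a signed extend and 1 otherwise.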
  Register ZeroReg = buildZerosVal(ResType, I);
  Register OneReg = buildOnesVal(IsSigned, ResType, I);
  bool IsScalarBool =
      GR.isScalarOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool);
  unsigned Opcode =
      IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectSIVCond;
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(1).getReg())
      .addUse(OneReg)
      .addUse(ZeroReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectIToF(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I, bool IsSigned,
                                          unsigned Opcode) const {
  Register SrcReg = I.getOperand(1).getReg();
  // We can convert a bool value directly to float type without OpConvert*ToF;
  // however the translator generates OpSelect+OpConvert*ToF, so we do the
  // same.
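  // Sketch of the emitted pattern (value names hypothetical):
  //   %int = OpSelect %IntTy %src %one %zero
  //   %res = OpConvertSToF %ResTy %int   ; or OpConvertUToF when unsigned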
  if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool)) {
    unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
    SPIRVType *TmpType = GR.getOrCreateSPIRVIntegerType(BitWidth, I, TII);
    if (ResType->getOpcode() == SPIRV::OpTypeVector) {
      const unsigned NumElts = ResType->getOperand(2).getImm();
      TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts, I, TII);
    }
    SrcReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    selectSelect(SrcReg, TmpType, I, false);
  }
  return selectUnOpWithSrc(ResVReg, ResType, I, SrcReg, Opcode);
}

bool SPIRVInstructionSelector::selectExt(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I, bool IsSigned) const {
  if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool))
    return selectSelect(ResVReg, ResType, I, IsSigned);
  unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
  return selectUnOp(ResVReg, ResType, I, Opcode);
}

bool SPIRVInstructionSelector::selectIntToBool(Register IntReg,
                                               Register ResVReg,
                                               MachineInstr &I,
                                               const SPIRVType *IntTy,
                                               const SPIRVType *BoolTy) const {
  // To truncate to a bool, we use OpBitwiseAnd 1 and OpINotEqual to zero.
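  // Illustrative sequence (value names hypothetical):
  //   %bit = OpBitwiseAnd %IntTy %src %one
  //   %res = OpINotEqual %BoolTy %bit %zero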
  Register BitIntReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  bool IsVectorTy = IntTy->getOpcode() == SPIRV::OpTypeVector;
  unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
  Register Zero = buildZerosVal(IntTy, I);
  Register One = buildOnesVal(false, IntTy, I);
  MachineBasicBlock &BB = *I.getParent();
  BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(BitIntReg)
      .addUse(GR.getSPIRVTypeID(IntTy))
      .addUse(IntReg)
      .addUse(One)
      .constrainAllUses(TII, TRI, RBI);
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(BoolTy))
      .addUse(BitIntReg)
      .addUse(Zero)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectTrunc(Register ResVReg,
                                           const SPIRVType *ResType,
                                           MachineInstr &I) const {
  if (GR.isScalarOrVectorOfType(ResVReg, SPIRV::OpTypeBool)) {
    Register IntReg = I.getOperand(1).getReg();
    const SPIRVType *ArgType = GR.getSPIRVTypeForVReg(IntReg);
    return selectIntToBool(IntReg, ResVReg, I, ArgType, ResType);
  }
  bool IsSigned = GR.isScalarOrVectorSigned(ResType);
  unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
  return selectUnOp(ResVReg, ResType, I, Opcode);
}

bool SPIRVInstructionSelector::selectConst(Register ResVReg,
                                           const SPIRVType *ResType,
                                           const APInt &Imm,
                                           MachineInstr &I) const {
  unsigned TyOpcode = ResType->getOpcode();
  assert(TyOpcode != SPIRV::OpTypePointer || Imm.isZero());
  MachineBasicBlock &BB = *I.getParent();
  if ((TyOpcode == SPIRV::OpTypePointer || TyOpcode == SPIRV::OpTypeEvent) &&
      Imm.isZero())
    return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
        .addDef(ResVReg)
        .addUse(GR.getSPIRVTypeID(ResType))
        .constrainAllUses(TII, TRI, RBI);
  if (TyOpcode == SPIRV::OpTypeInt) {
    assert(Imm.getBitWidth() <= 64 && "Unsupported integer width!");
    Register Reg = GR.getOrCreateConstInt(Imm.getZExtValue(), I, ResType, TII);
    if (Reg == ResVReg)
      return true;
    return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
        .addDef(ResVReg)
        .addUse(Reg)
        .constrainAllUses(TII, TRI, RBI);
  }
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  // <=32-bit integers should be caught by the sdag pattern.
  assert(Imm.getBitWidth() > 32);
  addNumImm(Imm, MIB);
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectOpUndef(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .constrainAllUses(TII, TRI, RBI);
}

static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
  assert(MO.isReg());
  const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
  if (TypeInst->getOpcode() != SPIRV::ASSIGN_TYPE)
    return false;
  assert(TypeInst->getOperand(1).isReg());
  MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
  return ImmInst->getOpcode() == TargetOpcode::G_CONSTANT;
}

static int64_t foldImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
  const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
  MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
  assert(ImmInst->getOpcode() == TargetOpcode::G_CONSTANT);
  return ImmInst->getOperand(1).getCImm()->getZExtValue();
}

bool SPIRVInstructionSelector::selectInsertVal(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeInsert))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 // object to insert
                 .addUse(I.getOperand(3).getReg())
                 // composite to insert into
                 .addUse(I.getOperand(2).getReg());
  for (unsigned i = 4; i < I.getNumOperands(); i++)
    MIB.addImm(foldImm(I.getOperand(i), MRI));
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectExtractVal(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(I.getOperand(2).getReg());
  for (unsigned i = 3; i < I.getNumOperands(); i++)
    MIB.addImm(foldImm(I.getOperand(i), MRI));
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectInsertElt(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  if (isImm(I.getOperand(4), MRI))
    return selectInsertVal(ResVReg, ResType, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorInsertDynamic))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())
      .addUse(I.getOperand(4).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectExtractElt(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  if (isImm(I.getOperand(3), MRI))
    return selectExtractVal(ResVReg, ResType, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorExtractDynamic))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectGEP(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  // In general we should also support OpAccessChain instrs here (i.e. not
  // PtrAccessChain), but the SPIRV-LLVM Translator doesn't emit them at all,
  // and neither do we, to stay compliant with its tests and, more importantly,
  // with consumers.
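  // E.g. (illustrative, value names hypothetical) a GEP with two indices
  // becomes:
  //   %res = OpInBoundsPtrAccessChain %PtrTy %base %idx0 %idx1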
  unsigned Opcode = I.getOperand(2).getImm() ? SPIRV::OpInBoundsPtrAccessChain
                                             : SPIRV::OpPtrAccessChain;
  auto Res = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 // Object to get a pointer to.
                 .addUse(I.getOperand(3).getReg());
  // Adding indices.
  for (unsigned i = 4; i < I.getNumExplicitOperands(); ++i)
    Res.addUse(I.getOperand(i).getReg());
  return Res.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  switch (cast<GIntrinsic>(I).getIntrinsicID()) {
  case Intrinsic::spv_load:
    return selectLoad(ResVReg, ResType, I);
  case Intrinsic::spv_store:
    return selectStore(I);
  case Intrinsic::spv_extractv:
    return selectExtractVal(ResVReg, ResType, I);
  case Intrinsic::spv_insertv:
    return selectInsertVal(ResVReg, ResType, I);
  case Intrinsic::spv_extractelt:
    return selectExtractElt(ResVReg, ResType, I);
  case Intrinsic::spv_insertelt:
    return selectInsertElt(ResVReg, ResType, I);
  case Intrinsic::spv_gep:
    return selectGEP(ResVReg, ResType, I);
  case Intrinsic::spv_unref_global:
  case Intrinsic::spv_init_global: {
    MachineInstr *MI = MRI->getVRegDef(I.getOperand(1).getReg());
    MachineInstr *Init = I.getNumExplicitOperands() > 2
                             ? MRI->getVRegDef(I.getOperand(2).getReg())
                             : nullptr;
    assert(MI);
    return selectGlobalValue(MI->getOperand(0).getReg(), *MI, Init);
  }
  case Intrinsic::spv_undef: {
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType));
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_const_composite: {
    // If no values are attached, the composite is a null constant.
    bool IsNull = I.getNumExplicitDefs() + 1 == I.getNumExplicitOperands();
    unsigned Opcode =
        IsNull ? SPIRV::OpConstantNull : SPIRV::OpConstantComposite;
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType));
    // Skip the type MD node that we already used when generating assign.type
    // for this composite.
    if (!IsNull) {
      for (unsigned i = I.getNumExplicitDefs() + 1;
           i < I.getNumExplicitOperands(); ++i) {
        MIB.addUse(I.getOperand(i).getReg());
      }
    }
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_assign_name: {
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpName));
    MIB.addUse(I.getOperand(I.getNumExplicitDefs() + 1).getReg());
    for (unsigned i = I.getNumExplicitDefs() + 2;
         i < I.getNumExplicitOperands(); ++i) {
      MIB.addImm(I.getOperand(i).getImm());
    }
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_switch: {
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSwitch));
    for (unsigned i = 1; i < I.getNumExplicitOperands(); ++i) {
      if (I.getOperand(i).isReg())
        MIB.addReg(I.getOperand(i).getReg());
      else if (I.getOperand(i).isCImm())
        addNumImm(I.getOperand(i).getCImm()->getValue(), MIB);
      else if (I.getOperand(i).isMBB())
        MIB.addMBB(I.getOperand(i).getMBB());
      else
        llvm_unreachable("Unexpected OpSwitch operand");
    }
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_cmpxchg:
    return selectAtomicCmpXchg(ResVReg, ResType, I);
  case Intrinsic::spv_unreachable:
    BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUnreachable));
    break;
  case Intrinsic::spv_alloca:
    return selectFrameIndex(ResVReg, ResType, I);
  default:
    llvm_unreachable("Intrinsic selection not implemented");
  }
  return true;
}

bool SPIRVInstructionSelector::selectFrameIndex(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addImm(static_cast<uint32_t>(SPIRV::StorageClass::Function))
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectBranch(MachineInstr &I) const {
  // InstructionSelector walks backwards through the instructions. We can use
  // both a G_BR and a G_BRCOND to create an OpBranchConditional. We hit G_BR
  // first, so we can generate an OpBranchConditional here. If there is no
  // G_BRCOND, we just use OpBranch for a regular unconditional branch.
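  // Sketch (block/register names hypothetical): the GMIR pair
  //   G_BRCOND %cond, %bb.true
  //   G_BR %bb.false
  // is selected here as the single instruction
  //   OpBranchConditional %cond %bb.true %bb.false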
  const MachineInstr *PrevI = I.getPrevNode();
  MachineBasicBlock &MBB = *I.getParent();
  if (PrevI != nullptr && PrevI->getOpcode() == TargetOpcode::G_BRCOND) {
    return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
        .addUse(PrevI->getOperand(0).getReg())
        .addMBB(PrevI->getOperand(1).getMBB())
        .addMBB(I.getOperand(0).getMBB())
        .constrainAllUses(TII, TRI, RBI);
  }
  return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranch))
      .addMBB(I.getOperand(0).getMBB())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectBranchCond(MachineInstr &I) const {
  // InstructionSelector walks backwards through the instructions. For an
  // explicit conditional branch with no fallthrough, we use both a G_BR and a
  // G_BRCOND to create an OpBranchConditional. We should hit G_BR first, and
  // generate the OpBranchConditional in selectBranch above.
  //
  // If an OpBranchConditional has been generated, we simply return, as the
  // work is already done. If there is no OpBranchConditional, LLVM must be
  // relying on implicit fallthrough to the next basic block, so we need to
  // create an OpBranchConditional with an explicit "false" argument pointing
  // to the next basic block that LLVM would fall through to.
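  // Sketch (names hypothetical): a lone conditional branch
  //   G_BRCOND %cond, %bb.true   ; falls through to %bb.next
  // becomes
  //   OpBranchConditional %cond %bb.true %bb.next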
  const MachineInstr *NextI = I.getNextNode();
  // Check if this has already been successfully selected.
  if (NextI != nullptr && NextI->getOpcode() == SPIRV::OpBranchConditional)
    return true;
  // Must be relying on implicit block fallthrough, so generate an
  // OpBranchConditional with the "next" basic block as the "false" target.
  MachineBasicBlock &MBB = *I.getParent();
  unsigned NextMBBNum = MBB.getNextNode()->getNumber();
  MachineBasicBlock *NextMBB = I.getMF()->getBlockNumbered(NextMBBNum);
  return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
      .addUse(I.getOperand(0).getReg())
      .addMBB(I.getOperand(1).getMBB())
      .addMBB(NextMBB)
      .constrainAllUses(TII, TRI, RBI);
}
1459
1460bool SPIRVInstructionSelector::selectPhi(Register ResVReg,
1461 const SPIRVType *ResType,
1462 MachineInstr &I) const {
1463 auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpPhi))
1464 .addDef(ResVReg)
1465 .addUse(GR.getSPIRVTypeID(ResType));
1466 const unsigned NumOps = I.getNumOperands();
1467 for (unsigned i = 1; i < NumOps; i += 2) {
1468 MIB.addUse(I.getOperand(i + 0).getReg());
1469 MIB.addMBB(I.getOperand(i + 1).getMBB());
1470 }
1471 return MIB.constrainAllUses(TII, TRI, RBI);
1472}
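// A minimal sketch of the operand pairing, assuming hypothetical names:
//
//   %d = G_PHI %v0, %bb.0, %v1, %bb.1
// becomes:
//   %d = OpPhi %type %v0 %bb.0 %v1 %bb.1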
1473
1474bool SPIRVInstructionSelector::selectGlobalValue(
1475 Register ResVReg, MachineInstr &I, const MachineInstr *Init) const {
1476 // FIXME: don't use MachineIRBuilder here, replace it with BuildMI.
1477 MachineIRBuilder MIRBuilder(I);
1478 const GlobalValue *GV = I.getOperand(1).getGlobal();
1479 Type *GVType = GV->getValueType();
1480 SPIRVType *PointerBaseType;
1481 if (GVType->isArrayTy()) {
1482 SPIRVType *ArrayElementType =
1483 GR.getOrCreateSPIRVType(GVType->getArrayElementType(), MIRBuilder,
1484 SPIRV::AccessQualifier::ReadWrite, false);
1485 PointerBaseType = GR.getOrCreateSPIRVArrayType(
1486 ArrayElementType, GVType->getArrayNumElements(), I, TII);
1487 } else {
1488 PointerBaseType = GR.getOrCreateSPIRVType(
1489 GVType, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, false);
1490 }
1491 SPIRVType *ResType = GR.getOrCreateSPIRVPointerType(
1492 PointerBaseType, I, TII,
1493 addressSpaceToStorageClass(GV->getAddressSpace()));
1494 std::string GlobalIdent = GV->getGlobalIdentifier();
1495 // Functions can appear as operands, e.g. in tests with blocks of
1496 // instructions such as transcoding/global_block.ll. These operands are not
1497 // used and should be substituted by null constants. Their type is expected
1498 // to always be OpTypePointer Function %uchar.
1499 if (isa<Function>(GV)) {
1500 const Constant *ConstVal = GV;
1501 MachineBasicBlock &BB = *I.getParent();
1502 Register NewReg = GR.find(ConstVal, GR.CurMF);
1503 if (!NewReg.isValid()) {
1504 // First use of this function constant: map it to the result register.
1505 GR.add(ConstVal, GR.CurMF, ResVReg);
1506 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
1507 .addDef(ResVReg)
1508 .addUse(GR.getSPIRVTypeID(ResType))
1509 .constrainAllUses(TII, TRI, RBI);
1510 }
1511 assert(NewReg != ResVReg);
1512 return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
1513 .addDef(ResVReg)
1514 .addUse(NewReg)
1515 .constrainAllUses(TII, TRI, RBI);
1516 }
1517 auto GlobalVar = cast<GlobalVariable>(GV);
1518 assert(GlobalVar->getName() != "llvm.global.annotations");
1519
1520 bool HasInit = GlobalVar->hasInitializer() &&
1521 !isa<UndefValue>(GlobalVar->getInitializer());
1522 // For GVs with initializers, skip the empty declaration until we reach the
1523 // declaration carrying the initializer.
1524 if (HasInit && !Init)
1525 return true;
1526
1527 unsigned AddrSpace = GV->getAddressSpace();
1528 SPIRV::StorageClass::StorageClass Storage =
1529 addressSpaceToStorageClass(AddrSpace);
1530 bool HasLnkTy = GV->getLinkage() != GlobalValue::InternalLinkage &&
1531 Storage != SPIRV::StorageClass::Function;
1532 SPIRV::LinkageType::LinkageType LnkType =
1533 (GV->isDeclaration() || GV->hasAvailableExternallyLinkage())
1534 ? SPIRV::LinkageType::Import
1535 : SPIRV::LinkageType::Export;
1536
1537 Register Reg = GR.buildGlobalVariable(ResVReg, ResType, GlobalIdent, GV,
1538 Storage, Init, GlobalVar->isConstant(),
1539 HasLnkTy, LnkType, MIRBuilder, true);
1540 return Reg.isValid();
1541}
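// A minimal sketch of the linkage decision, assuming hypothetical globals in
// a storage class other than Function:
//
//   @gv  = addrspace(1) global i32 0          -> OpVariable + Export linkage
//   @ext = external addrspace(1) global i32   -> OpVariable + Import linkage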
1542
1543 namespace llvm {
1544 InstructionSelector *
1545 createSPIRVInstructionSelector(const SPIRVTargetMachine &TM,
1546 const SPIRVSubtarget &Subtarget,
1547 const RegisterBankInfo &RBI) {
1548 return new SPIRVInstructionSelector(TM, Subtarget, RBI);
1549}
1550} // namespace llvm