//===- SPIRVInstructionSelector.cpp ------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the targeting of the InstructionSelector class for
// SPIRV.
// TODO: This should be generated by TableGen.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/SPIRVBaseInfo.h"
#include "MCTargetDesc/SPIRVMCTargetDesc.h"
#include "SPIRV.h"
#include "SPIRVGlobalRegistry.h"
#include "SPIRVInstrInfo.h"
#include "SPIRVRegisterBankInfo.h"
#include "SPIRVRegisterInfo.h"
#include "SPIRVTargetMachine.h"
#include "SPIRVUtils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/IR/IntrinsicsSPIRV.h"
#include "llvm/Support/Debug.h"

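// SPIRVMachineModuleInfo caches the module's named sync-scope IDs
// ("work_item", "workgroup", "device", "all_svm_devices", "sub_group") once
// per module, so that atomic and fence selection can map a SyncScope::ID to a
// SPIR-V scope without repeated string lookups.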
namespace llvm {

class SPIRVMachineModuleInfo : public MachineModuleInfoImpl {
public:
  SyncScope::ID Work_ItemSSID;
  SyncScope::ID WorkGroupSSID;
  SyncScope::ID DeviceSSID;
  SyncScope::ID AllSVMDevicesSSID;
  SyncScope::ID SubGroupSSID;

  SPIRVMachineModuleInfo(const MachineModuleInfo &MMI) {
    LLVMContext &CTX = MMI.getModule()->getContext();
    Work_ItemSSID = CTX.getOrInsertSyncScopeID("work_item");
    WorkGroupSSID = CTX.getOrInsertSyncScopeID("workgroup");
    DeviceSSID = CTX.getOrInsertSyncScopeID("device");
    AllSVMDevicesSSID = CTX.getOrInsertSyncScopeID("all_svm_devices");
    SubGroupSSID = CTX.getOrInsertSyncScopeID("sub_group");
  }
};

} // end namespace llvm

#define DEBUG_TYPE "spirv-isel"

using namespace llvm;
namespace CL = SPIRV::OpenCLExtInst;
namespace GL = SPIRV::GLSLExtInst;

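// An ExtInstList pairs extended-instruction sets with opcodes; selectExtInst
// walks such a list and emits the first entry whose instruction set the
// subtarget can use (OpenCL.std vs. GLSL.std.450).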
using ExtInstList =
    std::vector<std::pair<SPIRV::InstructionSet::InstructionSet, uint32_t>>;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class SPIRVInstructionSelector : public InstructionSelector {
  const SPIRVSubtarget &STI;
  const SPIRVInstrInfo &TII;
  const SPIRVRegisterInfo &TRI;
  const RegisterBankInfo &RBI;
  SPIRVGlobalRegistry &GR;
  MachineRegisterInfo *MRI;
  SPIRVMachineModuleInfo *MMI = nullptr;

  /// We need to keep track of the number we give to anonymous global values to
  /// generate the same name every time it is needed.
  mutable DenseMap<const GlobalValue *, unsigned> UnnamedGlobalIDs;

public:
  SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                           const SPIRVSubtarget &ST,
                           const RegisterBankInfo &RBI);
  void setupMF(MachineFunction &MF, GISelKnownBits *KB,
               CodeGenCoverage *CoverageInfo, ProfileSummaryInfo *PSI,
               BlockFrequencyInfo *BFI) override;
  // Common selection code. Instruction-specific selection occurs in spvSelect.
  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

#define GET_GLOBALISEL_PREDICATES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL

private:
  // tblgen-erated 'select' implementation, used as the initial selector for
  // the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // All instruction-specific selection that didn't happen in "select()".
  // Essentially a large switch/case that delegates to all the other select
  // methods.
  bool spvSelect(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectGlobalValue(Register ResVReg, MachineInstr &I,
                         const MachineInstr *Init = nullptr) const;

  bool selectUnOpWithSrc(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I, Register SrcReg,
                         unsigned Opcode) const;
  bool selectUnOp(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  unsigned Opcode) const;

  bool selectBitcast(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I) const;

  bool selectLoad(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectStore(MachineInstr &I) const;

  bool selectStackSave(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectStackRestore(MachineInstr &I) const;

  bool selectMemOperation(Register ResVReg, MachineInstr &I) const;

  bool selectAtomicRMW(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I, unsigned NewOpcode,
                       unsigned NegateOpcode = 0) const;

  bool selectAtomicCmpXchg(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectFence(MachineInstr &I) const;

  bool selectAddrSpaceCast(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectAnyOrAll(Register ResVReg, const SPIRVType *ResType,
                      MachineInstr &I, unsigned OpType) const;

  bool selectAll(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectAny(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectBitreverse(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;

  bool selectConstVector(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;
  bool selectSplatVector(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectCmp(Register ResVReg, const SPIRVType *ResType,
                 unsigned comparisonOpcode, MachineInstr &I) const;

  bool selectICmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectFCmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;

  void renderImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
                   int OpIdx) const;
  void renderFImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
                    int OpIdx) const;

  bool selectConst(Register ResVReg, const SPIRVType *ResType, const APInt &Imm,
                   MachineInstr &I) const;

  bool selectSelect(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                    bool IsSigned) const;
  bool selectIToF(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  bool IsSigned, unsigned Opcode) const;
  bool selectExt(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                 bool IsSigned) const;

  bool selectTrunc(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;

  bool selectIntToBool(Register IntReg, Register ResVReg, MachineInstr &I,
                       const SPIRVType *intTy, const SPIRVType *boolTy) const;

  bool selectOpUndef(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I) const;
  bool selectFreeze(Register ResVReg, const SPIRVType *ResType,
                    MachineInstr &I) const;
  bool selectIntrinsic(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractVal(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertVal(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractElt(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertElt(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectGEP(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectFrameIndex(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectAllocaArray(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectBranch(MachineInstr &I) const;
  bool selectBranchCond(MachineInstr &I) const;

  bool selectPhi(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, CL::OpenCLExtInst CLInst) const;
  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, CL::OpenCLExtInst CLInst,
                     GL::GLSLExtInst GLInst) const;
  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, const ExtInstList &ExtInsts) const;

  bool selectLog10(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;

  bool selectSpvThreadId(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectUnmergeValues(MachineInstr &I) const;

  Register buildI32Constant(uint32_t Val, MachineInstr &I,
                            const SPIRVType *ResType = nullptr) const;

  Register buildZerosVal(const SPIRVType *ResType, MachineInstr &I) const;
  Register buildZerosValF(const SPIRVType *ResType, MachineInstr &I) const;
  Register buildOnesVal(bool AllOnes, const SPIRVType *ResType,
                        MachineInstr &I) const;

  bool wrapIntoSpecConstantOp(MachineInstr &I,
                              SmallVector<Register> &CompositeArgs) const;
};

} // end anonymous namespace

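// Pull in the TableGen-erated pattern matcher (selectImpl) and its supporting
// tables; each GET_GLOBALISEL_* macro selects which part of the generated
// .inc file is emitted here.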
#define GET_GLOBALISEL_IMPL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

SPIRVInstructionSelector::SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                                                   const SPIRVSubtarget &ST,
                                                   const RegisterBankInfo &RBI)
    : InstructionSelector(), STI(ST), TII(*ST.getInstrInfo()),
      TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

void SPIRVInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
                                       CodeGenCoverage *CoverageInfo,
                                       ProfileSummaryInfo *PSI,
                                       BlockFrequencyInfo *BFI) {
  MMI = &MF.getMMI().getObjFileInfo<SPIRVMachineModuleInfo>();
  MRI = &MF.getRegInfo();
  GR.setCurrentFunc(MF);
  InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
}

static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI);

// Defined in SPIRVLegalizerInfo.cpp.
extern bool isTypeFoldingSupported(unsigned Opcode);

bool SPIRVInstructionSelector::select(MachineInstr &I) {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  unsigned Opcode = I.getOpcode();
  // If it's not a GMIR instruction, we've selected it already.
  if (!isPreISelGenericOpcode(Opcode)) {
    if (Opcode == SPIRV::ASSIGN_TYPE) { // These pseudos aren't needed any more.
      Register DstReg = I.getOperand(0).getReg();
      Register SrcReg = I.getOperand(1).getReg();
      auto *Def = MRI->getVRegDef(SrcReg);
      if (isTypeFoldingSupported(Def->getOpcode())) {
        if (MRI->getType(DstReg).isPointer())
          MRI->setType(DstReg, LLT::scalar(32));
        bool Res = selectImpl(I, *CoverageInfo);
        assert(Res || Def->getOpcode() == TargetOpcode::G_CONSTANT);
        if (Res)
          return Res;
      }
      MRI->replaceRegWith(SrcReg, DstReg);
      I.removeFromParent();
      return true;
    } else if (I.getNumDefs() == 1) {
      // Make all vregs 32 bits (for SPIR-V IDs).
      MRI->setType(I.getOperand(0).getReg(), LLT::scalar(32));
    }
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  if (I.getNumOperands() != I.getNumExplicitOperands()) {
    LLVM_DEBUG(errs() << "Generic instr has unexpected implicit operands\n");
    return false;
  }

  // Common code for getting return reg+type, and removing selected instr
  // from parent occurs here. Instr-specific selection happens in spvSelect().
  bool HasDefs = I.getNumDefs() > 0;
  Register ResVReg = HasDefs ? I.getOperand(0).getReg() : Register(0);
  SPIRVType *ResType = HasDefs ? GR.getSPIRVTypeForVReg(ResVReg) : nullptr;
  assert(!HasDefs || ResType || I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
  if (spvSelect(ResVReg, ResType, I)) {
    if (HasDefs) // Make all vregs 32 bits (for SPIR-V IDs).
      for (unsigned i = 0; i < I.getNumDefs(); ++i)
        MRI->setType(I.getOperand(i).getReg(), LLT::scalar(32));
    I.removeFromParent();
    return true;
  }
  return false;
}

bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  const unsigned Opcode = I.getOpcode();
  if (isTypeFoldingSupported(Opcode) && Opcode != TargetOpcode::G_CONSTANT)
    return selectImpl(I, *CoverageInfo);
  switch (Opcode) {
  case TargetOpcode::G_CONSTANT:
    return selectConst(ResVReg, ResType, I.getOperand(1).getCImm()->getValue(),
                       I);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(ResVReg, I);
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectOpUndef(ResVReg, ResType, I);
  case TargetOpcode::G_FREEZE:
    return selectFreeze(ResVReg, ResType, I);

  case TargetOpcode::G_INTRINSIC:
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
  case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
    return selectIntrinsic(ResVReg, ResType, I);
  case TargetOpcode::G_BITREVERSE:
    return selectBitreverse(ResVReg, ResType, I);

  case TargetOpcode::G_BUILD_VECTOR:
    return selectConstVector(ResVReg, ResType, I);
  case TargetOpcode::G_SPLAT_VECTOR:
    return selectSplatVector(ResVReg, ResType, I);

  case TargetOpcode::G_SHUFFLE_VECTOR: {
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorShuffle))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addUse(I.getOperand(1).getReg())
                   .addUse(I.getOperand(2).getReg());
    for (auto V : I.getOperand(3).getShuffleMask())
      MIB.addImm(V);
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case TargetOpcode::G_MEMMOVE:
  case TargetOpcode::G_MEMCPY:
  case TargetOpcode::G_MEMSET:
    return selectMemOperation(ResVReg, I);

  case TargetOpcode::G_ICMP:
    return selectICmp(ResVReg, ResType, I);
  case TargetOpcode::G_FCMP:
    return selectFCmp(ResVReg, ResType, I);

  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndex(ResVReg, ResType, I);

  case TargetOpcode::G_LOAD:
    return selectLoad(ResVReg, ResType, I);
  case TargetOpcode::G_STORE:
    return selectStore(I);

  case TargetOpcode::G_BR:
    return selectBranch(I);
  case TargetOpcode::G_BRCOND:
    return selectBranchCond(I);

  case TargetOpcode::G_PHI:
    return selectPhi(ResVReg, ResType, I);

  case TargetOpcode::G_FPTOSI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToS);
  case TargetOpcode::G_FPTOUI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToU);

  case TargetOpcode::G_SITOFP:
    return selectIToF(ResVReg, ResType, I, true, SPIRV::OpConvertSToF);
  case TargetOpcode::G_UITOFP:
    return selectIToF(ResVReg, ResType, I, false, SPIRV::OpConvertUToF);

  case TargetOpcode::G_CTPOP:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitCount);
  case TargetOpcode::G_SMIN:
    return selectExtInst(ResVReg, ResType, I, CL::s_min, GL::SMin);
  case TargetOpcode::G_UMIN:
    return selectExtInst(ResVReg, ResType, I, CL::u_min, GL::UMin);

  case TargetOpcode::G_SMAX:
    return selectExtInst(ResVReg, ResType, I, CL::s_max, GL::SMax);
  case TargetOpcode::G_UMAX:
    return selectExtInst(ResVReg, ResType, I, CL::u_max, GL::UMax);

  case TargetOpcode::G_FMA:
    return selectExtInst(ResVReg, ResType, I, CL::fma, GL::Fma);

  case TargetOpcode::G_FPOW:
    return selectExtInst(ResVReg, ResType, I, CL::pow, GL::Pow);
  case TargetOpcode::G_FPOWI:
    return selectExtInst(ResVReg, ResType, I, CL::pown);

  case TargetOpcode::G_FEXP:
    return selectExtInst(ResVReg, ResType, I, CL::exp, GL::Exp);
  case TargetOpcode::G_FEXP2:
    return selectExtInst(ResVReg, ResType, I, CL::exp2, GL::Exp2);

  case TargetOpcode::G_FLOG:
    return selectExtInst(ResVReg, ResType, I, CL::log, GL::Log);
  case TargetOpcode::G_FLOG2:
    return selectExtInst(ResVReg, ResType, I, CL::log2, GL::Log2);
  case TargetOpcode::G_FLOG10:
    return selectLog10(ResVReg, ResType, I);

  case TargetOpcode::G_FABS:
    return selectExtInst(ResVReg, ResType, I, CL::fabs, GL::FAbs);
  case TargetOpcode::G_ABS:
    return selectExtInst(ResVReg, ResType, I, CL::s_abs, GL::SAbs);

  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMINIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmin, GL::NMin);
  case TargetOpcode::G_FMAXNUM:
  case TargetOpcode::G_FMAXIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmax, GL::NMax);

  case TargetOpcode::G_FCOPYSIGN:
    return selectExtInst(ResVReg, ResType, I, CL::copysign);

  case TargetOpcode::G_FCEIL:
    return selectExtInst(ResVReg, ResType, I, CL::ceil, GL::Ceil);
  case TargetOpcode::G_FFLOOR:
    return selectExtInst(ResVReg, ResType, I, CL::floor, GL::Floor);

  case TargetOpcode::G_FCOS:
    return selectExtInst(ResVReg, ResType, I, CL::cos, GL::Cos);
  case TargetOpcode::G_FSIN:
    return selectExtInst(ResVReg, ResType, I, CL::sin, GL::Sin);

  case TargetOpcode::G_FSQRT:
    return selectExtInst(ResVReg, ResType, I, CL::sqrt, GL::Sqrt);

  case TargetOpcode::G_CTTZ:
  case TargetOpcode::G_CTTZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::ctz);
  case TargetOpcode::G_CTLZ:
  case TargetOpcode::G_CTLZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::clz);

  case TargetOpcode::G_INTRINSIC_ROUND:
    return selectExtInst(ResVReg, ResType, I, CL::round, GL::Round);
  case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);
  case TargetOpcode::G_INTRINSIC_TRUNC:
    return selectExtInst(ResVReg, ResType, I, CL::trunc, GL::Trunc);
  case TargetOpcode::G_FRINT:
  case TargetOpcode::G_FNEARBYINT:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);

  case TargetOpcode::G_SMULH:
    return selectExtInst(ResVReg, ResType, I, CL::s_mul_hi);
  case TargetOpcode::G_UMULH:
    return selectExtInst(ResVReg, ResType, I, CL::u_mul_hi);

  case TargetOpcode::G_SEXT:
    return selectExt(ResVReg, ResType, I, true);
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_ZEXT:
    return selectExt(ResVReg, ResType, I, false);
  case TargetOpcode::G_TRUNC:
    return selectTrunc(ResVReg, ResType, I);
  case TargetOpcode::G_FPTRUNC:
  case TargetOpcode::G_FPEXT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpFConvert);

  case TargetOpcode::G_PTRTOINT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertPtrToU);
  case TargetOpcode::G_INTTOPTR:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertUToPtr);
  case TargetOpcode::G_BITCAST:
    return selectBitcast(ResVReg, ResType, I);
  case TargetOpcode::G_ADDRSPACE_CAST:
    return selectAddrSpaceCast(ResVReg, ResType, I);
  case TargetOpcode::G_PTR_ADD: {
    // Currently, we get G_PTR_ADD only as a result of translating
    // global variables, initialized with constant expressions like GV + Const
    // (see test opencl/basic/progvar_prog_scope_init.ll).
    // TODO: extend the handler once we have other cases.
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    Register GV = I.getOperand(1).getReg();
    MachineRegisterInfo::def_instr_iterator II = MRI->def_instr_begin(GV);
    (void)II;
    assert(((*II).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
            (*II).getOpcode() == TargetOpcode::COPY ||
            (*II).getOpcode() == SPIRV::OpVariable) &&
           isImm(I.getOperand(2), MRI));
    Register Idx = buildZerosVal(GR.getOrCreateSPIRVIntegerType(32, I, TII), I);
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addImm(static_cast<uint32_t>(
                       SPIRV::Opcode::InBoundsPtrAccessChain))
                   .addUse(GV)
                   .addUse(Idx)
                   .addUse(I.getOperand(2).getReg());
    return MIB.constrainAllUses(TII, TRI, RBI);
  }

  case TargetOpcode::G_ATOMICRMW_OR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicOr);
  case TargetOpcode::G_ATOMICRMW_ADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicIAdd);
  case TargetOpcode::G_ATOMICRMW_AND:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicAnd);
  case TargetOpcode::G_ATOMICRMW_MAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMax);
  case TargetOpcode::G_ATOMICRMW_MIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMin);
  case TargetOpcode::G_ATOMICRMW_SUB:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicISub);
  case TargetOpcode::G_ATOMICRMW_XOR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicXor);
  case TargetOpcode::G_ATOMICRMW_UMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMax);
  case TargetOpcode::G_ATOMICRMW_UMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMin);
  case TargetOpcode::G_ATOMICRMW_XCHG:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicExchange);
  case TargetOpcode::G_ATOMIC_CMPXCHG:
    return selectAtomicCmpXchg(ResVReg, ResType, I);

  case TargetOpcode::G_ATOMICRMW_FADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFAddEXT);
  case TargetOpcode::G_ATOMICRMW_FSUB:
    // Translate G_ATOMICRMW_FSUB to OpAtomicFAddEXT with a negated value
    // operand.
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFAddEXT,
                           SPIRV::OpFNegate);
  case TargetOpcode::G_ATOMICRMW_FMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFMinEXT);
  case TargetOpcode::G_ATOMICRMW_FMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFMaxEXT);

  case TargetOpcode::G_FENCE:
    return selectFence(I);

  case TargetOpcode::G_STACKSAVE:
    return selectStackSave(ResVReg, ResType, I);
  case TargetOpcode::G_STACKRESTORE:
    return selectStackRestore(I);

  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(I);

  default:
    return false;
  }
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst) const {
  return selectExtInst(ResVReg, ResType, I,
                       {{SPIRV::InstructionSet::OpenCL_std, CLInst}});
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst,
                                             GL::GLSLExtInst GLInst) const {
  ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
                          {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
  return selectExtInst(ResVReg, ResType, I, ExtInsts);
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             const ExtInstList &Insts) const {

  for (const auto &Ex : Insts) {
    SPIRV::InstructionSet::InstructionSet Set = Ex.first;
    uint32_t Opcode = Ex.second;
    if (STI.canUseExtInstSet(Set)) {
      MachineBasicBlock &BB = *I.getParent();
      auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
                     .addDef(ResVReg)
                     .addUse(GR.getSPIRVTypeID(ResType))
                     .addImm(static_cast<uint32_t>(Set))
                     .addImm(Opcode);
      const unsigned NumOps = I.getNumOperands();
      for (unsigned i = 1; i < NumOps; ++i)
        MIB.add(I.getOperand(i));
      return MIB.constrainAllUses(TII, TRI, RBI);
    }
  }
  return false;
}

bool SPIRVInstructionSelector::selectUnOpWithSrc(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I,
                                                 Register SrcReg,
                                                 unsigned Opcode) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(SrcReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectUnOp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I,
                                          unsigned Opcode) const {
  return selectUnOpWithSrc(ResVReg, ResType, I, I.getOperand(1).getReg(),
                           Opcode);
}

bool SPIRVInstructionSelector::selectBitcast(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I) const {
  Register OpReg = I.getOperand(1).getReg();
  SPIRVType *OpType = OpReg.isValid() ? GR.getSPIRVTypeForVReg(OpReg) : nullptr;
  if (!GR.isBitcastCompatible(ResType, OpType))
    report_fatal_error("incompatible result and operand types in a bitcast");
  return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
}

static SPIRV::Scope::Scope getScope(SyncScope::ID Ord,
                                    SPIRVMachineModuleInfo *MMI) {
  if (Ord == SyncScope::SingleThread || Ord == MMI->Work_ItemSSID)
    return SPIRV::Scope::Invocation;
  else if (Ord == SyncScope::System || Ord == MMI->DeviceSSID)
    return SPIRV::Scope::Device;
  else if (Ord == MMI->WorkGroupSSID)
    return SPIRV::Scope::Workgroup;
  else if (Ord == MMI->AllSVMDevicesSSID)
    return SPIRV::Scope::CrossDevice;
  else if (Ord == MMI->SubGroupSSID)
    return SPIRV::Scope::Subgroup;
  else
    // The OpenCL approach is: "The functions that do not have memory_scope
    // argument have the same semantics as the corresponding functions with the
    // memory_scope argument set to memory_scope_device." See:
    // https://registry.khronos.org/OpenCL/specs/3.0-unified/html/OpenCL_C.html#atomic-functions
    // So if the scope is unknown, assuming that SPIR-V code is to be consumed
    // in an OpenCL environment, we use the same approach and set the scope to
    // memory_scope_device.
    return SPIRV::Scope::Device;
}

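// Appends a SPIR-V MemoryOperand mask (Volatile/Nontemporal/Aligned) derived
// from a MachineMemOperand to an OpLoad/OpStore under construction; the
// Aligned literal is followed by the alignment value, as SPIR-V requires.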
static void addMemoryOperands(MachineMemOperand *MemOp,
                              MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (MemOp->isVolatile())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (MemOp->isNonTemporal())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
  if (MemOp->getAlign().value())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None)) {
    MIB.addImm(SpvMemOp);
    if (SpvMemOp & static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
      MIB.addImm(MemOp->getAlign().value());
  }
}
static void addMemoryOperands(uint64_t Flags, MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (Flags & MachineMemOperand::Flags::MOVolatile)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (Flags & MachineMemOperand::Flags::MONonTemporal)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None))
    MIB.addImm(SpvMemOp);
}

bool SPIRVInstructionSelector::selectLoad(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(Ptr);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
           I.getOpcode() ==
               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStore(MachineInstr &I) const {
  unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
  Register StoreVal = I.getOperand(0 + OpOffset).getReg();
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpStore))
                 .addUse(Ptr)
                 .addUse(StoreVal);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
           I.getOpcode() ==
               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStackSave(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
    report_fatal_error(
        "llvm.stacksave intrinsic: this instruction requires the following "
        "SPIR-V extension: SPV_INTEL_variable_length_array",
        false);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSaveMemoryINTEL))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStackRestore(MachineInstr &I) const {
  if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
    report_fatal_error(
        "llvm.stackrestore intrinsic: this instruction requires the following "
        "SPIR-V extension: SPV_INTEL_variable_length_array",
        false);
  if (!I.getOperand(0).isReg())
    return false;
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpRestoreMemoryINTEL))
      .addUse(I.getOperand(0).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

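// For G_MEMSET with constant fill value and length, the code below
// materializes a UniformConstant byte-array global holding the fill pattern
// and copies from it with OpCopyMemorySized; G_MEMCPY/G_MEMMOVE lower to
// OpCopyMemorySized directly.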
bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg,
                                                  MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  Register SrcReg = I.getOperand(1).getReg();
  if (I.getOpcode() == TargetOpcode::G_MEMSET) {
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    unsigned Val = getIConstVal(I.getOperand(1).getReg(), MRI);
    unsigned Num = getIConstVal(I.getOperand(2).getReg(), MRI);
    SPIRVType *ValTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
    SPIRVType *ArrTy = GR.getOrCreateSPIRVArrayType(ValTy, Num, I, TII);
    Register Const = GR.getOrCreateConsIntArray(Val, I, ArrTy, TII);
    SPIRVType *VarTy = GR.getOrCreateSPIRVPointerType(
        ArrTy, I, TII, SPIRV::StorageClass::UniformConstant);
    // TODO: check if we have such GV, add init, use buildGlobalVariable.
    Function &CurFunction = GR.CurMF->getFunction();
    Type *LLVMArrTy =
        ArrayType::get(IntegerType::get(CurFunction.getContext(), 8), Num);
    // Module takes ownership of the global var.
    GlobalVariable *GV = new GlobalVariable(*CurFunction.getParent(), LLVMArrTy,
                                            true, GlobalValue::InternalLinkage,
                                            Constant::getNullValue(LLVMArrTy));
    Register VarReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
    GR.add(GV, GR.CurMF, VarReg);

    buildOpDecorate(VarReg, I, TII, SPIRV::Decoration::Constant, {});
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
        .addDef(VarReg)
        .addUse(GR.getSPIRVTypeID(VarTy))
        .addImm(SPIRV::StorageClass::UniformConstant)
        .addUse(Const)
        .constrainAllUses(TII, TRI, RBI);
    SPIRVType *SourceTy = GR.getOrCreateSPIRVPointerType(
        ValTy, I, TII, SPIRV::StorageClass::UniformConstant);
    SrcReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
    selectUnOpWithSrc(SrcReg, SourceTy, I, VarReg, SPIRV::OpBitcast);
  }
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCopyMemorySized))
                 .addUse(I.getOperand(0).getReg())
                 .addUse(SrcReg)
                 .addUse(I.getOperand(2).getReg());
  if (I.getNumMemOperands())
    addMemoryOperands(*I.memoperands_begin(), MIB);
  bool Result = MIB.constrainAllUses(TII, TRI, RBI);
  if (ResVReg.isValid() && ResVReg != MIB->getOperand(0).getReg())
    BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY), ResVReg)
        .addUse(MIB->getOperand(0).getReg());
  return Result;
}

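// Lowers G_ATOMICRMW_* to the corresponding OpAtomic* instruction. Scope and
// memory-semantics operands are materialized as i32 constant IDs; an optional
// NegateOpcode first negates the value operand (used to express FSUB via
// OpAtomicFAddEXT).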
bool SPIRVInstructionSelector::selectAtomicRMW(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I,
                                               unsigned NewOpcode,
                                               unsigned NegateOpcode) const {
  assert(I.hasOneMemOperand());
  const MachineMemOperand *MemOp = *I.memoperands_begin();
  uint32_t Scope =
      static_cast<uint32_t>(getScope(MemOp->getSyncScopeID(), MMI));
  Register ScopeReg = buildI32Constant(Scope, I);

  Register Ptr = I.getOperand(1).getReg();
  // TODO: Changed as it's implemented in the translator. See test/atomicrmw.ll
  // auto ScSem =
  // getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr));
  AtomicOrdering AO = MemOp->getSuccessOrdering();
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem /*| ScSem*/, I);

  bool Result = false;
  Register ValueReg = I.getOperand(2).getReg();
  if (NegateOpcode != 0) {
    // Translation with a negated value operand is requested.
    Register TmpReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    Result |= selectUnOpWithSrc(TmpReg, ResType, I, ValueReg, NegateOpcode);
    ValueReg = TmpReg;
  }

  Result |= BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(NewOpcode))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(Ptr)
                .addUse(ScopeReg)
                .addUse(MemSemReg)
                .addUse(ValueReg)
                .constrainAllUses(TII, TRI, RBI);
  return Result;
}

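// G_UNMERGE_VALUES of a vector becomes one OpCompositeExtract per destination
// register; destinations with no assigned SPIR-V type inherit the source
// vector's component type.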
bool SPIRVInstructionSelector::selectUnmergeValues(MachineInstr &I) const {
  unsigned ArgI = I.getNumOperands() - 1;
  Register SrcReg =
      I.getOperand(ArgI).isReg() ? I.getOperand(ArgI).getReg() : Register(0);
  SPIRVType *DefType =
      SrcReg.isValid() ? GR.getSPIRVTypeForVReg(SrcReg) : nullptr;
  if (!DefType || DefType->getOpcode() != SPIRV::OpTypeVector)
    report_fatal_error(
        "cannot select G_UNMERGE_VALUES with a non-vector argument");

  SPIRVType *ScalarType =
      GR.getSPIRVTypeForVReg(DefType->getOperand(1).getReg());
  MachineBasicBlock &BB = *I.getParent();
  bool Res = false;
  for (unsigned i = 0; i < I.getNumDefs(); ++i) {
    Register ResVReg = I.getOperand(i).getReg();
    SPIRVType *ResType = GR.getSPIRVTypeForVReg(ResVReg);
    if (!ResType) {
      // There were no "assign type" actions, so let's fix this now.
      ResType = ScalarType;
      MRI->setRegClass(ResVReg, &SPIRV::IDRegClass);
      MRI->setType(ResVReg, LLT::scalar(GR.getScalarOrVectorBitWidth(ResType)));
      GR.assignSPIRVTypeToVReg(ResType, ResVReg, *GR.CurMF);
    }
    auto MIB =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addUse(SrcReg)
            .addImm(static_cast<int64_t>(i));
    Res |= MIB.constrainAllUses(TII, TRI, RBI);
  }
  return Res;
}

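// G_FENCE becomes OpMemoryBarrier; both operands are i32 constant IDs built
// from the instruction's ordering and sync-scope immediates.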
bool SPIRVInstructionSelector::selectFence(MachineInstr &I) const {
  AtomicOrdering AO = AtomicOrdering(I.getOperand(0).getImm());
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem, I);
  SyncScope::ID Ord = SyncScope::ID(I.getOperand(1).getImm());
  uint32_t Scope = static_cast<uint32_t>(getScope(Ord, MMI));
  Register ScopeReg = buildI32Constant(Scope, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemoryBarrier))
      .addUse(ScopeReg)
      .addUse(MemSemReg)
      .constrainAllUses(TII, TRI, RBI);
}

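// cmpxchg produces a {value, success} pair: the OpAtomicCompareExchange
// result is compared with the expected value via OpIEqual, and both parts are
// packed into the result struct with two OpCompositeInserts.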
bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  Register ScopeReg;
  Register MemSemEqReg;
  Register MemSemNeqReg;
  Register Ptr = I.getOperand(2).getReg();
  if (!isa<GIntrinsic>(I)) {
    assert(I.hasOneMemOperand());
    const MachineMemOperand *MemOp = *I.memoperands_begin();
    unsigned Scope =
        static_cast<uint32_t>(getScope(MemOp->getSyncScopeID(), MMI));
    ScopeReg = buildI32Constant(Scope, I);

    unsigned ScSem = static_cast<uint32_t>(
        getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr)));
    AtomicOrdering AO = MemOp->getSuccessOrdering();
    unsigned MemSemEq = static_cast<uint32_t>(getMemSemantics(AO)) | ScSem;
    MemSemEqReg = buildI32Constant(MemSemEq, I);
    AtomicOrdering FO = MemOp->getFailureOrdering();
    unsigned MemSemNeq = static_cast<uint32_t>(getMemSemantics(FO)) | ScSem;
    MemSemNeqReg =
        MemSemEq == MemSemNeq ? MemSemEqReg : buildI32Constant(MemSemNeq, I);
  } else {
    ScopeReg = I.getOperand(5).getReg();
    MemSemEqReg = I.getOperand(6).getReg();
    MemSemNeqReg = I.getOperand(7).getReg();
  }

  Register Cmp = I.getOperand(3).getReg();
  Register Val = I.getOperand(4).getReg();
  SPIRVType *SpvValTy = GR.getSPIRVTypeForVReg(Val);
  Register ACmpRes = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  const DebugLoc &DL = I.getDebugLoc();
  bool Result =
      BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpAtomicCompareExchange))
          .addDef(ACmpRes)
          .addUse(GR.getSPIRVTypeID(SpvValTy))
          .addUse(Ptr)
          .addUse(ScopeReg)
          .addUse(MemSemEqReg)
          .addUse(MemSemNeqReg)
          .addUse(Val)
          .addUse(Cmp)
          .constrainAllUses(TII, TRI, RBI);
  Register CmpSuccReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  SPIRVType *BoolTy = GR.getOrCreateSPIRVBoolType(I, TII);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpIEqual))
                .addDef(CmpSuccReg)
                .addUse(GR.getSPIRVTypeID(BoolTy))
                .addUse(ACmpRes)
                .addUse(Cmp)
                .constrainAllUses(TII, TRI, RBI);
  Register TmpReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(TmpReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(ACmpRes)
                .addUse(GR.getOrCreateUndef(I, ResType, TII))
                .addImm(0)
                .constrainAllUses(TII, TRI, RBI);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(CmpSuccReg)
                .addUse(TmpReg)
                .addImm(1)
                .constrainAllUses(TII, TRI, RBI);
  return Result;
}

static bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::Workgroup:
  case SPIRV::StorageClass::CrossWorkgroup:
  case SPIRV::StorageClass::Function:
    return true;
  default:
    return false;
  }
}

static bool isUSMStorageClass(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::DeviceOnlyINTEL:
  case SPIRV::StorageClass::HostOnlyINTEL:
    return true;
  default:
    return false;
  }
}

// In SPIR-V address space casting can only happen to and from the Generic
// storage class. We can also only cast Workgroup, CrossWorkgroup, or Function
// pointers to and from Generic pointers. As such, we can convert e.g. from
// Workgroup to Function by going via a Generic pointer as an intermediary. All
// other combinations can only be done by a bitcast, and are probably not safe.
bool SPIRVInstructionSelector::selectAddrSpaceCast(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  // If the AddrSpaceCast user is single and in OpConstantComposite or
  // OpVariable, we should select OpSpecConstantOp.
  auto UIs = MRI->use_instructions(ResVReg);
  if (!UIs.empty() && ++UIs.begin() == UIs.end() &&
      (UIs.begin()->getOpcode() == SPIRV::OpConstantComposite ||
       UIs.begin()->getOpcode() == SPIRV::OpVariable ||
       isSpvIntrinsic(*UIs.begin(), Intrinsic::spv_init_global))) {
    Register NewReg = I.getOperand(1).getReg();
    MachineBasicBlock &BB = *I.getParent();
    SPIRVType *SpvBaseTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
    ResType = GR.getOrCreateSPIRVPointerType(SpvBaseTy, I, TII,
                                             SPIRV::StorageClass::Generic);
    bool Result =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addImm(static_cast<uint32_t>(SPIRV::Opcode::PtrCastToGeneric))
            .addUse(NewReg)
            .constrainAllUses(TII, TRI, RBI);
    return Result;
  }
  Register SrcPtr = I.getOperand(1).getReg();
  SPIRVType *SrcPtrTy = GR.getSPIRVTypeForVReg(SrcPtr);
  SPIRV::StorageClass::StorageClass SrcSC = GR.getPointerStorageClass(SrcPtr);
  SPIRV::StorageClass::StorageClass DstSC = GR.getPointerStorageClass(ResVReg);

  // Don't generate a cast between identical storage classes.
  if (SrcSC == DstSC)
    return true;

  // Casting from an eligible pointer to Generic.
  if (DstSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(SrcSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
  // Casting from Generic to an eligible pointer.
  if (SrcSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(DstSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);
  // Casting between 2 eligible pointers using Generic as an intermediary.
  if (isGenericCastablePtr(SrcSC) && isGenericCastablePtr(DstSC)) {
    Register Tmp = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
        SrcPtrTy, I, TII, SPIRV::StorageClass::Generic);
    MachineBasicBlock &BB = *I.getParent();
    const DebugLoc &DL = I.getDebugLoc();
    bool Success = BuildMI(BB, I, DL, TII.get(SPIRV::OpPtrCastToGeneric))
                       .addDef(Tmp)
                       .addUse(GR.getSPIRVTypeID(GenericPtrTy))
                       .addUse(SrcPtr)
                       .constrainAllUses(TII, TRI, RBI);
    return Success && BuildMI(BB, I, DL, TII.get(SPIRV::OpGenericCastToPtr))
                          .addDef(ResVReg)
                          .addUse(GR.getSPIRVTypeID(ResType))
                          .addUse(Tmp)
                          .constrainAllUses(TII, TRI, RBI);
  }

  // Check if instructions from the SPV_INTEL_usm_storage_classes extension may
  // be applied.
  if (isUSMStorageClass(SrcSC) && DstSC == SPIRV::StorageClass::CrossWorkgroup)
    return selectUnOp(ResVReg, ResType, I,
                      SPIRV::OpPtrCastToCrossWorkgroupINTEL);
  if (SrcSC == SPIRV::StorageClass::CrossWorkgroup && isUSMStorageClass(DstSC))
    return selectUnOp(ResVReg, ResType, I,
                      SPIRV::OpCrossWorkgroupCastToPtrINTEL);

  // TODO: Should this case just be disallowed completely?
  // We're casting 2 other arbitrary address spaces, so have to bitcast.
  return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
}

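// Maps an LLVM floating-point predicate to the matching SPIR-V ordered or
// unordered comparison opcode.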
static unsigned getFCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::FCMP_OEQ:
    return SPIRV::OpFOrdEqual;
  case CmpInst::FCMP_OGE:
    return SPIRV::OpFOrdGreaterThanEqual;
  case CmpInst::FCMP_OGT:
    return SPIRV::OpFOrdGreaterThan;
  case CmpInst::FCMP_OLE:
    return SPIRV::OpFOrdLessThanEqual;
  case CmpInst::FCMP_OLT:
    return SPIRV::OpFOrdLessThan;
  case CmpInst::FCMP_ONE:
    return SPIRV::OpFOrdNotEqual;
  case CmpInst::FCMP_ORD:
    return SPIRV::OpOrdered;
  case CmpInst::FCMP_UEQ:
    return SPIRV::OpFUnordEqual;
  case CmpInst::FCMP_UGE:
    return SPIRV::OpFUnordGreaterThanEqual;
  case CmpInst::FCMP_UGT:
    return SPIRV::OpFUnordGreaterThan;
  case CmpInst::FCMP_ULE:
    return SPIRV::OpFUnordLessThanEqual;
  case CmpInst::FCMP_ULT:
    return SPIRV::OpFUnordLessThan;
  case CmpInst::FCMP_UNE:
    return SPIRV::OpFUnordNotEqual;
  case CmpInst::FCMP_UNO:
    return SPIRV::OpUnordered;
  default:
    llvm_unreachable("Unknown predicate type for FCmp");
  }
}

static unsigned getICmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpIEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpINotEqual;
  case CmpInst::ICMP_SGE:
    return SPIRV::OpSGreaterThanEqual;
  case CmpInst::ICMP_SGT:
    return SPIRV::OpSGreaterThan;
  case CmpInst::ICMP_SLE:
    return SPIRV::OpSLessThanEqual;
  case CmpInst::ICMP_SLT:
    return SPIRV::OpSLessThan;
  case CmpInst::ICMP_UGE:
    return SPIRV::OpUGreaterThanEqual;
  case CmpInst::ICMP_UGT:
    return SPIRV::OpUGreaterThan;
  case CmpInst::ICMP_ULE:
    return SPIRV::OpULessThanEqual;
  case CmpInst::ICMP_ULT:
    return SPIRV::OpULessThan;
  default:
    llvm_unreachable("Unknown predicate type for ICmp");
  }
}

static unsigned getPtrCmpOpcode(unsigned Pred) {
  switch (static_cast<CmpInst::Predicate>(Pred)) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpPtrEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpPtrNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for pointer comparison");
  }
}

// Return the logical operation, or abort if none exists.
static unsigned getBoolCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpLogicalEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpLogicalNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for Bool comparison");
  }
}

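// Common lowering for OpAny/OpAll reductions: a non-bool input is first
// compared against zero (OpFOrdNotEqual / OpINotEqual) to obtain a boolean
// vector, which is then reduced; scalar bools are just copied through.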
bool SPIRVInstructionSelector::selectAnyOrAll(Register ResVReg,
                                              const SPIRVType *ResType,
                                              MachineInstr &I,
                                              unsigned OpAnyOrAll) const {
  assert(I.getNumOperands() == 3);
  assert(I.getOperand(2).isReg());
  MachineBasicBlock &BB = *I.getParent();
  Register InputRegister = I.getOperand(2).getReg();
  SPIRVType *InputType = GR.getSPIRVTypeForVReg(InputRegister);

  if (!InputType)
    report_fatal_error("Input Type could not be determined.");

  bool IsBoolTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeBool);
  bool IsVectorTy = InputType->getOpcode() == SPIRV::OpTypeVector;
  if (IsBoolTy && !IsVectorTy) {
    assert(ResVReg == I.getOperand(0).getReg());
    return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                   TII.get(TargetOpcode::COPY))
        .addDef(ResVReg)
        .addUse(InputRegister)
        .constrainAllUses(TII, TRI, RBI);
  }

  bool IsFloatTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeFloat);
  unsigned SpirvNotEqualId =
      IsFloatTy ? SPIRV::OpFOrdNotEqual : SPIRV::OpINotEqual;
  SPIRVType *SpvBoolScalarTy = GR.getOrCreateSPIRVBoolType(I, TII);
  SPIRVType *SpvBoolTy = SpvBoolScalarTy;
  Register NotEqualReg = ResVReg;

  if (IsVectorTy) {
    NotEqualReg = IsBoolTy ? InputRegister
                           : MRI->createVirtualRegister(&SPIRV::IDRegClass);
    const unsigned NumElts = InputType->getOperand(2).getImm();
    SpvBoolTy = GR.getOrCreateSPIRVVectorType(SpvBoolTy, NumElts, I, TII);
  }

  if (!IsBoolTy) {
    Register ConstZeroReg =
        IsFloatTy ? buildZerosValF(InputType, I) : buildZerosVal(InputType, I);

    BuildMI(BB, I, I.getDebugLoc(), TII.get(SpirvNotEqualId))
        .addDef(NotEqualReg)
        .addUse(GR.getSPIRVTypeID(SpvBoolTy))
        .addUse(InputRegister)
        .addUse(ConstZeroReg)
        .constrainAllUses(TII, TRI, RBI);
  }

  if (!IsVectorTy)
    return true;

  return BuildMI(BB, I, I.getDebugLoc(), TII.get(OpAnyOrAll))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(SpvBoolScalarTy))
      .addUse(NotEqualReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectAll(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  return selectAnyOrAll(ResVReg, ResType, I, SPIRV::OpAll);
}

bool SPIRVInstructionSelector::selectAny(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  return selectAnyOrAll(ResVReg, ResType, I, SPIRV::OpAny);
}

bool SPIRVInstructionSelector::selectBitreverse(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpBitReverse))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(1).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectFreeze(Register ResVReg,
                                            const SPIRVType *ResType,
                                            MachineInstr &I) const {
  // There is no way to implement `freeze` correctly without support on SPIR-V
  // standard side, but we may at least address a simple (static) case when
  // undef/poison value presence is obvious. The main benefit of even
  // incomplete `freeze` support is preventing the translation from crashing
  // due to lack of support during the legalization and instruction selection
  // steps.
  if (!I.getOperand(0).isReg() || !I.getOperand(1).isReg())
    return false;
  Register OpReg = I.getOperand(1).getReg();
  if (MachineInstr *Def = MRI->getVRegDef(OpReg)) {
    Register Reg;
    switch (Def->getOpcode()) {
    case SPIRV::ASSIGN_TYPE:
      if (MachineInstr *AssignToDef =
              MRI->getVRegDef(Def->getOperand(1).getReg())) {
        if (AssignToDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
          Reg = Def->getOperand(2).getReg();
      }
      break;
    case SPIRV::OpUndef:
      Reg = Def->getOperand(1).getReg();
      break;
    }
    unsigned DestOpCode;
    if (Reg.isValid()) {
      DestOpCode = SPIRV::OpConstantNull;
    } else {
      DestOpCode = TargetOpcode::COPY;
      Reg = OpReg;
    }
    return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(DestOpCode))
        .addDef(I.getOperand(0).getReg())
        .addUse(Reg)
        .constrainAllUses(TII, TRI, RBI);
  }
  return false;
}

bool SPIRVInstructionSelector::selectConstVector(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  // TODO: only const case is supported for now.
  assert(std::all_of(
      I.operands_begin(), I.operands_end(), [this](const MachineOperand &MO) {
        if (MO.isDef())
          return true;
        if (!MO.isReg())
          return false;
        SPIRVType *ConstTy = this->MRI->getVRegDef(MO.getReg());
        assert(ConstTy && ConstTy->getOpcode() == SPIRV::ASSIGN_TYPE &&
               ConstTy->getOperand(1).isReg());
        Register ConstReg = ConstTy->getOperand(1).getReg();
        const MachineInstr *Const = this->MRI->getVRegDef(ConstReg);
        assert(Const);
        return (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
                Const->getOpcode() == TargetOpcode::G_FCONSTANT);
      }));

  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                     TII.get(SPIRV::OpConstantComposite))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  for (unsigned i = I.getNumExplicitDefs(); i < I.getNumExplicitOperands(); ++i)
    MIB.addUse(I.getOperand(i).getReg());
  return MIB.constrainAllUses(TII, TRI, RBI);
}

static unsigned getArrayComponentCount(MachineRegisterInfo *MRI,
                                       const SPIRVType *ResType) {
  Register OpReg = ResType->getOperand(2).getReg();
  SPIRVType *OpDef = MRI->getVRegDef(OpReg);
  if (!OpDef)
    return 0;
  if (OpDef->getOpcode() == SPIRV::ASSIGN_TYPE &&
      OpDef->getOperand(1).isReg()) {
    if (SPIRVType *RefDef = MRI->getVRegDef(OpDef->getOperand(1).getReg()))
      OpDef = RefDef;
  }
  unsigned N = OpDef->getOpcode() == TargetOpcode::G_CONSTANT
                   ? OpDef->getOperand(1).getCImm()->getValue().getZExtValue()
                   : 0;
  return N;
}

// Return true if the type represents a constant register.
static bool isConstReg(MachineRegisterInfo *MRI, SPIRVType *OpDef) {
  if (OpDef->getOpcode() == SPIRV::ASSIGN_TYPE &&
      OpDef->getOperand(1).isReg()) {
    if (SPIRVType *RefDef = MRI->getVRegDef(OpDef->getOperand(1).getReg()))
      OpDef = RefDef;
  }
  return OpDef->getOpcode() == TargetOpcode::G_CONSTANT ||
         OpDef->getOpcode() == TargetOpcode::G_FCONSTANT;
}

// Return true if the virtual register represents a constant.
static bool isConstReg(MachineRegisterInfo *MRI, Register OpReg) {
  if (SPIRVType *OpDef = MRI->getVRegDef(OpReg))
    return isConstReg(MRI, OpDef);
  return false;
}

bool SPIRVInstructionSelector::selectSplatVector(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  unsigned N = 0;
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    N = GR.getScalarOrVectorComponentCount(ResType);
  else if (ResType->getOpcode() == SPIRV::OpTypeArray)
    N = getArrayComponentCount(MRI, ResType);
  else
    report_fatal_error("Cannot select G_SPLAT_VECTOR with a non-vector result");

  unsigned OpIdx = I.getNumExplicitDefs();
  if (!I.getOperand(OpIdx).isReg())
    report_fatal_error("Unexpected argument in G_SPLAT_VECTOR");

  // Check if we may construct a constant vector.
  Register OpReg = I.getOperand(OpIdx).getReg();
  bool IsConst = isConstReg(MRI, OpReg);

  if (!IsConst && N < 2)
    report_fatal_error(
        "There must be at least two constituent operands in a vector");

  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                     TII.get(IsConst ? SPIRV::OpConstantComposite
                                     : SPIRV::OpCompositeConstruct))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  for (unsigned i = 0; i < N; ++i)
    MIB.addUse(OpReg);
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectCmp(Register ResVReg,
                                         const SPIRVType *ResType,
                                         unsigned CmpOpc,
                                         MachineInstr &I) const {
  Register Cmp0 = I.getOperand(2).getReg();
  Register Cmp1 = I.getOperand(3).getReg();
  assert(GR.getSPIRVTypeForVReg(Cmp0)->getOpcode() ==
             GR.getSPIRVTypeForVReg(Cmp1)->getOpcode() &&
         "CMP operands should have the same type");
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CmpOpc))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(Cmp0)
      .addUse(Cmp1)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectICmp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  auto Pred = I.getOperand(1).getPredicate();
  unsigned CmpOpc;

  Register CmpOperand = I.getOperand(2).getReg();
  if (GR.isScalarOfType(CmpOperand, SPIRV::OpTypePointer))
    CmpOpc = getPtrCmpOpcode(Pred);
  else if (GR.isScalarOrVectorOfType(CmpOperand, SPIRV::OpTypeBool))
    CmpOpc = getBoolCmpOpcode(Pred);
  else
    CmpOpc = getICmpOpcode(Pred);
  return selectCmp(ResVReg, ResType, CmpOpc, I);
}

void SPIRVInstructionSelector::renderFImm32(MachineInstrBuilder &MIB,
                                            const MachineInstr &I,
                                            int OpIdx) const {
  assert(I.getOpcode() == TargetOpcode::G_FCONSTANT && OpIdx == -1 &&
         "Expected G_FCONSTANT");
  const ConstantFP *FPImm = I.getOperand(1).getFPImm();
  addNumImm(FPImm->getValueAPF().bitcastToAPInt(), MIB);
}

void SPIRVInstructionSelector::renderImm32(MachineInstrBuilder &MIB,
                                           const MachineInstr &I,
                                           int OpIdx) const {
  assert(I.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  addNumImm(I.getOperand(1).getCImm()->getValue(), MIB);
}

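// Materializes a 32-bit integer constant, reusing an existing definition from
// the global registry when one is already available; a zero value is emitted
// as OpConstantNull.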
Register
SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &I,
                                           const SPIRVType *ResType) const {
  Type *LLVMTy = IntegerType::get(GR.CurMF->getFunction().getContext(), 32);
  const SPIRVType *SpvI32Ty =
      ResType ? ResType : GR.getOrCreateSPIRVIntegerType(32, I, TII);
  // Find a constant in DT or build a new one.
  auto ConstInt = ConstantInt::get(LLVMTy, Val);
  Register NewReg = GR.find(ConstInt, GR.CurMF);
  if (!NewReg.isValid()) {
    NewReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
    GR.add(ConstInt, GR.CurMF, NewReg);
    MachineInstr *MI;
    MachineBasicBlock &BB = *I.getParent();
    if (Val == 0) {
      MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
               .addDef(NewReg)
               .addUse(GR.getSPIRVTypeID(SpvI32Ty));
    } else {
      MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
               .addDef(NewReg)
               .addUse(GR.getSPIRVTypeID(SpvI32Ty))
               .addImm(APInt(32, Val).getZExtValue());
    }
    constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
  }
  return NewReg;
}

bool SPIRVInstructionSelector::selectFCmp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  unsigned CmpOp = getFCmpOpcode(I.getOperand(1).getPredicate());
  return selectCmp(ResVReg, ResType, CmpOp, I);
}

Register SPIRVInstructionSelector::buildZerosVal(const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  // OpenCL uses nulls for Zero. In HLSL we don't use null constants.
  bool ZeroAsNull = STI.isOpenCLEnv();
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    return GR.getOrCreateConstVector(0UL, I, ResType, TII, ZeroAsNull);
  return GR.getOrCreateConstInt(0, I, ResType, TII, ZeroAsNull);
}

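// Returns +0.0 in the APFloat semantics matching the scalar float type (half,
// float, or double), defaulting to single precision for unknown types.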
static APFloat getZeroFP(const Type *LLVMFloatTy) {
  if (!LLVMFloatTy)
    return APFloat::getZero(APFloat::IEEEsingle());
  switch (LLVMFloatTy->getScalarType()->getTypeID()) {
  case Type::HalfTyID:
    return APFloat::getZero(APFloat::IEEEhalf());
  default:
  case Type::FloatTyID:
    return APFloat::getZero(APFloat::IEEEsingle());
  case Type::DoubleTyID:
    return APFloat::getZero(APFloat::IEEEdouble());
  }
}

Register SPIRVInstructionSelector::buildZerosValF(const SPIRVType *ResType,
                                                  MachineInstr &I) const {
  // OpenCL uses nulls for Zero. In HLSL we don't use null constants.
  bool ZeroAsNull = STI.isOpenCLEnv();
  APFloat VZero = getZeroFP(GR.getTypeForSPIRVType(ResType));
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    return GR.getOrCreateConstVector(VZero, I, ResType, TII, ZeroAsNull);
  return GR.getOrCreateConstFP(VZero, I, ResType, TII, ZeroAsNull);
}

Register SPIRVInstructionSelector::buildOnesVal(bool AllOnes,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
  APInt One =
      AllOnes ? APInt::getAllOnes(BitWidth) : APInt::getOneBitSet(BitWidth, 0);
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    return GR.getOrCreateConstVector(One.getZExtValue(), I, ResType, TII);
  return GR.getOrCreateConstInt(One.getZExtValue(), I, ResType, TII);
}

1523bool SPIRVInstructionSelector::selectSelect(Register ResVReg,
1524 const SPIRVType *ResType,
1525 MachineInstr &I,
1526 bool IsSigned) const {
1527 // To extend a bool, we need to use OpSelect between constants.
1528 Register ZeroReg = buildZerosVal(ResType, I);
1529 Register OneReg = buildOnesVal(IsSigned, ResType, I);
1530 bool IsScalarBool =
1531 GR.isScalarOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool);
1532 unsigned Opcode =
1533 IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectSIVCond;
1534 return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
1535 .addDef(ResVReg)
1536 .addUse(GR.getSPIRVTypeID(ResType))
1537 .addUse(I.getOperand(1).getReg())
1538 .addUse(OneReg)
1539 .addUse(ZeroReg)
1540 .constrainAllUses(TII, TRI, RBI);
1541}
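// Sketch of the emitted bool extension (the signed case picks all-ones for
// true, register names illustrative):
//   %r = OpSelect %int %cond %ones %zero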
1542
1543bool SPIRVInstructionSelector::selectIToF(Register ResVReg,
1544 const SPIRVType *ResType,
1545 MachineInstr &I, bool IsSigned,
1546 unsigned Opcode) const {
1547 Register SrcReg = I.getOperand(1).getReg();
1548 // We can convert a bool value directly to the float type without OpConvert*ToF;
1549 // however, the translator generates OpSelect+OpConvert*ToF, so we do the same.
1550 if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool)) {
1551 unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
1552 SPIRVType *TmpType = GR.getOrCreateSPIRVIntegerType(BitWidth, I, TII);
1553 if (ResType->getOpcode() == SPIRV::OpTypeVector) {
1554 const unsigned NumElts = ResType->getOperand(2).getImm();
1555 TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts, I, TII);
1556 }
1557 SrcReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1558 selectSelect(SrcReg, TmpType, I, false);
1559 }
1560 return selectUnOpWithSrc(ResVReg, ResType, I, SrcReg, Opcode);
1561}
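// E.g. `sitofp i1 %b to float` lowers, roughly, to the pair
//   %tmp = OpSelect %int %b %ones %zero
//   %r   = OpConvertSToF %float %tmp
// matching the output shape of the SPIRV-LLVM Translator.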
1562
1563bool SPIRVInstructionSelector::selectExt(Register ResVReg,
1564 const SPIRVType *ResType,
1565 MachineInstr &I, bool IsSigned) const {
1566 if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool))
1567 return selectSelect(ResVReg, ResType, I, IsSigned);
1568 unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
1569 return selectUnOp(ResVReg, ResType, I, Opcode);
1570}
1571
1572bool SPIRVInstructionSelector::selectIntToBool(Register IntReg,
1573 Register ResVReg,
1574 MachineInstr &I,
1575 const SPIRVType *IntTy,
1576 const SPIRVType *BoolTy) const {
1577 // To truncate to a bool, we use OpBitwiseAnd 1 and OpINotEqual to zero.
1578 Register BitIntReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1579 bool IsVectorTy = IntTy->getOpcode() == SPIRV::OpTypeVector;
1580 unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
1581 Register Zero = buildZerosVal(IntTy, I);
1582 Register One = buildOnesVal(false, IntTy, I);
1583 MachineBasicBlock &BB = *I.getParent();
1584 BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
1585 .addDef(BitIntReg)
1586 .addUse(GR.getSPIRVTypeID(IntTy))
1587 .addUse(IntReg)
1588 .addUse(One)
1589 .constrainAllUses(TII, TRI, RBI);
1590 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
1591 .addDef(ResVReg)
1592 .addUse(GR.getSPIRVTypeID(BoolTy))
1593 .addUse(BitIntReg)
1594 .addUse(Zero)
1595 .constrainAllUses(TII, TRI, RBI);
1596}
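// Sketch for `trunc i32 %x to i1` (scalar case; OpBitwiseAndS is the
// backend's scalar pseudo for OpBitwiseAnd):
//   %bit = OpBitwiseAnd %int %x %one
//   %r   = OpINotEqual %bool %bit %zero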
1597
1598bool SPIRVInstructionSelector::selectTrunc(Register ResVReg,
1599 const SPIRVType *ResType,
1600 MachineInstr &I) const {
1601 if (GR.isScalarOrVectorOfType(ResVReg, SPIRV::OpTypeBool)) {
1602 Register IntReg = I.getOperand(1).getReg();
1603 const SPIRVType *ArgType = GR.getSPIRVTypeForVReg(IntReg);
1604 return selectIntToBool(IntReg, ResVReg, I, ArgType, ResType);
1605 }
1606 bool IsSigned = GR.isScalarOrVectorSigned(ResType);
1607 unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
1608 return selectUnOp(ResVReg, ResType, I, Opcode);
1609}
1610
1611bool SPIRVInstructionSelector::selectConst(Register ResVReg,
1612 const SPIRVType *ResType,
1613 const APInt &Imm,
1614 MachineInstr &I) const {
1615 unsigned TyOpcode = ResType->getOpcode();
1616 assert(TyOpcode != SPIRV::OpTypePointer || Imm.isZero());
1617 MachineBasicBlock &BB = *I.getParent();
1618 if ((TyOpcode == SPIRV::OpTypePointer || TyOpcode == SPIRV::OpTypeEvent) &&
1619 Imm.isZero())
1620 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
1621 .addDef(ResVReg)
1622 .addUse(GR.getSPIRVTypeID(ResType))
1623 .constrainAllUses(TII, TRI, RBI);
1624 if (TyOpcode == SPIRV::OpTypeInt) {
1625 assert(Imm.getBitWidth() <= 64 && "Unsupported integer width!");
1626 Register Reg = GR.getOrCreateConstInt(Imm.getZExtValue(), I, ResType, TII);
1627 if (Reg == ResVReg)
1628 return true;
1629 return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
1630 .addDef(ResVReg)
1631 .addUse(Reg)
1632 .constrainAllUses(TII, TRI, RBI);
1633 }
1634 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
1635 .addDef(ResVReg)
1636 .addUse(GR.getSPIRVTypeID(ResType));
1637 // <=32-bit integers should be caught by the sdag pattern.
1638 assert(Imm.getBitWidth() > 32);
1639 addNumImm(Imm, MIB);
1640 return MIB.constrainAllUses(TII, TRI, RBI);
1641}
1642
1643bool SPIRVInstructionSelector::selectOpUndef(Register ResVReg,
1644 const SPIRVType *ResType,
1645 MachineInstr &I) const {
1646 return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
1647 .addDef(ResVReg)
1648 .addUse(GR.getSPIRVTypeID(ResType))
1649 .constrainAllUses(TII, TRI, RBI);
1650}
1651
1652static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
1653 assert(MO.isReg());
1654 const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
1655 if (TypeInst->getOpcode() != SPIRV::ASSIGN_TYPE)
1656 return false;
1657 assert(TypeInst->getOperand(1).isReg());
1658 MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
1659 return ImmInst->getOpcode() == TargetOpcode::G_CONSTANT;
1660}
1661
1662static int64_t foldImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
1663 const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
1664 MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
1665 assert(ImmInst->getOpcode() == TargetOpcode::G_CONSTANT);
1666 return ImmInst->getOperand(1).getCImm()->getZExtValue();
1667}
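// Both helpers above assume the pre-isel MIR shape produced for constant
// operands, roughly (register names illustrative):
//   %imm:_(s32) = G_CONSTANT i32 4
//   %op = ASSIGN_TYPE %imm, %spirv_int_ty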
1668
1669bool SPIRVInstructionSelector::selectInsertVal(Register ResVReg,
1670 const SPIRVType *ResType,
1671 MachineInstr &I) const {
1672 MachineBasicBlock &BB = *I.getParent();
1673 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeInsert))
1674 .addDef(ResVReg)
1675 .addUse(GR.getSPIRVTypeID(ResType))
1676 // object to insert
1677 .addUse(I.getOperand(3).getReg())
1678 // composite to insert into
1679 .addUse(I.getOperand(2).getReg());
1680 for (unsigned i = 4; i < I.getNumOperands(); i++)
1681 MIB.addImm(foldImm(I.getOperand(i), MRI));
1682 return MIB.constrainAllUses(TII, TRI, RBI);
1683}
1684
1685bool SPIRVInstructionSelector::selectExtractVal(Register ResVReg,
1686 const SPIRVType *ResType,
1687 MachineInstr &I) const {
1688 MachineBasicBlock &BB = *I.getParent();
1689 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
1690 .addDef(ResVReg)
1691 .addUse(GR.getSPIRVTypeID(ResType))
1692 .addUse(I.getOperand(2).getReg());
1693 for (unsigned i = 3; i < I.getNumOperands(); i++)
1694 MIB.addImm(foldImm(I.getOperand(i), MRI));
1695 return MIB.constrainAllUses(TII, TRI, RBI);
1696}
1697
1698bool SPIRVInstructionSelector::selectInsertElt(Register ResVReg,
1699 const SPIRVType *ResType,
1700 MachineInstr &I) const {
1701 if (isImm(I.getOperand(4), MRI))
1702 return selectInsertVal(ResVReg, ResType, I);
1703 MachineBasicBlock &BB = *I.getParent();
1704 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorInsertDynamic))
1705 .addDef(ResVReg)
1706 .addUse(GR.getSPIRVTypeID(ResType))
1707 .addUse(I.getOperand(2).getReg())
1708 .addUse(I.getOperand(3).getReg())
1709 .addUse(I.getOperand(4).getReg())
1710 .constrainAllUses(TII, TRI, RBI);
1711}
1712
1713bool SPIRVInstructionSelector::selectExtractElt(Register ResVReg,
1714 const SPIRVType *ResType,
1715 MachineInstr &I) const {
1716 if (isImm(I.getOperand(3), MRI))
1717 return selectExtractVal(ResVReg, ResType, I);
1718 MachineBasicBlock &BB = *I.getParent();
1719 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorExtractDynamic))
1720 .addDef(ResVReg)
1721 .addUse(GR.getSPIRVTypeID(ResType))
1722 .addUse(I.getOperand(2).getReg())
1723 .addUse(I.getOperand(3).getReg())
1724 .constrainAllUses(TII, TRI, RBI);
1725}
1726
1727bool SPIRVInstructionSelector::selectGEP(Register ResVReg,
1728 const SPIRVType *ResType,
1729 MachineInstr &I) const {
1730 const bool IsGEPInBounds = I.getOperand(2).getImm();
1731
1732 // OpAccessChain could be used for OpenCL, but the SPIRV-LLVM Translator only
1733 // relies on PtrAccessChain, so we'll try not to deviate. For Vulkan, however,
1734 // we have to use Op[InBounds]AccessChain.
1735 const unsigned Opcode = STI.isVulkanEnv()
1736 ? (IsGEPInBounds ? SPIRV::OpInBoundsAccessChain
1737 : SPIRV::OpAccessChain)
1738 : (IsGEPInBounds ? SPIRV::OpInBoundsPtrAccessChain
1739 : SPIRV::OpPtrAccessChain);
1740
1741 auto Res = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
1742 .addDef(ResVReg)
1743 .addUse(GR.getSPIRVTypeID(ResType))
1744 // Object to get a pointer to.
1745 .addUse(I.getOperand(3).getReg());
1746 // Adding indices.
1747 const unsigned StartingIndex =
1748 (Opcode == SPIRV::OpAccessChain || Opcode == SPIRV::OpInBoundsAccessChain)
1749 ? 5
1750 : 4;
1751 for (unsigned i = StartingIndex; i < I.getNumExplicitOperands(); ++i)
1752 Res.addUse(I.getOperand(i).getReg());
1753 return Res.constrainAllUses(TII, TRI, RBI);
1754}
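// Sketch (OpenCL path): `getelementptr inbounds i32, ptr %p, i64 1` selects
// roughly
//   %r = OpInBoundsPtrAccessChain %ptr_ty %p %idx
// OpAccessChain lacks OpPtrAccessChain's leading Element operand, which is
// why the Vulkan path starts copying indices one operand later.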
1755
1756// Maybe wrap a value into OpSpecConstantOp
1757bool SPIRVInstructionSelector::wrapIntoSpecConstantOp(
1758 MachineInstr &I, SmallVector<Register> &CompositeArgs) const {
1759 bool Result = true;
1760 unsigned Lim = I.getNumExplicitOperands();
1761 for (unsigned i = I.getNumExplicitDefs() + 1; i < Lim; ++i) {
1762 Register OpReg = I.getOperand(i).getReg();
1763 SPIRVType *OpDefine = MRI->getVRegDef(OpReg);
1764 SPIRVType *OpType = GR.getSPIRVTypeForVReg(OpReg);
1765 if (!OpDefine || !OpType || isConstReg(MRI, OpDefine) ||
1766 OpDefine->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST) {
1767 // The case of G_ADDRSPACE_CAST inside spv_const_composite() is processed
1768 // by selectAddrSpaceCast()
1769 CompositeArgs.push_back(OpReg);
1770 continue;
1771 }
1772 MachineFunction *MF = I.getMF();
1773 Register WrapReg = GR.find(OpDefine, MF);
1774 if (WrapReg.isValid()) {
1775 CompositeArgs.push_back(WrapReg);
1776 continue;
1777 }
1778 // Create a new register for the wrapper
1779 WrapReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1780 GR.add(OpDefine, MF, WrapReg);
1781 CompositeArgs.push_back(WrapReg);
1782 // Decorate the wrapper register and generate a new instruction
1783 MRI->setType(WrapReg, LLT::pointer(0, 32));
1784 GR.assignSPIRVTypeToVReg(OpType, WrapReg, *MF);
1785 MachineBasicBlock &BB = *I.getParent();
1786 Result = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
1787 .addDef(WrapReg)
1788 .addUse(GR.getSPIRVTypeID(OpType))
1789 .addImm(static_cast<uint32_t>(SPIRV::Opcode::Bitcast))
1790 .addUse(OpReg)
1791 .constrainAllUses(TII, TRI, RBI);
1792 if (!Result)
1793 break;
1794 }
1795 return Result;
1796}
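// Sketch: a non-constant operand %op of spv_const_composite gets wrapped as
//   %w = OpSpecConstantOp %op_ty Bitcast %op
// so the enclosing OpConstantComposite sees only constant-like arguments.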
1797
1798bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
1799 const SPIRVType *ResType,
1800 MachineInstr &I) const {
1801 MachineBasicBlock &BB = *I.getParent();
1802 Intrinsic::ID IID = cast<GIntrinsic>(I).getIntrinsicID();
1803 switch (IID) {
1804 case Intrinsic::spv_load:
1805 return selectLoad(ResVReg, ResType, I);
1806 case Intrinsic::spv_store:
1807 return selectStore(I);
1808 case Intrinsic::spv_extractv:
1809 return selectExtractVal(ResVReg, ResType, I);
1810 case Intrinsic::spv_insertv:
1811 return selectInsertVal(ResVReg, ResType, I);
1812 case Intrinsic::spv_extractelt:
1813 return selectExtractElt(ResVReg, ResType, I);
1814 case Intrinsic::spv_insertelt:
1815 return selectInsertElt(ResVReg, ResType, I);
1816 case Intrinsic::spv_gep:
1817 return selectGEP(ResVReg, ResType, I);
1818 case Intrinsic::spv_unref_global:
1819 case Intrinsic::spv_init_global: {
1820 MachineInstr *MI = MRI->getVRegDef(I.getOperand(1).getReg());
1821 MachineInstr *Init = I.getNumExplicitOperands() > 2
1822 ? MRI->getVRegDef(I.getOperand(2).getReg())
1823 : nullptr;
1824 assert(MI);
1825 return selectGlobalValue(MI->getOperand(0).getReg(), *MI, Init);
1826 }
1827 case Intrinsic::spv_undef: {
1828 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
1829 .addDef(ResVReg)
1830 .addUse(GR.getSPIRVTypeID(ResType));
1831 return MIB.constrainAllUses(TII, TRI, RBI);
1832 }
1833 case Intrinsic::spv_const_composite: {
1834 // If no values are attached, the composite is a null constant.
1835 bool IsNull = I.getNumExplicitDefs() + 1 == I.getNumExplicitOperands();
1836 // Select a proper instruction.
1837 unsigned Opcode = SPIRV::OpConstantNull;
1838 SmallVector<Register> CompositeArgs;
1839 if (!IsNull) {
1840 Opcode = SPIRV::OpConstantComposite;
1841 if (!wrapIntoSpecConstantOp(I, CompositeArgs))
1842 return false;
1843 }
1844 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
1845 .addDef(ResVReg)
1846 .addUse(GR.getSPIRVTypeID(ResType));
1847 // Skip the type MD node we already used when generating assign.type for this.
1848 if (!IsNull) {
1849 for (Register OpReg : CompositeArgs)
1850 MIB.addUse(OpReg);
1851 }
1852 return MIB.constrainAllUses(TII, TRI, RBI);
1853 }
1854 case Intrinsic::spv_assign_name: {
1855 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpName));
1856 MIB.addUse(I.getOperand(I.getNumExplicitDefs() + 1).getReg());
1857 for (unsigned i = I.getNumExplicitDefs() + 2;
1858 i < I.getNumExplicitOperands(); ++i) {
1859 MIB.addImm(I.getOperand(i).getImm());
1860 }
1861 return MIB.constrainAllUses(TII, TRI, RBI);
1862 }
1863 case Intrinsic::spv_switch: {
1864 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSwitch));
1865 for (unsigned i = 1; i < I.getNumExplicitOperands(); ++i) {
1866 if (I.getOperand(i).isReg())
1867 MIB.addReg(I.getOperand(i).getReg());
1868 else if (I.getOperand(i).isCImm())
1869 addNumImm(I.getOperand(i).getCImm()->getValue(), MIB);
1870 else if (I.getOperand(i).isMBB())
1871 MIB.addMBB(I.getOperand(i).getMBB());
1872 else
1873 llvm_unreachable("Unexpected OpSwitch operand");
1874 }
1875 return MIB.constrainAllUses(TII, TRI, RBI);
1876 }
1877 case Intrinsic::spv_cmpxchg:
1878 return selectAtomicCmpXchg(ResVReg, ResType, I);
1879 case Intrinsic::spv_unreachable:
1880 BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUnreachable));
1881 break;
1882 case Intrinsic::spv_alloca:
1883 return selectFrameIndex(ResVReg, ResType, I);
1884 case Intrinsic::spv_alloca_array:
1885 return selectAllocaArray(ResVReg, ResType, I);
1886 case Intrinsic::spv_assume:
1887 if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
1888 BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpAssumeTrueKHR))
1889 .addUse(I.getOperand(1).getReg());
1890 break;
1891 case Intrinsic::spv_expect:
1892 if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
1893 BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExpectKHR))
1894 .addDef(ResVReg)
1895 .addUse(GR.getSPIRVTypeID(ResType))
1896 .addUse(I.getOperand(2).getReg())
1897 .addUse(I.getOperand(3).getReg());
1898 break;
1899 case Intrinsic::spv_thread_id:
1900 return selectSpvThreadId(ResVReg, ResType, I);
1901 case Intrinsic::spv_all:
1902 return selectAll(ResVReg, ResType, I);
1903 case Intrinsic::spv_any:
1904 return selectAny(ResVReg, ResType, I);
1905 case Intrinsic::spv_lifetime_start:
1906 case Intrinsic::spv_lifetime_end: {
1907 unsigned Op = IID == Intrinsic::spv_lifetime_start ? SPIRV::OpLifetimeStart
1908 : SPIRV::OpLifetimeStop;
1909 int64_t Size = I.getOperand(I.getNumExplicitDefs() + 1).getImm();
1910 Register PtrReg = I.getOperand(I.getNumExplicitDefs() + 2).getReg();
1911 unsigned PointeeOpType = GR.getPointeeTypeOp(PtrReg);
1912 bool IsNonvoidPtr = PointeeOpType != 0 && PointeeOpType != SPIRV::OpTypeVoid;
1913 if (Size == -1 || IsNonvoidPtr)
1914 Size = 0;
1915 BuildMI(BB, I, I.getDebugLoc(), TII.get(Op)).addUse(PtrReg).addImm(Size);
1916 } break;
1917 default: {
1918 std::string DiagMsg;
1919 raw_string_ostream OS(DiagMsg);
1920 I.print(OS);
1921 DiagMsg = "Intrinsic selection not implemented: " + DiagMsg;
1922 report_fatal_error(DiagMsg.c_str(), false);
1923 }
1924 }
1925 return true;
1926}
1927
1928bool SPIRVInstructionSelector::selectAllocaArray(Register ResVReg,
1929 const SPIRVType *ResType,
1930 MachineInstr &I) const {
1931 // There was an allocation size parameter to the allocation instruction
1932 // that is not 1.
1933 MachineBasicBlock &BB = *I.getParent();
1934 return BuildMI(BB, I, I.getDebugLoc(),
1935 TII.get(SPIRV::OpVariableLengthArrayINTEL))
1936 .addDef(ResVReg)
1937 .addUse(GR.getSPIRVTypeID(ResType))
1938 .addUse(I.getOperand(2).getReg())
1939 .constrainAllUses(TII, TRI, RBI);
1940}
1941
1942bool SPIRVInstructionSelector::selectFrameIndex(Register ResVReg,
1943 const SPIRVType *ResType,
1944 MachineInstr &I) const {
1945 // Change order of instructions if needed: all OpVariable instructions in a
1946 // function must be the first instructions in the first block
1947 MachineFunction *MF = I.getParent()->getParent();
1948 MachineBasicBlock *MBB = &MF->front();
1949 auto It = MBB->SkipPHIsAndLabels(MBB->begin()), E = MBB->end();
1950 bool IsHeader = false;
1951 unsigned Opcode;
1952 for (; It != E && It != I; ++It) {
1953 Opcode = It->getOpcode();
1954 if (Opcode == SPIRV::OpFunction || Opcode == SPIRV::OpFunctionParameter) {
1955 IsHeader = true;
1956 } else if (IsHeader &&
1957 !(Opcode == SPIRV::ASSIGN_TYPE || Opcode == SPIRV::OpLabel)) {
1958 ++It;
1959 break;
1960 }
1961 }
1962 return BuildMI(*MBB, It, It->getDebugLoc(), TII.get(SPIRV::OpVariable))
1963 .addDef(ResVReg)
1964 .addUse(GR.getSPIRVTypeID(ResType))
1965 .addImm(static_cast<uint32_t>(SPIRV::StorageClass::Function))
1966 .constrainAllUses(TII, TRI, RBI);
1967}
1968
1969bool SPIRVInstructionSelector::selectBranch(MachineInstr &I) const {
1970 // InstructionSelector walks backwards through the instructions. We can use
1971 // both a G_BR and a G_BRCOND to create an OpBranchConditional. We hit G_BR
1972 // first, so we can generate an OpBranchConditional here. If there is no
1973 // G_BRCOND, we just use OpBranch for a regular unconditional branch.
1974 const MachineInstr *PrevI = I.getPrevNode();
1975 MachineBasicBlock &MBB = *I.getParent();
1976 if (PrevI != nullptr && PrevI->getOpcode() == TargetOpcode::G_BRCOND) {
1977 return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
1978 .addUse(PrevI->getOperand(0).getReg())
1979 .addMBB(PrevI->getOperand(1).getMBB())
1980 .addMBB(I.getOperand(0).getMBB())
1981 .constrainAllUses(TII, TRI, RBI);
1982 }
1983 return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranch))
1984 .addMBB(I.getOperand(0).getMBB())
1985 .constrainAllUses(TII, TRI, RBI);
1986}
1987
1988bool SPIRVInstructionSelector::selectBranchCond(MachineInstr &I) const {
1989 // InstructionSelector walks backwards through the instructions. For an
1990 // explicit conditional branch with no fallthrough, we use both a G_BR and a
1991 // G_BRCOND to create an OpBranchConditional. We should hit G_BR first, and
1992 // generate the OpBranchConditional in selectBranch above.
1993 //
1994 // If an OpBranchConditional has been generated, we simply return, as the work
1995 // is already done. If there is no OpBranchConditional, LLVM must be relying on
1996 // implicit fallthrough to the next basic block, so we need to create an
1997 // OpBranchConditional with an explicit "false" argument pointing to the next
1998 // basic block that LLVM would fall through to.
1999 const MachineInstr *NextI = I.getNextNode();
2000 // Check if this has already been successfully selected.
2001 if (NextI != nullptr && NextI->getOpcode() == SPIRV::OpBranchConditional)
2002 return true;
2003 // Must be relying on implicit block fallthrough, so generate an
2004 // OpBranchConditional with the "next" basic block as the "false" target.
2005 MachineBasicBlock &MBB = *I.getParent();
2006 unsigned NextMBBNum = MBB.getNextNode()->getNumber();
2007 MachineBasicBlock *NextMBB = I.getMF()->getBlockNumbered(NextMBBNum);
2008 return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
2009 .addUse(I.getOperand(0).getReg())
2010 .addMBB(I.getOperand(1).getMBB())
2011 .addMBB(NextMBB)
2012 .constrainAllUses(TII, TRI, RBI);
2013}
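// Sketch of the fallthrough case: a trailing `G_BRCOND %c, %bb.2` in %bb.1
// with no following G_BR selects, roughly,
//   OpBranchConditional %c %bb.2 %bb.3
// where %bb.3 stands for the next block in layout order.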
2014
2015bool SPIRVInstructionSelector::selectPhi(Register ResVReg,
2016 const SPIRVType *ResType,
2017 MachineInstr &I) const {
2018 auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpPhi))
2019 .addDef(ResVReg)
2020 .addUse(GR.getSPIRVTypeID(ResType));
2021 const unsigned NumOps = I.getNumOperands();
2022 for (unsigned i = 1; i < NumOps; i += 2) {
2023 MIB.addUse(I.getOperand(i + 0).getReg());
2024 MIB.addMBB(I.getOperand(i + 1).getMBB());
2025 }
2026 return MIB.constrainAllUses(TII, TRI, RBI);
2027}
2028
2029bool SPIRVInstructionSelector::selectGlobalValue(
2030 Register ResVReg, MachineInstr &I, const MachineInstr *Init) const {
2031 // FIXME: don't use MachineIRBuilder here, replace it with BuildMI.
2032 MachineIRBuilder MIRBuilder(I);
2033 const GlobalValue *GV = I.getOperand(1).getGlobal();
2034 Type *GVType = GR.getDeducedGlobalValueType(GV);
2035 SPIRVType *PointerBaseType;
2036 if (GVType->isArrayTy()) {
2037 SPIRVType *ArrayElementType =
2038 GR.getOrCreateSPIRVType(GVType->getArrayElementType(), MIRBuilder,
2039 SPIRV::AccessQualifier::ReadWrite, false);
2040 PointerBaseType = GR.getOrCreateSPIRVArrayType(
2041 ArrayElementType, GVType->getArrayNumElements(), I, TII);
2042 } else {
2043 PointerBaseType = GR.getOrCreateSPIRVType(
2044 GVType, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, false);
2045 }
2046 SPIRVType *ResType = GR.getOrCreateSPIRVPointerType(
2047 PointerBaseType, I, TII,
2048 addressSpaceToStorageClass(GV->getAddressSpace(), STI));
2049
2050 std::string GlobalIdent;
2051 if (!GV->hasName()) {
2052 unsigned &ID = UnnamedGlobalIDs[GV];
2053 if (ID == 0)
2054 ID = UnnamedGlobalIDs.size();
2055 GlobalIdent = "__unnamed_" + Twine(ID).str();
2056 } else {
2057 GlobalIdent = GV->getGlobalIdentifier();
2058 }
2059
2060 // Behaviour of functions as operands depends on availability of the
2061 // corresponding extension (SPV_INTEL_function_pointers):
2062 // - If there is an extension to operate with functions as operands:
2063 // We create a proper constant operand and evaluate a correct type for a
2064 // function pointer.
2065 // - Without the required extension:
2066 // We have functions as operands in tests with blocks of instructions, e.g. in
2067 // transcoding/global_block.ll. These operands are not used and should be
2068 // substituted by zero constants. Their type is expected to be always
2069 // OpTypePointer Function %uchar.
2070 if (isa<Function>(GV)) {
2071 const Constant *ConstVal = GV;
2072 MachineBasicBlock &BB = *I.getParent();
2073 Register NewReg = GR.find(ConstVal, GR.CurMF);
2074 if (!NewReg.isValid()) {
2075 Register NewReg = ResVReg;
2076 GR.add(ConstVal, GR.CurMF, NewReg);
2077 const Function *GVFun =
2078 STI.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers)
2079 ? dyn_cast<Function>(GV)
2080 : nullptr;
2081 if (GVFun) {
2082 // References to a function via function pointers generate virtual
2083 // registers without a definition. We will resolve them later, during the
2084 // module analysis stage.
2085 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2086 Register FuncVReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
2087 MRI->setRegClass(FuncVReg, &SPIRV::IDRegClass);
2088 MachineInstrBuilder MB =
2089 BuildMI(BB, I, I.getDebugLoc(),
2090 TII.get(SPIRV::OpConstantFunctionPointerINTEL))
2091 .addDef(NewReg)
2092 .addUse(GR.getSPIRVTypeID(ResType))
2093 .addUse(FuncVReg);
2094 // Map the function pointer to the used Function.
2095 GR.recordFunctionPointer(&MB.getInstr()->getOperand(2), GVFun);
2096 return MB.constrainAllUses(TII, TRI, RBI);
2097 }
2098 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
2099 .addDef(NewReg)
2100 .addUse(GR.getSPIRVTypeID(ResType))
2101 .constrainAllUses(TII, TRI, RBI);
2102 }
2103 assert(NewReg != ResVReg);
2104 return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
2105 .addDef(ResVReg)
2106 .addUse(NewReg)
2107 .constrainAllUses(TII, TRI, RBI);
2108 }
2109 auto GlobalVar = cast<GlobalVariable>(GV);
2110 assert(GlobalVar->getName() != "llvm.global.annotations");
2111
2112 bool HasInit = GlobalVar->hasInitializer() &&
2113 !isa<UndefValue>(GlobalVar->getInitializer());
2114 // Skip the empty declaration for GVs with initializers until we get the
2115 // declaration with the passed initializer.
2116 if (HasInit && !Init)
2117 return true;
2118
2119 unsigned AddrSpace = GV->getAddressSpace();
2120 SPIRV::StorageClass::StorageClass Storage =
2121 addressSpaceToStorageClass(AddrSpace, STI);
2122 bool HasLnkTy = GV->getLinkage() != GlobalValue::InternalLinkage &&
2123 Storage != SPIRV::StorageClass::Function;
2124 SPIRV::LinkageType::LinkageType LnkType =
2125 (GV->isDeclaration() || GV->hasAvailableExternallyLinkage())
2126 ? SPIRV::LinkageType::Import
2127 : (GV->getLinkage() == GlobalValue::LinkOnceODRLinkage &&
2128 STI.canUseExtension(SPIRV::Extension::SPV_KHR_linkonce_odr)
2129 ? SPIRV::LinkageType::LinkOnceODR
2130 : SPIRV::LinkageType::Export);
2131
2132 Register Reg = GR.buildGlobalVariable(ResVReg, ResType, GlobalIdent, GV,
2133 Storage, Init, GlobalVar->isConstant(),
2134 HasLnkTy, LnkType, MIRBuilder, true);
2135 return Reg.isValid();
2136}
2137
2138bool SPIRVInstructionSelector::selectLog10(Register ResVReg,
2139 const SPIRVType *ResType,
2140 MachineInstr &I) const {
2141 if (STI.canUseExtInstSet(SPIRV::InstructionSet::OpenCL_std)) {
2142 return selectExtInst(ResVReg, ResType, I, CL::log10);
2143 }
2144
2145 // There is no log10 instruction in the GLSL Extended Instruction set, so it
2146 // is implemented as:
2147 // log10(x) = log2(x) * (1 / log2(10))
2148 // = log2(x) * 0.30103
2149
2150 MachineIRBuilder MIRBuilder(I);
2151 MachineBasicBlock &BB = *I.getParent();
2152
2153 // Build log2(x).
2154 Register VarReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
2155 bool Result =
2156 BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
2157 .addDef(VarReg)
2158 .addUse(GR.getSPIRVTypeID(ResType))
2159 .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
2160 .addImm(GL::Log2)
2161 .add(I.getOperand(1))
2162 .constrainAllUses(TII, TRI, RBI);
2163
2164 // Build 0.30103.
2165 assert(ResType->getOpcode() == SPIRV::OpTypeVector ||
2166 ResType->getOpcode() == SPIRV::OpTypeFloat);
2167 // TODO: Add matrix implementation once supported by the HLSL frontend.
2168 const SPIRVType *SpirvScalarType =
2169 ResType->getOpcode() == SPIRV::OpTypeVector
2170 ? GR.getSPIRVTypeForVReg(ResType->getOperand(1).getReg())
2171 : ResType;
2172 Register ScaleReg =
2173 GR.buildConstantFP(APFloat(0.30103f), MIRBuilder, SpirvScalarType);
2174
2175 // Multiply log2(x) by 0.30103 to get log10(x) result.
2176 auto Opcode = ResType->getOpcode() == SPIRV::OpTypeVector
2177 ? SPIRV::OpVectorTimesScalar
2178 : SPIRV::OpFMulS;
2179 Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
2180 .addDef(ResVReg)
2181 .addUse(GR.getSPIRVTypeID(ResType))
2182 .addUse(VarReg)
2183 .addUse(ScaleReg)
2184 .constrainAllUses(TII, TRI, RBI);
2185
2186 return Result;
2187}
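// Sanity check of the constant above: log10(100) = 2, and
// log2(100) * 0.30103 = 6.643856 * 0.30103 ~= 2.000000, so 1/log2(10)
// ~= 0.30103 holds to about five decimal digits.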
2188
2189bool SPIRVInstructionSelector::selectSpvThreadId(Register ResVReg,
2190 const SPIRVType *ResType,
2191 MachineInstr &I) const {
2192 // DX intrinsic: @llvm.dx.thread.id(i32)
2193 // ID Name Description
2194 // 93 ThreadId reads the thread ID
2195
2196 MachineIRBuilder MIRBuilder(I);
2197 const SPIRVType *U32Type = GR.getOrCreateSPIRVIntegerType(32, MIRBuilder);
2198 const SPIRVType *Vec3Ty =
2199 GR.getOrCreateSPIRVVectorType(U32Type, 3, MIRBuilder);
2200 const SPIRVType *PtrType = GR.getOrCreateSPIRVPointerType(
2201 Vec3Ty, MIRBuilder, SPIRV::StorageClass::Input);
2202
2203 // Create new register for GlobalInvocationID builtin variable.
2204 Register NewRegister =
2205 MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::IDRegClass);
2206 MIRBuilder.getMRI()->setType(NewRegister, LLT::pointer(0, 32));
2207 GR.assignSPIRVTypeToVReg(PtrType, NewRegister, MIRBuilder.getMF());
2208
2209 // Build GlobalInvocationID global variable with the necessary decorations.
2210 Register Variable = GR.buildGlobalVariable(
2211 NewRegister, PtrType,
2212 getLinkStringForBuiltIn(SPIRV::BuiltIn::GlobalInvocationId), nullptr,
2213 SPIRV::StorageClass::Input, nullptr, true, true,
2214 SPIRV::LinkageType::Import, MIRBuilder, false);
2215
2216 // Create new register for loading value.
2217 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2218 Register LoadedRegister = MRI->createVirtualRegister(&SPIRV::IDRegClass);
2219 MIRBuilder.getMRI()->setType(LoadedRegister, LLT::pointer(0, 32));
2220 GR.assignSPIRVTypeToVReg(Vec3Ty, LoadedRegister, MIRBuilder.getMF());
2221
2222 // Load v3uint value from the global variable.
2223 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
2224 .addDef(LoadedRegister)
2225 .addUse(GR.getSPIRVTypeID(Vec3Ty))
2226 .addUse(Variable);
2227
2228 // Get Thread ID index. Expecting operand is a constant immediate value,
2229 // wrapped in a type assignment.
2230 assert(I.getOperand(2).isReg());
2231 Register ThreadIdReg = I.getOperand(2).getReg();
2232 SPIRVType *ConstTy = this->MRI->getVRegDef(ThreadIdReg);
2233 assert(ConstTy && ConstTy->getOpcode() == SPIRV::ASSIGN_TYPE &&
2234 ConstTy->getOperand(1).isReg());
2235 Register ConstReg = ConstTy->getOperand(1).getReg();
2236 const MachineInstr *Const = this->MRI->getVRegDef(ConstReg);
2237 assert(Const && Const->getOpcode() == TargetOpcode::G_CONSTANT);
2238 const llvm::APInt &Val = Const->getOperand(1).getCImm()->getValue();
2239 const uint32_t ThreadId = Val.getZExtValue();
2240
2241 // Extract the thread ID from the loaded vector value.
2242 MachineBasicBlock &BB = *I.getParent();
2243 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
2244 .addDef(ResVReg)
2245 .addUse(GR.getSPIRVTypeID(ResType))
2246 .addUse(LoadedRegister)
2247 .addImm(ThreadId);
2248 return MIB.constrainAllUses(TII, TRI, RBI);
2249}
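// Sketch of the selected sequence for @llvm.dx.thread.id(i32 0)
// (names illustrative; decorations are attached via buildGlobalVariable):
//   %gid = OpVariable %ptr_Input_v3uint Input
//   %vec = OpLoad %v3uint %gid
//   %res = OpCompositeExtract %uint %vec 0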
2250
2251namespace llvm {
2252InstructionSelector *
2253createSPIRVInstructionSelector(const SPIRVTargetMachine &TM,
2254 const SPIRVSubtarget &Subtarget,
2255 const RegisterBankInfo &RBI) {
2256 return new SPIRVInstructionSelector(TM, Subtarget, RBI);
2257}
2258} // namespace llvm