//===- SPIRVInstructionSelector.cpp ------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the targeting of the InstructionSelector class for
// SPIRV.
// TODO: This should be generated by TableGen.
//
//===----------------------------------------------------------------------===//

#include "SPIRV.h"
#include "SPIRVGlobalRegistry.h"
#include "SPIRVInstrInfo.h"
#include "SPIRVRegisterBankInfo.h"
#include "SPIRVRegisterInfo.h"
#include "SPIRVTargetMachine.h"
#include "SPIRVUtils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/IntrinsicsSPIRV.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "spirv-isel"

using namespace llvm;
namespace CL = SPIRV::OpenCLExtInst;
namespace GL = SPIRV::GLSLExtInst;

using ExtInstList =
    std::vector<std::pair<SPIRV::InstructionSet::InstructionSet, uint32_t>>;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

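// Note: per the standard GlobalISel pattern, SPIRVGenGlobalISel.inc is
// spliced in several times in this file; each GET_GLOBALISEL_* guard selects
// a different slice of the TableGen output (the predicate bitset above,
// member declarations and initializers inside the class and its constructor,
// and the selectImpl() implementation further below).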
class SPIRVInstructionSelector : public InstructionSelector {
  const SPIRVSubtarget &STI;
  const SPIRVInstrInfo &TII;
  const SPIRVRegisterInfo &TRI;
  const RegisterBankInfo &RBI;
  SPIRVGlobalRegistry &GR;
  MachineRegisterInfo *MRI;

public:
  SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                           const SPIRVSubtarget &ST,
                           const RegisterBankInfo &RBI);
  void setupMF(MachineFunction &MF, GISelKnownBits *KB,
               CodeGenCoverage &CoverageInfo, ProfileSummaryInfo *PSI,
               BlockFrequencyInfo *BFI) override;
  // Common selection code. Instruction-specific selection occurs in spvSelect.
  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

#define GET_GLOBALISEL_PREDICATES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL

private:
  // tblgen-erated 'select' implementation, used as the initial selector for
  // the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // All instruction-specific selection that didn't happen in "select()".
  // It is basically a large switch/case delegating to all the other select
  // methods.
  bool spvSelect(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectGlobalValue(Register ResVReg, MachineInstr &I,
                         const MachineInstr *Init = nullptr) const;

  bool selectUnOpWithSrc(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I, Register SrcReg,
                         unsigned Opcode) const;
  bool selectUnOp(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  unsigned Opcode) const;

  bool selectLoad(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectStore(MachineInstr &I) const;

  bool selectMemOperation(Register ResVReg, MachineInstr &I) const;

  bool selectAtomicRMW(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I, unsigned NewOpcode) const;

  bool selectAtomicCmpXchg(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectFence(MachineInstr &I) const;

  bool selectAddrSpaceCast(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectBitreverse(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;

  bool selectConstVector(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectCmp(Register ResVReg, const SPIRVType *ResType,
                 unsigned comparisonOpcode, MachineInstr &I) const;

  bool selectICmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectFCmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;

  void renderImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
                   int OpIdx) const;
  void renderFImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
                    int OpIdx) const;

  bool selectConst(Register ResVReg, const SPIRVType *ResType, const APInt &Imm,
                   MachineInstr &I) const;

  bool selectSelect(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                    bool IsSigned) const;
  bool selectIToF(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  bool IsSigned, unsigned Opcode) const;
  bool selectExt(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                 bool IsSigned) const;

  bool selectTrunc(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;

  bool selectIntToBool(Register IntReg, Register ResVReg, MachineInstr &I,
                       const SPIRVType *IntTy, const SPIRVType *BoolTy) const;

  bool selectOpUndef(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I) const;
  bool selectIntrinsic(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractVal(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertVal(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractElt(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertElt(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectGEP(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectFrameIndex(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;

  bool selectBranch(MachineInstr &I) const;
  bool selectBranchCond(MachineInstr &I) const;

  bool selectPhi(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, CL::OpenCLExtInst CLInst) const;
  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, CL::OpenCLExtInst CLInst,
                     GL::GLSLExtInst GLInst) const;
  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, const ExtInstList &ExtInsts) const;

  Register buildI32Constant(uint32_t Val, MachineInstr &I,
                            const SPIRVType *ResType = nullptr) const;

  Register buildZerosVal(const SPIRVType *ResType, MachineInstr &I) const;
  Register buildOnesVal(bool AllOnes, const SPIRVType *ResType,
                        MachineInstr &I) const;
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

SPIRVInstructionSelector::SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                                                   const SPIRVSubtarget &ST,
                                                   const RegisterBankInfo &RBI)
    : InstructionSelector(), STI(ST), TII(*ST.getInstrInfo()),
      TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

void SPIRVInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
                                       CodeGenCoverage &CoverageInfo,
                                       ProfileSummaryInfo *PSI,
                                       BlockFrequencyInfo *BFI) {
  MRI = &MF.getRegInfo();
  GR.setCurrentFunc(MF);
  InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
}

static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI);

// Defined in SPIRVLegalizerInfo.cpp.
extern bool isTypeFoldingSupported(unsigned Opcode);

bool SPIRVInstructionSelector::select(MachineInstr &I) {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  unsigned Opcode = I.getOpcode();
  // If it's not a GMIR instruction, we've selected it already.
  if (!isPreISelGenericOpcode(Opcode)) {
    if (Opcode == SPIRV::ASSIGN_TYPE) { // These pseudos aren't needed anymore.
      auto *Def = MRI->getVRegDef(I.getOperand(1).getReg());
      if (isTypeFoldingSupported(Def->getOpcode())) {
        auto Res = selectImpl(I, *CoverageInfo);
        assert(Res || Def->getOpcode() == TargetOpcode::G_CONSTANT);
        if (Res)
          return Res;
      }
      MRI->replaceRegWith(I.getOperand(1).getReg(), I.getOperand(0).getReg());
      I.removeFromParent();
      return true;
    } else if (I.getNumDefs() == 1) {
      // Make all vregs 32 bits (for SPIR-V IDs).
      MRI->setType(I.getOperand(0).getReg(), LLT::scalar(32));
    }
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  if (I.getNumOperands() != I.getNumExplicitOperands()) {
    LLVM_DEBUG(errs() << "Generic instr has unexpected implicit operands\n");
    return false;
  }

  // Common code for getting return reg+type, and removing selected instr
  // from parent occurs here. Instr-specific selection happens in spvSelect().
  bool HasDefs = I.getNumDefs() > 0;
  Register ResVReg = HasDefs ? I.getOperand(0).getReg() : Register(0);
  SPIRVType *ResType = HasDefs ? GR.getSPIRVTypeForVReg(ResVReg) : nullptr;
  assert(!HasDefs || ResType || I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
  if (spvSelect(ResVReg, ResType, I)) {
    if (HasDefs) // Make all vregs 32 bits (for SPIR-V IDs).
      MRI->setType(ResVReg, LLT::scalar(32));
    I.removeFromParent();
    return true;
  }
  return false;
}

bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  assert(!isTypeFoldingSupported(I.getOpcode()) ||
         I.getOpcode() == TargetOpcode::G_CONSTANT);
  const unsigned Opcode = I.getOpcode();
  switch (Opcode) {
  case TargetOpcode::G_CONSTANT:
    return selectConst(ResVReg, ResType, I.getOperand(1).getCImm()->getValue(),
                       I);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(ResVReg, I);
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectOpUndef(ResVReg, ResType, I);

  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    return selectIntrinsic(ResVReg, ResType, I);
  case TargetOpcode::G_BITREVERSE:
    return selectBitreverse(ResVReg, ResType, I);

  case TargetOpcode::G_BUILD_VECTOR:
    return selectConstVector(ResVReg, ResType, I);

  case TargetOpcode::G_SHUFFLE_VECTOR: {
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorShuffle))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addUse(I.getOperand(1).getReg())
                   .addUse(I.getOperand(2).getReg());
    for (auto V : I.getOperand(3).getShuffleMask())
      MIB.addImm(V);
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case TargetOpcode::G_MEMMOVE:
  case TargetOpcode::G_MEMCPY:
  case TargetOpcode::G_MEMSET:
    return selectMemOperation(ResVReg, I);

  case TargetOpcode::G_ICMP:
    return selectICmp(ResVReg, ResType, I);
  case TargetOpcode::G_FCMP:
    return selectFCmp(ResVReg, ResType, I);

  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndex(ResVReg, ResType, I);

  case TargetOpcode::G_LOAD:
    return selectLoad(ResVReg, ResType, I);
  case TargetOpcode::G_STORE:
    return selectStore(I);

  case TargetOpcode::G_BR:
    return selectBranch(I);
  case TargetOpcode::G_BRCOND:
    return selectBranchCond(I);

  case TargetOpcode::G_PHI:
    return selectPhi(ResVReg, ResType, I);

  case TargetOpcode::G_FPTOSI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToS);
  case TargetOpcode::G_FPTOUI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToU);

  case TargetOpcode::G_SITOFP:
    return selectIToF(ResVReg, ResType, I, true, SPIRV::OpConvertSToF);
  case TargetOpcode::G_UITOFP:
    return selectIToF(ResVReg, ResType, I, false, SPIRV::OpConvertUToF);

  case TargetOpcode::G_CTPOP:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitCount);
  case TargetOpcode::G_SMIN:
    return selectExtInst(ResVReg, ResType, I, CL::s_min, GL::SMin);
  case TargetOpcode::G_UMIN:
    return selectExtInst(ResVReg, ResType, I, CL::u_min, GL::UMin);

  case TargetOpcode::G_SMAX:
    return selectExtInst(ResVReg, ResType, I, CL::s_max, GL::SMax);
  case TargetOpcode::G_UMAX:
    return selectExtInst(ResVReg, ResType, I, CL::u_max, GL::UMax);

  case TargetOpcode::G_FMA:
    return selectExtInst(ResVReg, ResType, I, CL::fma, GL::Fma);

  case TargetOpcode::G_FPOW:
    return selectExtInst(ResVReg, ResType, I, CL::pow, GL::Pow);
  case TargetOpcode::G_FPOWI:
    return selectExtInst(ResVReg, ResType, I, CL::pown);

  case TargetOpcode::G_FEXP:
    return selectExtInst(ResVReg, ResType, I, CL::exp, GL::Exp);
  case TargetOpcode::G_FEXP2:
    return selectExtInst(ResVReg, ResType, I, CL::exp2, GL::Exp2);

  case TargetOpcode::G_FLOG:
    return selectExtInst(ResVReg, ResType, I, CL::log, GL::Log);
  case TargetOpcode::G_FLOG2:
    return selectExtInst(ResVReg, ResType, I, CL::log2, GL::Log2);
  case TargetOpcode::G_FLOG10:
    return selectExtInst(ResVReg, ResType, I, CL::log10);

  case TargetOpcode::G_FABS:
    return selectExtInst(ResVReg, ResType, I, CL::fabs, GL::FAbs);
  case TargetOpcode::G_ABS:
    return selectExtInst(ResVReg, ResType, I, CL::s_abs, GL::SAbs);

  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMINIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmin, GL::FMin);
  case TargetOpcode::G_FMAXNUM:
  case TargetOpcode::G_FMAXIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmax, GL::FMax);

  case TargetOpcode::G_FCOPYSIGN:
    return selectExtInst(ResVReg, ResType, I, CL::copysign);

  case TargetOpcode::G_FCEIL:
    return selectExtInst(ResVReg, ResType, I, CL::ceil, GL::Ceil);
  case TargetOpcode::G_FFLOOR:
    return selectExtInst(ResVReg, ResType, I, CL::floor, GL::Floor);

  case TargetOpcode::G_FCOS:
    return selectExtInst(ResVReg, ResType, I, CL::cos, GL::Cos);
  case TargetOpcode::G_FSIN:
    return selectExtInst(ResVReg, ResType, I, CL::sin, GL::Sin);

  case TargetOpcode::G_FSQRT:
    return selectExtInst(ResVReg, ResType, I, CL::sqrt, GL::Sqrt);

  case TargetOpcode::G_CTTZ:
  case TargetOpcode::G_CTTZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::ctz);
  case TargetOpcode::G_CTLZ:
  case TargetOpcode::G_CTLZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::clz);

  case TargetOpcode::G_INTRINSIC_ROUND:
    return selectExtInst(ResVReg, ResType, I, CL::round, GL::Round);
  case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);
  case TargetOpcode::G_INTRINSIC_TRUNC:
    return selectExtInst(ResVReg, ResType, I, CL::trunc, GL::Trunc);
  case TargetOpcode::G_FRINT:
  case TargetOpcode::G_FNEARBYINT:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);

  case TargetOpcode::G_SMULH:
    return selectExtInst(ResVReg, ResType, I, CL::s_mul_hi);
  case TargetOpcode::G_UMULH:
    return selectExtInst(ResVReg, ResType, I, CL::u_mul_hi);

  case TargetOpcode::G_SEXT:
    return selectExt(ResVReg, ResType, I, true);
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_ZEXT:
    return selectExt(ResVReg, ResType, I, false);
  case TargetOpcode::G_TRUNC:
    return selectTrunc(ResVReg, ResType, I);
  case TargetOpcode::G_FPTRUNC:
  case TargetOpcode::G_FPEXT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpFConvert);

  case TargetOpcode::G_PTRTOINT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertPtrToU);
  case TargetOpcode::G_INTTOPTR:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertUToPtr);
  case TargetOpcode::G_BITCAST:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
  case TargetOpcode::G_ADDRSPACE_CAST:
    return selectAddrSpaceCast(ResVReg, ResType, I);
  case TargetOpcode::G_PTR_ADD: {
    // Currently, we get G_PTR_ADD only as a result of translating
    // global variables, initialized with constant expressions like GV + Const
    // (see test opencl/basic/progvar_prog_scope_init.ll).
    // TODO: extend the handler once we have other cases.
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    Register GV = I.getOperand(1).getReg();
    MachineRegisterInfo::def_instr_iterator II = MRI->def_instr_begin(GV);
    assert(((*II).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
            (*II).getOpcode() == TargetOpcode::COPY ||
            (*II).getOpcode() == SPIRV::OpVariable) &&
           isImm(I.getOperand(2), MRI));
    Register Idx = buildZerosVal(GR.getOrCreateSPIRVIntegerType(32, I, TII), I);
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addImm(static_cast<uint32_t>(
                       SPIRV::Opcode::InBoundsPtrAccessChain))
                   .addUse(GV)
                   .addUse(Idx)
                   .addUse(I.getOperand(2).getReg());
    return MIB.constrainAllUses(TII, TRI, RBI);
  }

  case TargetOpcode::G_ATOMICRMW_OR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicOr);
  case TargetOpcode::G_ATOMICRMW_ADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicIAdd);
  case TargetOpcode::G_ATOMICRMW_AND:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicAnd);
  case TargetOpcode::G_ATOMICRMW_MAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMax);
  case TargetOpcode::G_ATOMICRMW_MIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMin);
  case TargetOpcode::G_ATOMICRMW_SUB:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicISub);
  case TargetOpcode::G_ATOMICRMW_XOR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicXor);
  case TargetOpcode::G_ATOMICRMW_UMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMax);
  case TargetOpcode::G_ATOMICRMW_UMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMin);
  case TargetOpcode::G_ATOMICRMW_XCHG:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicExchange);
  case TargetOpcode::G_ATOMIC_CMPXCHG:
    return selectAtomicCmpXchg(ResVReg, ResType, I);

  case TargetOpcode::G_FENCE:
    return selectFence(I);

  default:
    return false;
  }
}

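// Extended instructions (OpenCL.std / GLSL.std.450 built-ins) are emitted as
// OpExtInst. The overloads below try each candidate set in order and use the
// first one the target supports, so a GLSL alternative only fires on targets
// without OpenCL.std. The resulting form is roughly:
//   %res = OpExtInst %ResType %set <opcode> <operands...>
// where the set is kept as an immediate here; it is resolved to the id of an
// OpExtInstImport later in the pipeline.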
bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst) const {
  return selectExtInst(ResVReg, ResType, I,
                       {{SPIRV::InstructionSet::OpenCL_std, CLInst}});
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst,
                                             GL::GLSLExtInst GLInst) const {
  ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
                          {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
  return selectExtInst(ResVReg, ResType, I, ExtInsts);
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             const ExtInstList &Insts) const {
  for (const auto &Ex : Insts) {
    SPIRV::InstructionSet::InstructionSet Set = Ex.first;
    uint32_t Opcode = Ex.second;
    if (STI.canUseExtInstSet(Set)) {
      MachineBasicBlock &BB = *I.getParent();
      auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
                     .addDef(ResVReg)
                     .addUse(GR.getSPIRVTypeID(ResType))
                     .addImm(static_cast<uint32_t>(Set))
                     .addImm(Opcode);
      const unsigned NumOps = I.getNumOperands();
      for (unsigned i = 1; i < NumOps; ++i)
        MIB.add(I.getOperand(i));
      return MIB.constrainAllUses(TII, TRI, RBI);
    }
  }
  return false;
}

bool SPIRVInstructionSelector::selectUnOpWithSrc(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I,
                                                 Register SrcReg,
                                                 unsigned Opcode) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(SrcReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectUnOp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I,
                                          unsigned Opcode) const {
  return selectUnOpWithSrc(ResVReg, ResType, I, I.getOperand(1).getReg(),
                           Opcode);
}

static SPIRV::Scope::Scope getScope(SyncScope::ID Ord) {
  switch (Ord) {
  case SyncScope::SingleThread:
    return SPIRV::Scope::Invocation;
  case SyncScope::System:
    return SPIRV::Scope::Device;
  default:
    llvm_unreachable("Unsupported synchronization Scope ID.");
  }
}

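// Map a MachineMemOperand (or raw memory-operand flags) onto the SPIR-V
// MemoryOperands (a.k.a. MemoryAccess) mask that trails OpLoad/OpStore. For
// example, a volatile, 4-byte-aligned load becomes:
//   %res = OpLoad %type %ptr Volatile|Aligned 4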
static void addMemoryOperands(MachineMemOperand *MemOp,
                              MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (MemOp->isVolatile())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (MemOp->isNonTemporal())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
  if (MemOp->getAlign().value())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None)) {
    MIB.addImm(SpvMemOp);
    if (SpvMemOp & static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
      MIB.addImm(MemOp->getAlign().value());
  }
}

static void addMemoryOperands(uint64_t Flags, MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (Flags & MachineMemOperand::Flags::MOVolatile)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (Flags & MachineMemOperand::Flags::MONonTemporal)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None))
    MIB.addImm(SpvMemOp);
}

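// Select G_LOAD (and the spv_load intrinsic, whose operands sit one slot
// later) into:
//   %res = OpLoad %ResType %ptr [<memory operands>]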
bool SPIRVInstructionSelector::selectLoad(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  unsigned OpOffset =
      I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ? 1 : 0;
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(Ptr);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStore(MachineInstr &I) const {
  unsigned OpOffset =
      I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ? 1 : 0;
  Register StoreVal = I.getOperand(0 + OpOffset).getReg();
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpStore))
                 .addUse(Ptr)
                 .addUse(StoreVal);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

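// Select G_MEMCPY/G_MEMMOVE/G_MEMSET into OpCopyMemorySized. G_MEMSET has no
// direct SPIR-V equivalent, so the fill pattern is materialized as an
// internal UniformConstant global holding an i8 array of the fill value, and
// the destination is filled by copying from that global.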
bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg,
                                                  MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  Register SrcReg = I.getOperand(1).getReg();
  if (I.getOpcode() == TargetOpcode::G_MEMSET) {
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    unsigned Val = getIConstVal(I.getOperand(1).getReg(), MRI);
    unsigned Num = getIConstVal(I.getOperand(2).getReg(), MRI);
    SPIRVType *ValTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
    SPIRVType *ArrTy = GR.getOrCreateSPIRVArrayType(ValTy, Num, I, TII);
    Register Const = GR.getOrCreateConsIntArray(Val, I, ArrTy, TII);
    SPIRVType *VarTy = GR.getOrCreateSPIRVPointerType(
        ArrTy, I, TII, SPIRV::StorageClass::UniformConstant);
    // TODO: check if we have such GV, add init, use buildGlobalVariable.
    Type *LLVMArrTy = ArrayType::get(
        IntegerType::get(GR.CurMF->getFunction().getContext(), 8), Num);
    GlobalVariable *GV =
        new GlobalVariable(LLVMArrTy, true, GlobalValue::InternalLinkage);
    Register VarReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
    GR.add(GV, GR.CurMF, VarReg);

    buildOpDecorate(VarReg, I, TII, SPIRV::Decoration::Constant, {});
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
        .addDef(VarReg)
        .addUse(GR.getSPIRVTypeID(VarTy))
        .addImm(SPIRV::StorageClass::UniformConstant)
        .addUse(Const)
        .constrainAllUses(TII, TRI, RBI);
    SPIRVType *SourceTy = GR.getOrCreateSPIRVPointerType(
        ValTy, I, TII, SPIRV::StorageClass::UniformConstant);
    SrcReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
    selectUnOpWithSrc(SrcReg, SourceTy, I, VarReg, SPIRV::OpBitcast);
  }
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCopyMemorySized))
                 .addUse(I.getOperand(0).getReg())
                 .addUse(SrcReg)
                 .addUse(I.getOperand(2).getReg());
  if (I.getNumMemOperands())
    addMemoryOperands(*I.memoperands_begin(), MIB);
  bool Result = MIB.constrainAllUses(TII, TRI, RBI);
  if (ResVReg.isValid() && ResVReg != MIB->getOperand(0).getReg())
    BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY), ResVReg)
        .addUse(MIB->getOperand(0).getReg());
  return Result;
}

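// Select a G_ATOMICRMW_* into the corresponding OpAtomic* instruction, whose
// operand order is fixed by the SPIR-V spec, e.g.:
//   %res = OpAtomicIAdd %type %ptr %scope %semantics %value
// Scope and memory-semantics operands are materialized as i32 constants
// first.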
bool SPIRVInstructionSelector::selectAtomicRMW(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I,
                                               unsigned NewOpcode) const {
  assert(I.hasOneMemOperand());
  const MachineMemOperand *MemOp = *I.memoperands_begin();
  uint32_t Scope = static_cast<uint32_t>(getScope(MemOp->getSyncScopeID()));
  Register ScopeReg = buildI32Constant(Scope, I);

  Register Ptr = I.getOperand(1).getReg();
  // TODO: Changed as it's implemented in the translator. See test/atomicrmw.ll
  // auto ScSem =
  // getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr));
  AtomicOrdering AO = MemOp->getSuccessOrdering();
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem /*| ScSem*/, I);

  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(NewOpcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(Ptr)
      .addUse(ScopeReg)
      .addUse(MemSemReg)
      .addUse(I.getOperand(2).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

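// A G_FENCE becomes OpMemoryBarrier with constant scope and semantics ids:
//   OpMemoryBarrier %scope %semantics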
bool SPIRVInstructionSelector::selectFence(MachineInstr &I) const {
  AtomicOrdering AO = AtomicOrdering(I.getOperand(0).getImm());
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem, I);
  SyncScope::ID Ord = SyncScope::ID(I.getOperand(1).getImm());
  uint32_t Scope = static_cast<uint32_t>(getScope(Ord));
  Register ScopeReg = buildI32Constant(Scope, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemoryBarrier))
      .addUse(ScopeReg)
      .addUse(MemSemReg)
      .constrainAllUses(TII, TRI, RBI);
}

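// LLVM's cmpxchg produces a {value, success} pair, while SPIR-V's
// OpAtomicCompareExchange only returns the original value:
//   %orig = OpAtomicCompareExchange %type %ptr %scope %eq %neq %value %cmp
// The pair is rebuilt below by comparing %orig against the expected value
// with OpIEqual and packing both results into the result struct with two
// OpCompositeInserts on an undef.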
bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  Register ScopeReg;
  Register MemSemEqReg;
  Register MemSemNeqReg;
  Register Ptr = I.getOperand(2).getReg();
  if (I.getOpcode() != TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS) {
    assert(I.hasOneMemOperand());
    const MachineMemOperand *MemOp = *I.memoperands_begin();
    unsigned Scope = static_cast<uint32_t>(getScope(MemOp->getSyncScopeID()));
    ScopeReg = buildI32Constant(Scope, I);

    unsigned ScSem = static_cast<uint32_t>(
        getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr)));
    AtomicOrdering AO = MemOp->getSuccessOrdering();
    unsigned MemSemEq = static_cast<uint32_t>(getMemSemantics(AO)) | ScSem;
    MemSemEqReg = buildI32Constant(MemSemEq, I);
    AtomicOrdering FO = MemOp->getFailureOrdering();
    unsigned MemSemNeq = static_cast<uint32_t>(getMemSemantics(FO)) | ScSem;
    MemSemNeqReg =
        MemSemEq == MemSemNeq ? MemSemEqReg : buildI32Constant(MemSemNeq, I);
  } else {
    ScopeReg = I.getOperand(5).getReg();
    MemSemEqReg = I.getOperand(6).getReg();
    MemSemNeqReg = I.getOperand(7).getReg();
  }

  Register Cmp = I.getOperand(3).getReg();
  Register Val = I.getOperand(4).getReg();
  SPIRVType *SpvValTy = GR.getSPIRVTypeForVReg(Val);
  Register ACmpRes = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  const DebugLoc &DL = I.getDebugLoc();
  bool Result =
      BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpAtomicCompareExchange))
          .addDef(ACmpRes)
          .addUse(GR.getSPIRVTypeID(SpvValTy))
          .addUse(Ptr)
          .addUse(ScopeReg)
          .addUse(MemSemEqReg)
          .addUse(MemSemNeqReg)
          .addUse(Val)
          .addUse(Cmp)
          .constrainAllUses(TII, TRI, RBI);
  Register CmpSuccReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  SPIRVType *BoolTy = GR.getOrCreateSPIRVBoolType(I, TII);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpIEqual))
                .addDef(CmpSuccReg)
                .addUse(GR.getSPIRVTypeID(BoolTy))
                .addUse(ACmpRes)
                .addUse(Cmp)
                .constrainAllUses(TII, TRI, RBI);
  Register TmpReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(TmpReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(ACmpRes)
                .addUse(GR.getOrCreateUndef(I, ResType, TII))
                .addImm(0)
                .constrainAllUses(TII, TRI, RBI);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(CmpSuccReg)
                .addUse(TmpReg)
                .addImm(1)
                .constrainAllUses(TII, TRI, RBI);
  return Result;
}

static bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::Workgroup:
  case SPIRV::StorageClass::CrossWorkgroup:
  case SPIRV::StorageClass::Function:
    return true;
  default:
    return false;
  }
}

// In SPIR-V, address space casting can only happen to and from the Generic
// storage class. We can also only cast Workgroup, CrossWorkgroup, or Function
// pointers to and from Generic pointers. As such, we can convert e.g. from
// Workgroup to Function by going via a Generic pointer as an intermediary
// (OpPtrCastToGeneric followed by OpGenericCastToPtr). All other combinations
// can only be done by a bitcast, and are probably not safe.
bool SPIRVInstructionSelector::selectAddrSpaceCast(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  // If the AddrSpaceCast user is single and in OpConstantComposite or
  // OpVariable, we should select OpSpecConstantOp.
  auto UIs = MRI->use_instructions(ResVReg);
  if (!UIs.empty() && ++UIs.begin() == UIs.end() &&
      (UIs.begin()->getOpcode() == SPIRV::OpConstantComposite ||
       UIs.begin()->getOpcode() == SPIRV::OpVariable ||
       isSpvIntrinsic(*UIs.begin(), Intrinsic::spv_init_global))) {
    Register NewReg = I.getOperand(1).getReg();
    MachineBasicBlock &BB = *I.getParent();
    SPIRVType *SpvBaseTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
    ResType = GR.getOrCreateSPIRVPointerType(SpvBaseTy, I, TII,
                                             SPIRV::StorageClass::Generic);
    bool Result =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addImm(static_cast<uint32_t>(SPIRV::Opcode::PtrCastToGeneric))
            .addUse(NewReg)
            .constrainAllUses(TII, TRI, RBI);
    return Result;
  }
  Register SrcPtr = I.getOperand(1).getReg();
  SPIRVType *SrcPtrTy = GR.getSPIRVTypeForVReg(SrcPtr);
  SPIRV::StorageClass::StorageClass SrcSC = GR.getPointerStorageClass(SrcPtr);
  SPIRV::StorageClass::StorageClass DstSC = GR.getPointerStorageClass(ResVReg);

  // Casting from an eligible pointer to Generic.
  if (DstSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(SrcSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
  // Casting from Generic to an eligible pointer.
  if (SrcSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(DstSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);
  // Casting between 2 eligible pointers using Generic as an intermediary.
  if (isGenericCastablePtr(SrcSC) && isGenericCastablePtr(DstSC)) {
    Register Tmp = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
        SrcPtrTy, I, TII, SPIRV::StorageClass::Generic);
    MachineBasicBlock &BB = *I.getParent();
    const DebugLoc &DL = I.getDebugLoc();
    bool Success = BuildMI(BB, I, DL, TII.get(SPIRV::OpPtrCastToGeneric))
                       .addDef(Tmp)
                       .addUse(GR.getSPIRVTypeID(GenericPtrTy))
                       .addUse(SrcPtr)
                       .constrainAllUses(TII, TRI, RBI);
    return Success && BuildMI(BB, I, DL, TII.get(SPIRV::OpGenericCastToPtr))
                          .addDef(ResVReg)
                          .addUse(GR.getSPIRVTypeID(ResType))
                          .addUse(Tmp)
                          .constrainAllUses(TII, TRI, RBI);
  }
  // TODO: Should this case just be disallowed completely?
  // We're casting 2 other arbitrary address spaces, so have to bitcast.
  return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
}

static unsigned getFCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::FCMP_OEQ:
    return SPIRV::OpFOrdEqual;
  case CmpInst::FCMP_OGE:
    return SPIRV::OpFOrdGreaterThanEqual;
  case CmpInst::FCMP_OGT:
    return SPIRV::OpFOrdGreaterThan;
  case CmpInst::FCMP_OLE:
    return SPIRV::OpFOrdLessThanEqual;
  case CmpInst::FCMP_OLT:
    return SPIRV::OpFOrdLessThan;
  case CmpInst::FCMP_ONE:
    return SPIRV::OpFOrdNotEqual;
  case CmpInst::FCMP_ORD:
    return SPIRV::OpOrdered;
  case CmpInst::FCMP_UEQ:
    return SPIRV::OpFUnordEqual;
  case CmpInst::FCMP_UGE:
    return SPIRV::OpFUnordGreaterThanEqual;
  case CmpInst::FCMP_UGT:
    return SPIRV::OpFUnordGreaterThan;
  case CmpInst::FCMP_ULE:
    return SPIRV::OpFUnordLessThanEqual;
  case CmpInst::FCMP_ULT:
    return SPIRV::OpFUnordLessThan;
  case CmpInst::FCMP_UNE:
    return SPIRV::OpFUnordNotEqual;
  case CmpInst::FCMP_UNO:
    return SPIRV::OpUnordered;
  default:
    llvm_unreachable("Unknown predicate type for FCmp");
  }
}

static unsigned getICmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpIEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpINotEqual;
  case CmpInst::ICMP_SGE:
    return SPIRV::OpSGreaterThanEqual;
  case CmpInst::ICMP_SGT:
    return SPIRV::OpSGreaterThan;
  case CmpInst::ICMP_SLE:
    return SPIRV::OpSLessThanEqual;
  case CmpInst::ICMP_SLT:
    return SPIRV::OpSLessThan;
  case CmpInst::ICMP_UGE:
    return SPIRV::OpUGreaterThanEqual;
  case CmpInst::ICMP_UGT:
    return SPIRV::OpUGreaterThan;
  case CmpInst::ICMP_ULE:
    return SPIRV::OpULessThanEqual;
  case CmpInst::ICMP_ULT:
    return SPIRV::OpULessThan;
  default:
    llvm_unreachable("Unknown predicate type for ICmp");
  }
}

static unsigned getPtrCmpOpcode(unsigned Pred) {
  switch (static_cast<CmpInst::Predicate>(Pred)) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpPtrEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpPtrNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for pointer comparison");
  }
}

// Return the logical operation, or abort if none exists.
static unsigned getBoolCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpLogicalEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpLogicalNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for Bool comparison");
  }
}

bool SPIRVInstructionSelector::selectBitreverse(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpBitReverse))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(1).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

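// G_BUILD_VECTOR is only selectable here when every element is a constant;
// the assert below checks that each source register is defined by a
// G_CONSTANT/G_FCONSTANT, and the whole vector then folds into a single
// OpConstantComposite.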
bool SPIRVInstructionSelector::selectConstVector(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  // TODO: only const case is supported for now.
  assert(std::all_of(
      I.operands_begin(), I.operands_end(), [this](const MachineOperand &MO) {
        if (MO.isDef())
          return true;
        if (!MO.isReg())
          return false;
        SPIRVType *ConstTy = this->MRI->getVRegDef(MO.getReg());
        assert(ConstTy && ConstTy->getOpcode() == SPIRV::ASSIGN_TYPE &&
               ConstTy->getOperand(1).isReg());
        Register ConstReg = ConstTy->getOperand(1).getReg();
        const MachineInstr *Const = this->MRI->getVRegDef(ConstReg);
        assert(Const);
        return (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
                Const->getOpcode() == TargetOpcode::G_FCONSTANT);
      }));

  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                     TII.get(SPIRV::OpConstantComposite))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  for (unsigned i = I.getNumExplicitDefs(); i < I.getNumExplicitOperands(); ++i)
    MIB.addUse(I.getOperand(i).getReg());
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectCmp(Register ResVReg,
                                         const SPIRVType *ResType,
                                         unsigned CmpOpc,
                                         MachineInstr &I) const {
  Register Cmp0 = I.getOperand(2).getReg();
  Register Cmp1 = I.getOperand(3).getReg();
  assert(GR.getSPIRVTypeForVReg(Cmp0)->getOpcode() ==
             GR.getSPIRVTypeForVReg(Cmp1)->getOpcode() &&
         "CMP operands should have the same type");
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CmpOpc))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(Cmp0)
      .addUse(Cmp1)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectICmp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  auto Pred = I.getOperand(1).getPredicate();
  unsigned CmpOpc;

  Register CmpOperand = I.getOperand(2).getReg();
  if (GR.isScalarOfType(CmpOperand, SPIRV::OpTypePointer))
    CmpOpc = getPtrCmpOpcode(Pred);
  else if (GR.isScalarOrVectorOfType(CmpOperand, SPIRV::OpTypeBool))
    CmpOpc = getBoolCmpOpcode(Pred);
  else
    CmpOpc = getICmpOpcode(Pred);
  return selectCmp(ResVReg, ResType, CmpOpc, I);
}

void SPIRVInstructionSelector::renderFImm32(MachineInstrBuilder &MIB,
                                            const MachineInstr &I,
                                            int OpIdx) const {
  assert(I.getOpcode() == TargetOpcode::G_FCONSTANT && OpIdx == -1 &&
         "Expected G_FCONSTANT");
  const ConstantFP *FPImm = I.getOperand(1).getFPImm();
  addNumImm(FPImm->getValueAPF().bitcastToAPInt(), MIB);
}

void SPIRVInstructionSelector::renderImm32(MachineInstrBuilder &MIB,
                                           const MachineInstr &I,
                                           int OpIdx) const {
  assert(I.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  addNumImm(I.getOperand(1).getCImm()->getValue(), MIB);
}

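// Materialize a 32-bit integer constant, reusing an existing OpConstant for
// the same value via the global registry's duplicate tracker when possible.
// Zero becomes OpConstantNull, anything else OpConstantI.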
Register
SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &I,
                                           const SPIRVType *ResType) const {
  Type *LLVMTy = IntegerType::get(GR.CurMF->getFunction().getContext(), 32);
  const SPIRVType *SpvI32Ty =
      ResType ? ResType : GR.getOrCreateSPIRVIntegerType(32, I, TII);
  // Find a constant in DT or build a new one.
  auto ConstInt = ConstantInt::get(LLVMTy, Val);
  Register NewReg = GR.find(ConstInt, GR.CurMF);
  if (!NewReg.isValid()) {
    NewReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
    GR.add(ConstInt, GR.CurMF, NewReg);
    MachineInstr *MI;
    MachineBasicBlock &BB = *I.getParent();
    if (Val == 0) {
      MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
               .addDef(NewReg)
               .addUse(GR.getSPIRVTypeID(SpvI32Ty));
    } else {
      MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
               .addDef(NewReg)
               .addUse(GR.getSPIRVTypeID(SpvI32Ty))
               .addImm(APInt(32, Val).getZExtValue());
    }
    constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
  }
  return NewReg;
}

bool SPIRVInstructionSelector::selectFCmp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  unsigned CmpOp = getFCmpOpcode(I.getOperand(1).getPredicate());
  return selectCmp(ResVReg, ResType, CmpOp, I);
}

Register SPIRVInstructionSelector::buildZerosVal(const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    return GR.getOrCreateConsIntVector(0, I, ResType, TII);
  return GR.getOrCreateConstInt(0, I, ResType, TII);
}

Register SPIRVInstructionSelector::buildOnesVal(bool AllOnes,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
  APInt One = AllOnes ? APInt::getAllOnes(BitWidth)
                      : APInt::getOneBitSet(BitWidth, 0);
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    return GR.getOrCreateConsIntVector(One.getZExtValue(), I, ResType, TII);
  return GR.getOrCreateConstInt(One.getZExtValue(), I, ResType, TII);
}

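// Extending a bool has no direct SPIR-V instruction: select between the
// all-ones/one constant and zero instead, e.g. for a sext of i1 to i32:
//   %res = OpSelect %i32 %cond %minus_one %zero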
bool SPIRVInstructionSelector::selectSelect(Register ResVReg,
                                            const SPIRVType *ResType,
                                            MachineInstr &I,
                                            bool IsSigned) const {
  // To extend a bool, we need to use OpSelect between constants.
  Register ZeroReg = buildZerosVal(ResType, I);
  Register OneReg = buildOnesVal(IsSigned, ResType, I);
  bool IsScalarBool =
      GR.isScalarOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool);
  unsigned Opcode =
      IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectSIVCond;
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(1).getReg())
      .addUse(OneReg)
      .addUse(ZeroReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectIToF(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I, bool IsSigned,
                                          unsigned Opcode) const {
  Register SrcReg = I.getOperand(1).getReg();
  // We can convert bool value directly to float type without OpConvert*ToF,
  // however the translator generates OpSelect+OpConvert*ToF, so we do the same.
  if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool)) {
    unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
    SPIRVType *TmpType = GR.getOrCreateSPIRVIntegerType(BitWidth, I, TII);
    if (ResType->getOpcode() == SPIRV::OpTypeVector) {
      const unsigned NumElts = ResType->getOperand(2).getImm();
      TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts, I, TII);
    }
    SrcReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    selectSelect(SrcReg, TmpType, I, false);
  }
  return selectUnOpWithSrc(ResVReg, ResType, I, SrcReg, Opcode);
}

bool SPIRVInstructionSelector::selectExt(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I, bool IsSigned) const {
  if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool))
    return selectSelect(ResVReg, ResType, I, IsSigned);
  unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
  return selectUnOp(ResVReg, ResType, I, Opcode);
}

bool SPIRVInstructionSelector::selectIntToBool(Register IntReg,
                                               Register ResVReg,
                                               MachineInstr &I,
                                               const SPIRVType *IntTy,
                                               const SPIRVType *BoolTy) const {
  // To truncate to a bool, we use OpBitwiseAnd 1 and OpINotEqual to zero.
  Register BitIntReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  bool IsVectorTy = IntTy->getOpcode() == SPIRV::OpTypeVector;
  unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
  Register Zero = buildZerosVal(IntTy, I);
  Register One = buildOnesVal(false, IntTy, I);
  MachineBasicBlock &BB = *I.getParent();
  BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(BitIntReg)
      .addUse(GR.getSPIRVTypeID(IntTy))
      .addUse(IntReg)
      .addUse(One)
      .constrainAllUses(TII, TRI, RBI);
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(BoolTy))
      .addUse(BitIntReg)
      .addUse(Zero)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectTrunc(Register ResVReg,
                                           const SPIRVType *ResType,
                                           MachineInstr &I) const {
  if (GR.isScalarOrVectorOfType(ResVReg, SPIRV::OpTypeBool)) {
    Register IntReg = I.getOperand(1).getReg();
    const SPIRVType *ArgType = GR.getSPIRVTypeForVReg(IntReg);
    return selectIntToBool(IntReg, ResVReg, I, ArgType, ResType);
  }
  bool IsSigned = GR.isScalarOrVectorSigned(ResType);
  unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
  return selectUnOp(ResVReg, ResType, I, Opcode);
}

bool SPIRVInstructionSelector::selectConst(Register ResVReg,
                                           const SPIRVType *ResType,
                                           const APInt &Imm,
                                           MachineInstr &I) const {
  unsigned TyOpcode = ResType->getOpcode();
  assert(TyOpcode != SPIRV::OpTypePointer || Imm.isZero());
  MachineBasicBlock &BB = *I.getParent();
  if ((TyOpcode == SPIRV::OpTypePointer || TyOpcode == SPIRV::OpTypeEvent) &&
      Imm.isZero())
    return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
        .addDef(ResVReg)
        .addUse(GR.getSPIRVTypeID(ResType))
        .constrainAllUses(TII, TRI, RBI);
  if (TyOpcode == SPIRV::OpTypeInt) {
    Register Reg = GR.getOrCreateConstInt(Imm.getZExtValue(), I, ResType, TII);
    if (Reg == ResVReg)
      return true;
    return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
        .addDef(ResVReg)
        .addUse(Reg)
        .constrainAllUses(TII, TRI, RBI);
  }
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  // <=32-bit integers should be caught by the sdag pattern.
  assert(Imm.getBitWidth() > 32);
  addNumImm(Imm, MIB);
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectOpUndef(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .constrainAllUses(TII, TRI, RBI);
}

static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
  assert(MO.isReg());
  const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
  if (TypeInst->getOpcode() != SPIRV::ASSIGN_TYPE)
    return false;
  assert(TypeInst->getOperand(1).isReg());
  MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
  return ImmInst->getOpcode() == TargetOpcode::G_CONSTANT;
}

static int64_t foldImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
  const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
  MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
  assert(ImmInst->getOpcode() == TargetOpcode::G_CONSTANT);
  return ImmInst->getOperand(1).getCImm()->getZExtValue();
}

bool SPIRVInstructionSelector::selectInsertVal(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeInsert))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 // object to insert
                 .addUse(I.getOperand(3).getReg())
                 // composite to insert into
                 .addUse(I.getOperand(2).getReg());
  for (unsigned i = 4; i < I.getNumOperands(); i++)
    MIB.addImm(foldImm(I.getOperand(i), MRI));
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectExtractVal(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(I.getOperand(2).getReg());
  for (unsigned i = 3; i < I.getNumOperands(); i++)
    MIB.addImm(foldImm(I.getOperand(i), MRI));
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectInsertElt(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  if (isImm(I.getOperand(4), MRI))
    return selectInsertVal(ResVReg, ResType, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorInsertDynamic))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())
      .addUse(I.getOperand(4).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectExtractElt(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  if (isImm(I.getOperand(3), MRI))
    return selectExtractVal(ResVReg, ResType, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorExtractDynamic))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectGEP(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  // In general we should also support OpAccessChain instrs here (i.e. not
  // PtrAccessChain), but SPIRV-LLVM Translator doesn't emit them at all, so
  // neither do we, to stay compliant with its tests and, more importantly,
  // with its consumers.
  unsigned Opcode = I.getOperand(2).getImm() ? SPIRV::OpInBoundsPtrAccessChain
                                             : SPIRV::OpPtrAccessChain;
  auto Res = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 // Object to get a pointer to.
                 .addUse(I.getOperand(3).getReg());
  // Adding indices.
  for (unsigned i = 4; i < I.getNumExplicitOperands(); ++i)
    Res.addUse(I.getOperand(i).getReg());
  return Res.constrainAllUses(TII, TRI, RBI);
}

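// Dispatch for the target-specific spv_* intrinsics that survive to
// instruction selection; most cases simply forward to the dedicated
// select* helpers above.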
bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  switch (I.getIntrinsicID()) {
  case Intrinsic::spv_load:
    return selectLoad(ResVReg, ResType, I);
  case Intrinsic::spv_store:
    return selectStore(I);
  case Intrinsic::spv_extractv:
    return selectExtractVal(ResVReg, ResType, I);
  case Intrinsic::spv_insertv:
    return selectInsertVal(ResVReg, ResType, I);
  case Intrinsic::spv_extractelt:
    return selectExtractElt(ResVReg, ResType, I);
  case Intrinsic::spv_insertelt:
    return selectInsertElt(ResVReg, ResType, I);
  case Intrinsic::spv_gep:
    return selectGEP(ResVReg, ResType, I);
  case Intrinsic::spv_unref_global:
  case Intrinsic::spv_init_global: {
    MachineInstr *MI = MRI->getVRegDef(I.getOperand(1).getReg());
    MachineInstr *Init = I.getNumExplicitOperands() > 2
                             ? MRI->getVRegDef(I.getOperand(2).getReg())
                             : nullptr;
    assert(MI);
    return selectGlobalValue(MI->getOperand(0).getReg(), *MI, Init);
  }
  case Intrinsic::spv_const_composite: {
    // If no values are attached, the composite is a null constant.
    bool IsNull = I.getNumExplicitDefs() + 1 == I.getNumExplicitOperands();
    unsigned Opcode =
        IsNull ? SPIRV::OpConstantNull : SPIRV::OpConstantComposite;
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType));
    // Skip the type MD node that was already used when generating assign.type
    // for this composite.
    if (!IsNull) {
      for (unsigned i = I.getNumExplicitDefs() + 1;
           i < I.getNumExplicitOperands(); ++i) {
        MIB.addUse(I.getOperand(i).getReg());
      }
    }
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_assign_name: {
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpName));
    MIB.addUse(I.getOperand(I.getNumExplicitDefs() + 1).getReg());
    for (unsigned i = I.getNumExplicitDefs() + 2;
         i < I.getNumExplicitOperands(); ++i) {
      MIB.addImm(I.getOperand(i).getImm());
    }
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_switch: {
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSwitch));
    for (unsigned i = 1; i < I.getNumExplicitOperands(); ++i) {
      if (I.getOperand(i).isReg())
        MIB.addReg(I.getOperand(i).getReg());
      else if (I.getOperand(i).isCImm())
        addNumImm(I.getOperand(i).getCImm()->getValue(), MIB);
      else if (I.getOperand(i).isMBB())
        MIB.addMBB(I.getOperand(i).getMBB());
      else
        llvm_unreachable("Unexpected OpSwitch operand");
    }
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_cmpxchg:
    return selectAtomicCmpXchg(ResVReg, ResType, I);
  case Intrinsic::spv_unreachable:
    BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUnreachable));
    break;
  case Intrinsic::spv_alloca:
    return selectFrameIndex(ResVReg, ResType, I);
  default:
    llvm_unreachable("Intrinsic selection not implemented");
  }
  return true;
}

bool SPIRVInstructionSelector::selectFrameIndex(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addImm(static_cast<uint32_t>(SPIRV::StorageClass::Function))
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectBranch(MachineInstr &I) const {
  // InstructionSelector walks backwards through the instructions. We can use
  // both a G_BR and a G_BRCOND to create an OpBranchConditional. We hit G_BR
  // first, so we can generate an OpBranchConditional here. If there is no
  // G_BRCOND, we just use OpBranch for a regular unconditional branch.
  const MachineInstr *PrevI = I.getPrevNode();
  MachineBasicBlock &MBB = *I.getParent();
  if (PrevI != nullptr && PrevI->getOpcode() == TargetOpcode::G_BRCOND) {
    return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
        .addUse(PrevI->getOperand(0).getReg())
        .addMBB(PrevI->getOperand(1).getMBB())
        .addMBB(I.getOperand(0).getMBB())
        .constrainAllUses(TII, TRI, RBI);
  }
  return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranch))
      .addMBB(I.getOperand(0).getMBB())
      .constrainAllUses(TII, TRI, RBI);
}
1430 
1431 bool SPIRVInstructionSelector::selectBranchCond(MachineInstr &I) const {
1432  // InstructionSelector walks backwards through the instructions. For an
1433  // explicit conditional branch with no fallthrough, we use both a G_BR and a
1434  // G_BRCOND to create an OpBranchConditional. We should hit G_BR first, and
1435  // generate the OpBranchConditional in selectBranch above.
1436  //
1437  // If an OpBranchConditional has been generated, we simply return, as the work
 1438  // is already done. If there is no OpBranchConditional, LLVM must be relying on
1439  // implicit fallthrough to the next basic block, so we need to create an
1440  // OpBranchConditional with an explicit "false" argument pointing to the next
1441  // basic block that LLVM would fall through to.
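 // Editor's illustrative sketch (added; hypothetical blocks): a block ending
 // in just
 //   G_BRCOND %cond, %bb.then
 // and falling through to %bb.next is selected as:
 //   OpBranchConditional %cond %then %next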
1442  const MachineInstr *NextI = I.getNextNode();
1443  // Check if this has already been successfully selected.
1444  if (NextI != nullptr && NextI->getOpcode() == SPIRV::OpBranchConditional)
1445  return true;
1446  // Must be relying on implicit block fallthrough, so generate an
1447  // OpBranchConditional with the "next" basic block as the "false" target.
1448  MachineBasicBlock &MBB = *I.getParent();
1449  unsigned NextMBBNum = MBB.getNextNode()->getNumber();
1450  MachineBasicBlock *NextMBB = I.getMF()->getBlockNumbered(NextMBBNum);
1451  return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
1452  .addUse(I.getOperand(0).getReg())
1453  .addMBB(I.getOperand(1).getMBB())
1454  .addMBB(NextMBB)
1455  .constrainAllUses(TII, TRI, RBI);
1456 }
1457 
1458 bool SPIRVInstructionSelector::selectPhi(Register ResVReg,
1459  const SPIRVType *ResType,
1460  MachineInstr &I) const {
1461  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpPhi))
1462  .addDef(ResVReg)
1463  .addUse(GR.getSPIRVTypeID(ResType));
1464  const unsigned NumOps = I.getNumOperands();
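 // Editor's note (added): after the def, G_PHI operands come in
 // (value, predecessor block) pairs, e.g. (hypothetical):
 //   %res:_(s32) = G_PHI %a(s32), %bb.1, %b(s32), %bb.2
 // which maps directly onto OpPhi's (id, parent-label) pairs.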
1465  for (unsigned i = 1; i < NumOps; i += 2) {
1466  MIB.addUse(I.getOperand(i + 0).getReg());
1467  MIB.addMBB(I.getOperand(i + 1).getMBB());
1468  }
1469  return MIB.constrainAllUses(TII, TRI, RBI);
1470 }
1471 
1472 bool SPIRVInstructionSelector::selectGlobalValue(
1473  Register ResVReg, MachineInstr &I, const MachineInstr *Init) const {
1474  // FIXME: don't use MachineIRBuilder here, replace it with BuildMI.
1475  MachineIRBuilder MIRBuilder(I);
1476  const GlobalValue *GV = I.getOperand(1).getGlobal();
1477  SPIRVType *ResType = GR.getOrCreateSPIRVType(
1478  GV->getType(), MIRBuilder, SPIRV::AccessQualifier::ReadWrite, false);
1479 
1480  std::string GlobalIdent = GV->getGlobalIdentifier();
 1481  // We have functions as operands in tests with blocks of instructions, e.g.
 1482  // in transcoding/global_block.ll. These operands are not used and should be
 1483  // substituted by zero constants. Their type is expected to always be
 1484  // OpTypePointer Function %uchar.
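 // Editor's illustrative note (added; hypothetical ids): such an operand is
 // replaced by the null-pointer constant built below, i.e.
 //   %f = OpConstantNull %_ptr_Function_uchar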
1485  if (isa<Function>(GV)) {
1486  const Constant *ConstVal = GV;
1487  MachineBasicBlock &BB = *I.getParent();
1488  Register NewReg = GR.find(ConstVal, GR.CurMF);
1489  if (!NewReg.isValid()) {
1490  SPIRVType *SpvBaseTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
1491  ResType = GR.getOrCreateSPIRVPointerType(SpvBaseTy, I, TII);
 1492  NewReg = ResVReg;
1493  GR.add(ConstVal, GR.CurMF, NewReg);
1494  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
1495  .addDef(NewReg)
1496  .addUse(GR.getSPIRVTypeID(ResType))
1497  .constrainAllUses(TII, TRI, RBI);
1498  }
1499  assert(NewReg != ResVReg);
1500  return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
1501  .addDef(ResVReg)
1502  .addUse(NewReg)
1503  .constrainAllUses(TII, TRI, RBI);
1504  }
1505  auto GlobalVar = cast<GlobalVariable>(GV);
1506  assert(GlobalVar->getName() != "llvm.global.annotations");
1507 
1508  bool HasInit = GlobalVar->hasInitializer() &&
1509  !isa<UndefValue>(GlobalVar->getInitializer());
 1510  // Skip the empty declaration for GVs with initializers until we reach the
 1511  // declaration that carries the passed-in initializer.
1512  if (HasInit && !Init)
1513  return true;
1514 
 1515  unsigned AddrSpace = GV->getAddressSpace();
 1516  SPIRV::StorageClass::StorageClass Storage =
 1517  addressSpaceToStorageClass(AddrSpace);
 1518  bool HasLnkTy = GV->getLinkage() != GlobalValue::InternalLinkage &&
 1519  Storage != SPIRV::StorageClass::Function;
 1520  SPIRV::LinkageType::LinkageType LnkType =
 1521  (GV->isDeclaration() || GV->hasAvailableExternallyLinkage())
 1522  ? SPIRV::LinkageType::Import
 1523  : SPIRV::LinkageType::Export;
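 // Editor's hedged note (added): addressSpaceToStorageClass follows the OpenCL
 // numbering, e.g. addrspace(1) -> CrossWorkgroup and addrspace(3) ->
 // Workgroup (see SPIRVUtils.cpp; treat the exact mapping as an assumption
 // here). Non-internal globals outside Function storage also get a linkage
 // decoration: Import for declarations, Export otherwise.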
1524 
1525  Register Reg = GR.buildGlobalVariable(ResVReg, ResType, GlobalIdent, GV,
1526  Storage, Init, GlobalVar->isConstant(),
1527  HasLnkTy, LnkType, MIRBuilder, true);
1528  return Reg.isValid();
1529 }
1530 
 1531 namespace llvm {
 1532 InstructionSelector *
 1533 createSPIRVInstructionSelector(const SPIRVTargetMachine &TM,
 1534  const SPIRVSubtarget &Subtarget,
 1535  const RegisterBankInfo &RBI) {
 1536  return new SPIRVInstructionSelector(TM, Subtarget, RBI);
 1537 }
 1538 } // namespace llvm