// Doxygen page-header artifacts (LLVM 15.0.0git, SPIRVInstructionSelector.cpp)
// converted to a comment so the translation unit stays well-formed.
1 //===- SPIRVInstructionSelector.cpp ------------------------------*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the targeting of the InstructionSelector class for
10 // SPIRV.
11 // TODO: This should be generated by TableGen.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "SPIRV.h"
16 #include "SPIRVGlobalRegistry.h"
17 #include "SPIRVInstrInfo.h"
18 #include "SPIRVRegisterBankInfo.h"
19 #include "SPIRVRegisterInfo.h"
20 #include "SPIRVTargetMachine.h"
21 #include "SPIRVUtils.h"
22 #include "llvm/ADT/APFloat.h"
27 #include "llvm/IR/IntrinsicsSPIRV.h"
28 #include "llvm/Support/Debug.h"
29 
30 #define DEBUG_TYPE "spirv-isel"
31 
32 using namespace llvm;
33 
34 namespace {
35 
36 #define GET_GLOBALISEL_PREDICATE_BITSET
37 #include "SPIRVGenGlobalISel.inc"
38 #undef GET_GLOBALISEL_PREDICATE_BITSET
39 
40 class SPIRVInstructionSelector : public InstructionSelector {
41  const SPIRVSubtarget &STI;
42  const SPIRVInstrInfo &TII;
43  const SPIRVRegisterInfo &TRI;
44  const RegisterBankInfo &RBI;
47 
48 public:
49  SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
50  const SPIRVSubtarget &ST,
51  const RegisterBankInfo &RBI);
52  void setupMF(MachineFunction &MF, GISelKnownBits *KB,
53  CodeGenCoverage &CoverageInfo, ProfileSummaryInfo *PSI,
54  BlockFrequencyInfo *BFI) override;
55  // Common selection code. Instruction-specific selection occurs in spvSelect.
56  bool select(MachineInstr &I) override;
57  static const char *getName() { return DEBUG_TYPE; }
58 
59 #define GET_GLOBALISEL_PREDICATES_DECL
60 #include "SPIRVGenGlobalISel.inc"
61 #undef GET_GLOBALISEL_PREDICATES_DECL
62 
63 #define GET_GLOBALISEL_TEMPORARIES_DECL
64 #include "SPIRVGenGlobalISel.inc"
65 #undef GET_GLOBALISEL_TEMPORARIES_DECL
66 
67 private:
68  // tblgen-erated 'select' implementation, used as the initial selector for
69  // the patterns that don't require complex C++.
70  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;
71 
72  // All instruction-specific selection that didn't happen in "select()".
73  // Is basically a large Switch/Case delegating to all other select method.
74  bool spvSelect(Register ResVReg, const SPIRVType *ResType,
75  MachineInstr &I) const;
76 
77  bool selectGlobalValue(Register ResVReg, MachineInstr &I,
78  const MachineInstr *Init = nullptr) const;
79 
80  bool selectUnOpWithSrc(Register ResVReg, const SPIRVType *ResType,
81  MachineInstr &I, Register SrcReg,
82  unsigned Opcode) const;
83  bool selectUnOp(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
84  unsigned Opcode) const;
85 
86  bool selectLoad(Register ResVReg, const SPIRVType *ResType,
87  MachineInstr &I) const;
88  bool selectStore(MachineInstr &I) const;
89 
90  bool selectMemOperation(Register ResVReg, MachineInstr &I) const;
91 
92  bool selectAtomicRMW(Register ResVReg, const SPIRVType *ResType,
93  MachineInstr &I, unsigned NewOpcode) const;
94 
95  bool selectAtomicCmpXchg(Register ResVReg, const SPIRVType *ResType,
96  MachineInstr &I) const;
97 
98  bool selectFence(MachineInstr &I) const;
99 
100  bool selectAddrSpaceCast(Register ResVReg, const SPIRVType *ResType,
101  MachineInstr &I) const;
102 
103  bool selectBitreverse(Register ResVReg, const SPIRVType *ResType,
104  MachineInstr &I) const;
105 
106  bool selectConstVector(Register ResVReg, const SPIRVType *ResType,
107  MachineInstr &I) const;
108 
109  bool selectCmp(Register ResVReg, const SPIRVType *ResType,
110  unsigned comparisonOpcode, MachineInstr &I) const;
111 
112  bool selectICmp(Register ResVReg, const SPIRVType *ResType,
113  MachineInstr &I) const;
114  bool selectFCmp(Register ResVReg, const SPIRVType *ResType,
115  MachineInstr &I) const;
116 
117  void renderImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
118  int OpIdx) const;
119  void renderFImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
120  int OpIdx) const;
121 
122  bool selectConst(Register ResVReg, const SPIRVType *ResType, const APInt &Imm,
123  MachineInstr &I) const;
124 
125  bool selectSelect(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
126  bool IsSigned) const;
127  bool selectIToF(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
128  bool IsSigned, unsigned Opcode) const;
129  bool selectExt(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
130  bool IsSigned) const;
131 
132  bool selectTrunc(Register ResVReg, const SPIRVType *ResType,
133  MachineInstr &I) const;
134 
135  bool selectIntToBool(Register IntReg, Register ResVReg,
136  const SPIRVType *intTy, const SPIRVType *boolTy,
137  MachineInstr &I) const;
138 
139  bool selectOpUndef(Register ResVReg, const SPIRVType *ResType,
140  MachineInstr &I) const;
141  bool selectIntrinsic(Register ResVReg, const SPIRVType *ResType,
142  MachineInstr &I) const;
143  bool selectExtractVal(Register ResVReg, const SPIRVType *ResType,
144  MachineInstr &I) const;
145  bool selectInsertVal(Register ResVReg, const SPIRVType *ResType,
146  MachineInstr &I) const;
147  bool selectExtractElt(Register ResVReg, const SPIRVType *ResType,
148  MachineInstr &I) const;
149  bool selectInsertElt(Register ResVReg, const SPIRVType *ResType,
150  MachineInstr &I) const;
151  bool selectGEP(Register ResVReg, const SPIRVType *ResType,
152  MachineInstr &I) const;
153 
154  bool selectFrameIndex(Register ResVReg, const SPIRVType *ResType,
155  MachineInstr &I) const;
156 
157  bool selectBranch(MachineInstr &I) const;
158  bool selectBranchCond(MachineInstr &I) const;
159 
160  bool selectPhi(Register ResVReg, const SPIRVType *ResType,
161  MachineInstr &I) const;
162 
163  Register buildI32Constant(uint32_t Val, MachineInstr &I,
164  const SPIRVType *ResType = nullptr) const;
165 
166  Register buildZerosVal(const SPIRVType *ResType, MachineInstr &I) const;
167  Register buildOnesVal(bool AllOnes, const SPIRVType *ResType,
168  MachineInstr &I) const;
169 };
170 
171 } // end anonymous namespace
172 
173 #define GET_GLOBALISEL_IMPL
174 #include "SPIRVGenGlobalISel.inc"
175 #undef GET_GLOBALISEL_IMPL
176 
177 SPIRVInstructionSelector::SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
178  const SPIRVSubtarget &ST,
179  const RegisterBankInfo &RBI)
180  : InstructionSelector(), STI(ST), TII(*ST.getInstrInfo()),
181  TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()),
183 #include "SPIRVGenGlobalISel.inc"
186 #include "SPIRVGenGlobalISel.inc"
188 {
189 }
190 
191 void SPIRVInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
192  CodeGenCoverage &CoverageInfo,
193  ProfileSummaryInfo *PSI,
195  MRI = &MF.getRegInfo();
196  GR.setCurrentFunc(MF);
197  InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
198 }
199 
200 // Defined in SPIRVLegalizerInfo.cpp.
201 extern bool isTypeFoldingSupported(unsigned Opcode);
202 
204  assert(I.getParent() && "Instruction should be in a basic block!");
205  assert(I.getParent()->getParent() && "Instruction should be in a function!");
206 
207  Register Opcode = I.getOpcode();
208  // If it's not a GMIR instruction, we've selected it already.
209  if (!isPreISelGenericOpcode(Opcode)) {
210  if (Opcode == SPIRV::ASSIGN_TYPE) { // These pseudos aren't needed any more.
211  auto *Def = MRI->getVRegDef(I.getOperand(1).getReg());
212  if (isTypeFoldingSupported(Def->getOpcode())) {
213  auto Res = selectImpl(I, *CoverageInfo);
214  assert(Res || Def->getOpcode() == TargetOpcode::G_CONSTANT);
215  if (Res)
216  return Res;
217  }
218  MRI->replaceRegWith(I.getOperand(1).getReg(), I.getOperand(0).getReg());
219  I.removeFromParent();
220  } else if (I.getNumDefs() == 1) {
221  // Make all vregs 32 bits (for SPIR-V IDs).
222  MRI->setType(I.getOperand(0).getReg(), LLT::scalar(32));
223  }
224  return true;
225  }
226 
227  if (I.getNumOperands() != I.getNumExplicitOperands()) {
228  LLVM_DEBUG(errs() << "Generic instr has unexpected implicit operands\n");
229  return false;
230  }
231 
232  // Common code for getting return reg+type, and removing selected instr
233  // from parent occurs here. Instr-specific selection happens in spvSelect().
234  bool HasDefs = I.getNumDefs() > 0;
235  Register ResVReg = HasDefs ? I.getOperand(0).getReg() : Register(0);
236  SPIRVType *ResType = HasDefs ? GR.getSPIRVTypeForVReg(ResVReg) : nullptr;
237  assert(!HasDefs || ResType || I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
238  if (spvSelect(ResVReg, ResType, I)) {
239  if (HasDefs) // Make all vregs 32 bits (for SPIR-V IDs).
240  MRI->setType(ResVReg, LLT::scalar(32));
241  I.removeFromParent();
242  return true;
243  }
244  return false;
245 }
246 
// Large opcode dispatcher: maps each generic MIR opcode to its dedicated
// select* helper (or emits the SPIR-V instruction inline for trivial cases).
// Returns false for unhandled opcodes so select() can report failure.
bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  // Type-foldable opcodes should have been consumed by selectImpl() via the
  // ASSIGN_TYPE path in select(); only G_CONSTANT may still reach here.
  assert(!isTypeFoldingSupported(I.getOpcode()) ||
         I.getOpcode() == TargetOpcode::G_CONSTANT);
  const unsigned Opcode = I.getOpcode();
  switch (Opcode) {
  case TargetOpcode::G_CONSTANT:
    return selectConst(ResVReg, ResType, I.getOperand(1).getCImm()->getValue(),
                       I);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(ResVReg, I);
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectOpUndef(ResVReg, ResType, I);

  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    return selectIntrinsic(ResVReg, ResType, I);
  case TargetOpcode::G_BITREVERSE:
    return selectBitreverse(ResVReg, ResType, I);

  case TargetOpcode::G_BUILD_VECTOR:
    return selectConstVector(ResVReg, ResType, I);

  case TargetOpcode::G_SHUFFLE_VECTOR: {
    // Emitted inline: OpVectorShuffle takes the two source vectors followed
    // by one literal index per result component.
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorShuffle))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addUse(I.getOperand(1).getReg())
                   .addUse(I.getOperand(2).getReg());
    for (auto V : I.getOperand(3).getShuffleMask())
      MIB.addImm(V);
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case TargetOpcode::G_MEMMOVE:
  case TargetOpcode::G_MEMCPY:
    return selectMemOperation(ResVReg, I);

  case TargetOpcode::G_ICMP:
    return selectICmp(ResVReg, ResType, I);
  case TargetOpcode::G_FCMP:
    return selectFCmp(ResVReg, ResType, I);

  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndex(ResVReg, ResType, I);

  case TargetOpcode::G_LOAD:
    return selectLoad(ResVReg, ResType, I);
  case TargetOpcode::G_STORE:
    return selectStore(I);

  case TargetOpcode::G_BR:
    return selectBranch(I);
  case TargetOpcode::G_BRCOND:
    return selectBranchCond(I);

  case TargetOpcode::G_PHI:
    return selectPhi(ResVReg, ResType, I);

  case TargetOpcode::G_FPTOSI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToS);
  case TargetOpcode::G_FPTOUI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToU);

  case TargetOpcode::G_SITOFP:
    return selectIToF(ResVReg, ResType, I, true, SPIRV::OpConvertSToF);
  case TargetOpcode::G_UITOFP:
    return selectIToF(ResVReg, ResType, I, false, SPIRV::OpConvertUToF);

  case TargetOpcode::G_CTPOP:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitCount);

  case TargetOpcode::G_SEXT:
    return selectExt(ResVReg, ResType, I, true);
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_ZEXT:
    return selectExt(ResVReg, ResType, I, false);
  case TargetOpcode::G_TRUNC:
    return selectTrunc(ResVReg, ResType, I);
  case TargetOpcode::G_FPTRUNC:
  case TargetOpcode::G_FPEXT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpFConvert);

  case TargetOpcode::G_PTRTOINT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertPtrToU);
  case TargetOpcode::G_INTTOPTR:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertUToPtr);
  case TargetOpcode::G_BITCAST:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
  case TargetOpcode::G_ADDRSPACE_CAST:
    return selectAddrSpaceCast(ResVReg, ResType, I);

  case TargetOpcode::G_ATOMICRMW_OR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicOr);
  case TargetOpcode::G_ATOMICRMW_ADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicIAdd);
  case TargetOpcode::G_ATOMICRMW_AND:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicAnd);
  case TargetOpcode::G_ATOMICRMW_MAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMax);
  case TargetOpcode::G_ATOMICRMW_MIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMin);
  case TargetOpcode::G_ATOMICRMW_SUB:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicISub);
  case TargetOpcode::G_ATOMICRMW_XOR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicXor);
  case TargetOpcode::G_ATOMICRMW_UMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMax);
  case TargetOpcode::G_ATOMICRMW_UMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMin);
  case TargetOpcode::G_ATOMICRMW_XCHG:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicExchange);
  case TargetOpcode::G_ATOMIC_CMPXCHG:
    return selectAtomicCmpXchg(ResVReg, ResType, I);

  case TargetOpcode::G_FENCE:
    return selectFence(I);

  default:
    return false;
  }
}
369 
370 bool SPIRVInstructionSelector::selectUnOpWithSrc(Register ResVReg,
371  const SPIRVType *ResType,
372  MachineInstr &I,
373  Register SrcReg,
374  unsigned Opcode) const {
375  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
376  .addDef(ResVReg)
377  .addUse(GR.getSPIRVTypeID(ResType))
378  .addUse(SrcReg)
379  .constrainAllUses(TII, TRI, RBI);
380 }
381 
382 bool SPIRVInstructionSelector::selectUnOp(Register ResVReg,
383  const SPIRVType *ResType,
384  MachineInstr &I,
385  unsigned Opcode) const {
386  return selectUnOpWithSrc(ResVReg, ResType, I, I.getOperand(1).getReg(),
387  Opcode);
388 }
389 
391  switch (Ord) {
392  case AtomicOrdering::Acquire:
393  return SPIRV::MemorySemantics::Acquire;
394  case AtomicOrdering::Release:
395  return SPIRV::MemorySemantics::Release;
396  case AtomicOrdering::AcquireRelease:
397  return SPIRV::MemorySemantics::AcquireRelease;
398  case AtomicOrdering::SequentiallyConsistent:
399  return SPIRV::MemorySemantics::SequentiallyConsistent;
401  case AtomicOrdering::Monotonic:
402  case AtomicOrdering::NotAtomic:
404  }
405 }
406 
408  switch (Ord) {
410  return SPIRV::Scope::Invocation;
411  case SyncScope::System:
412  return SPIRV::Scope::Device;
413  default:
414  llvm_unreachable("Unsupported synchronization Scope ID.");
415  }
416 }
417 
419  MachineInstrBuilder &MIB) {
420  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
421  if (MemOp->isVolatile())
422  SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
423  if (MemOp->isNonTemporal())
424  SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
425  if (MemOp->getAlign().value())
426  SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);
427 
428  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None)) {
429  MIB.addImm(SpvMemOp);
430  if (SpvMemOp & static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
431  MIB.addImm(MemOp->getAlign().value());
432  }
433 }
434 
436  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
437  if (Flags & MachineMemOperand::Flags::MOVolatile)
438  SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
439  if (Flags & MachineMemOperand::Flags::MONonTemporal)
440  SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
441 
442  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None))
443  MIB.addImm(SpvMemOp);
444 }
445 
446 bool SPIRVInstructionSelector::selectLoad(Register ResVReg,
447  const SPIRVType *ResType,
448  MachineInstr &I) const {
449  unsigned OpOffset =
450  I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ? 1 : 0;
451  Register Ptr = I.getOperand(1 + OpOffset).getReg();
452  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
453  .addDef(ResVReg)
454  .addUse(GR.getSPIRVTypeID(ResType))
455  .addUse(Ptr);
456  if (!I.getNumMemOperands()) {
457  assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS);
458  addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
459  } else {
460  addMemoryOperands(*I.memoperands_begin(), MIB);
461  }
462  return MIB.constrainAllUses(TII, TRI, RBI);
463 }
464 
465 bool SPIRVInstructionSelector::selectStore(MachineInstr &I) const {
466  unsigned OpOffset =
467  I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ? 1 : 0;
468  Register StoreVal = I.getOperand(0 + OpOffset).getReg();
469  Register Ptr = I.getOperand(1 + OpOffset).getReg();
470  MachineBasicBlock &BB = *I.getParent();
471  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpStore))
472  .addUse(Ptr)
473  .addUse(StoreVal);
474  if (!I.getNumMemOperands()) {
475  assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS);
476  addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
477  } else {
478  addMemoryOperands(*I.memoperands_begin(), MIB);
479  }
480  return MIB.constrainAllUses(TII, TRI, RBI);
481 }
482 
// Lower G_MEMCPY / G_MEMMOVE to OpCopyMemorySized. The generic instruction's
// own dest register is reused as the def; if the caller asked for a different
// result vreg, a COPY bridges the two.
bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg,
                                                  MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCopyMemorySized))
                 .addDef(I.getOperand(0).getReg())
                 .addUse(I.getOperand(1).getReg())
                 .addUse(I.getOperand(2).getReg());
  if (I.getNumMemOperands())
    addMemoryOperands(*I.memoperands_begin(), MIB);
  bool Result = MIB.constrainAllUses(TII, TRI, RBI);
  // Bridge to the requested result register when it differs from the def.
  if (ResVReg.isValid() && ResVReg != MIB->getOperand(0).getReg()) {
    BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY), ResVReg)
        .addUse(MIB->getOperand(0).getReg());
  }
  return Result;
}
499 
// Lower a G_ATOMICRMW_* to the corresponding OpAtomic* instruction
// (NewOpcode). Scope and memory-semantics operands are materialized as i32
// constants derived from the instruction's single memoperand.
bool SPIRVInstructionSelector::selectAtomicRMW(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I,
                                               unsigned NewOpcode) const {
  assert(I.hasOneMemOperand());
  const MachineMemOperand *MemOp = *I.memoperands_begin();
  uint32_t Scope = static_cast<uint32_t>(getScope(MemOp->getSyncScopeID()));
  Register ScopeReg = buildI32Constant(Scope, I);

  Register Ptr = I.getOperand(1).getReg();
  // TODO: Changed as it's implemented in the translator. See test/atomicrmw.ll
  // auto ScSem =
  // getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr));
  AtomicOrdering AO = MemOp->getSuccessOrdering();
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem /*| ScSem*/, I);

  // Operand order per SPIR-V: pointer, scope, semantics, value.
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(NewOpcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(Ptr)
      .addUse(ScopeReg)
      .addUse(MemSemReg)
      .addUse(I.getOperand(2).getReg())
      .constrainAllUses(TII, TRI, RBI);
}
526 
527 bool SPIRVInstructionSelector::selectFence(MachineInstr &I) const {
528  AtomicOrdering AO = AtomicOrdering(I.getOperand(0).getImm());
529  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
530  Register MemSemReg = buildI32Constant(MemSem, I);
531  SyncScope::ID Ord = SyncScope::ID(I.getOperand(1).getImm());
532  uint32_t Scope = static_cast<uint32_t>(getScope(Ord));
533  Register ScopeReg = buildI32Constant(Scope, I);
534  MachineBasicBlock &BB = *I.getParent();
535  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemoryBarrier))
536  .addUse(ScopeReg)
537  .addUse(MemSemReg)
538  .constrainAllUses(TII, TRI, RBI);
539 }
540 
// Lower G_ATOMIC_CMPXCHG to OpAtomicCompareExchange. Success and failure
// semantics are built separately (sharing one constant when equal), and each
// is OR'ed with the semantics implied by the pointer's storage class.
bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  assert(I.hasOneMemOperand());
  const MachineMemOperand *MemOp = *I.memoperands_begin();
  uint32_t Scope = static_cast<uint32_t>(getScope(MemOp->getSyncScopeID()));
  Register ScopeReg = buildI32Constant(Scope, I);

  Register Ptr = I.getOperand(2).getReg();
  Register Cmp = I.getOperand(3).getReg();
  Register Val = I.getOperand(4).getReg();

  SPIRVType *SpvValTy = GR.getSPIRVTypeForVReg(Val);
  SPIRV::StorageClass SC = GR.getPointerStorageClass(Ptr);
  uint32_t ScSem = static_cast<uint32_t>(getMemSemanticsForStorageClass(SC));
  AtomicOrdering AO = MemOp->getSuccessOrdering();
  uint32_t MemSemEq = static_cast<uint32_t>(getMemSemantics(AO)) | ScSem;
  Register MemSemEqReg = buildI32Constant(MemSemEq, I);
  AtomicOrdering FO = MemOp->getFailureOrdering();
  uint32_t MemSemNeq = static_cast<uint32_t>(getMemSemantics(FO)) | ScSem;
  // Reuse the success-semantics constant when both orderings agree.
  Register MemSemNeqReg =
      MemSemEq == MemSemNeq ? MemSemEqReg : buildI32Constant(MemSemNeq, I);
  const DebugLoc &DL = I.getDebugLoc();
  // Note SPIR-V operand order: Value comes before Comparator.
  return BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpAtomicCompareExchange))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(SpvValTy))
      .addUse(Ptr)
      .addUse(ScopeReg)
      .addUse(MemSemEqReg)
      .addUse(MemSemNeqReg)
      .addUse(Val)
      .addUse(Cmp)
      .constrainAllUses(TII, TRI, RBI);
}
575 
577  switch (SC) {
578  case SPIRV::StorageClass::Workgroup:
579  case SPIRV::StorageClass::CrossWorkgroup:
580  case SPIRV::StorageClass::Function:
581  return true;
582  default:
583  return false;
584  }
585 }
586 
// In SPIR-V address space casting can only happen to and from the Generic
// storage class. We can also only cast Workgroup, CrossWorkgroup, or Function
// pointers to and from Generic pointers. As such, we can convert e.g. from
// Workgroup to Function by going via a Generic pointer as an intermediary. All
// other combinations can only be done by a bitcast, and are probably not safe.
// Lower G_ADDRSPACE_CAST. Direct casts to/from Generic use the dedicated
// SPIR-V opcodes; casts between two Generic-castable classes are bridged
// through a temporary Generic pointer; anything else falls back to OpBitcast.
bool SPIRVInstructionSelector::selectAddrSpaceCast(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  Register SrcPtr = I.getOperand(1).getReg();
  SPIRVType *SrcPtrTy = GR.getSPIRVTypeForVReg(SrcPtr);
  SPIRV::StorageClass SrcSC = GR.getPointerStorageClass(SrcPtr);
  SPIRV::StorageClass DstSC = GR.getPointerStorageClass(ResVReg);

  // Casting from an eligible pointer to Generic.
  if (DstSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(SrcSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
  // Casting from Generic to an eligible pointer.
  if (SrcSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(DstSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);
  // Casting between 2 eligible pointers using Generic as an intermediary.
  if (isGenericCastablePtr(SrcSC) && isGenericCastablePtr(DstSC)) {
    Register Tmp = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
        SrcPtrTy, I, TII, SPIRV::StorageClass::Generic);
    MachineBasicBlock &BB = *I.getParent();
    const DebugLoc &DL = I.getDebugLoc();
    bool Success = BuildMI(BB, I, DL, TII.get(SPIRV::OpPtrCastToGeneric))
                       .addDef(Tmp)
                       .addUse(GR.getSPIRVTypeID(GenericPtrTy))
                       .addUse(SrcPtr)
                       .constrainAllUses(TII, TRI, RBI);
    return Success && BuildMI(BB, I, DL, TII.get(SPIRV::OpGenericCastToPtr))
                          .addDef(ResVReg)
                          .addUse(GR.getSPIRVTypeID(ResType))
                          .addUse(Tmp)
                          .constrainAllUses(TII, TRI, RBI);
  }
  // TODO Should this case just be disallowed completely?
  // We're casting 2 other arbitrary address spaces, so have to bitcast.
  return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
}
628 
629 static unsigned getFCmpOpcode(unsigned PredNum) {
630  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
631  switch (Pred) {
632  case CmpInst::FCMP_OEQ:
633  return SPIRV::OpFOrdEqual;
634  case CmpInst::FCMP_OGE:
635  return SPIRV::OpFOrdGreaterThanEqual;
636  case CmpInst::FCMP_OGT:
637  return SPIRV::OpFOrdGreaterThan;
638  case CmpInst::FCMP_OLE:
639  return SPIRV::OpFOrdLessThanEqual;
640  case CmpInst::FCMP_OLT:
641  return SPIRV::OpFOrdLessThan;
642  case CmpInst::FCMP_ONE:
643  return SPIRV::OpFOrdNotEqual;
644  case CmpInst::FCMP_ORD:
645  return SPIRV::OpOrdered;
646  case CmpInst::FCMP_UEQ:
647  return SPIRV::OpFUnordEqual;
648  case CmpInst::FCMP_UGE:
649  return SPIRV::OpFUnordGreaterThanEqual;
650  case CmpInst::FCMP_UGT:
651  return SPIRV::OpFUnordGreaterThan;
652  case CmpInst::FCMP_ULE:
653  return SPIRV::OpFUnordLessThanEqual;
654  case CmpInst::FCMP_ULT:
655  return SPIRV::OpFUnordLessThan;
656  case CmpInst::FCMP_UNE:
657  return SPIRV::OpFUnordNotEqual;
658  case CmpInst::FCMP_UNO:
659  return SPIRV::OpUnordered;
660  default:
661  llvm_unreachable("Unknown predicate type for FCmp");
662  }
663 }
664 
665 static unsigned getICmpOpcode(unsigned PredNum) {
666  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
667  switch (Pred) {
668  case CmpInst::ICMP_EQ:
669  return SPIRV::OpIEqual;
670  case CmpInst::ICMP_NE:
671  return SPIRV::OpINotEqual;
672  case CmpInst::ICMP_SGE:
673  return SPIRV::OpSGreaterThanEqual;
674  case CmpInst::ICMP_SGT:
675  return SPIRV::OpSGreaterThan;
676  case CmpInst::ICMP_SLE:
677  return SPIRV::OpSLessThanEqual;
678  case CmpInst::ICMP_SLT:
679  return SPIRV::OpSLessThan;
680  case CmpInst::ICMP_UGE:
681  return SPIRV::OpUGreaterThanEqual;
682  case CmpInst::ICMP_UGT:
683  return SPIRV::OpUGreaterThan;
684  case CmpInst::ICMP_ULE:
685  return SPIRV::OpULessThanEqual;
686  case CmpInst::ICMP_ULT:
687  return SPIRV::OpULessThan;
688  default:
689  llvm_unreachable("Unknown predicate type for ICmp");
690  }
691 }
692 
693 static unsigned getPtrCmpOpcode(unsigned Pred) {
694  switch (static_cast<CmpInst::Predicate>(Pred)) {
695  case CmpInst::ICMP_EQ:
696  return SPIRV::OpPtrEqual;
697  case CmpInst::ICMP_NE:
698  return SPIRV::OpPtrNotEqual;
699  default:
700  llvm_unreachable("Unknown predicate type for pointer comparison");
701  }
702 }
703 
704 // Return the logical operation, or abort if none exists.
705 static unsigned getBoolCmpOpcode(unsigned PredNum) {
706  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
707  switch (Pred) {
708  case CmpInst::ICMP_EQ:
709  return SPIRV::OpLogicalEqual;
710  case CmpInst::ICMP_NE:
711  return SPIRV::OpLogicalNotEqual;
712  default:
713  llvm_unreachable("Unknown predicate type for Bool comparison");
714  }
715 }
716 
717 bool SPIRVInstructionSelector::selectBitreverse(Register ResVReg,
718  const SPIRVType *ResType,
719  MachineInstr &I) const {
720  MachineBasicBlock &BB = *I.getParent();
721  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpBitReverse))
722  .addDef(ResVReg)
723  .addUse(GR.getSPIRVTypeID(ResType))
724  .addUse(I.getOperand(1).getReg())
725  .constrainAllUses(TII, TRI, RBI);
726 }
727 
728 bool SPIRVInstructionSelector::selectConstVector(Register ResVReg,
729  const SPIRVType *ResType,
730  MachineInstr &I) const {
731  // TODO: only const case is supported for now.
733  I.operands_begin(), I.operands_end(), [this](const MachineOperand &MO) {
734  if (MO.isDef())
735  return true;
736  if (!MO.isReg())
737  return false;
738  SPIRVType *ConstTy = this->MRI->getVRegDef(MO.getReg());
739  assert(ConstTy && ConstTy->getOpcode() == SPIRV::ASSIGN_TYPE &&
740  ConstTy->getOperand(1).isReg());
741  Register ConstReg = ConstTy->getOperand(1).getReg();
742  const MachineInstr *Const = this->MRI->getVRegDef(ConstReg);
743  assert(Const);
744  return (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
745  Const->getOpcode() == TargetOpcode::G_FCONSTANT);
746  }));
747 
748  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
749  TII.get(SPIRV::OpConstantComposite))
750  .addDef(ResVReg)
751  .addUse(GR.getSPIRVTypeID(ResType));
752  for (unsigned i = I.getNumExplicitDefs(); i < I.getNumExplicitOperands(); ++i)
753  MIB.addUse(I.getOperand(i).getReg());
754  return MIB.constrainAllUses(TII, TRI, RBI);
755 }
756 
757 bool SPIRVInstructionSelector::selectCmp(Register ResVReg,
758  const SPIRVType *ResType,
759  unsigned CmpOpc,
760  MachineInstr &I) const {
761  Register Cmp0 = I.getOperand(2).getReg();
762  Register Cmp1 = I.getOperand(3).getReg();
763  assert(GR.getSPIRVTypeForVReg(Cmp0)->getOpcode() ==
764  GR.getSPIRVTypeForVReg(Cmp1)->getOpcode() &&
765  "CMP operands should have the same type");
766  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CmpOpc))
767  .addDef(ResVReg)
768  .addUse(GR.getSPIRVTypeID(ResType))
769  .addUse(Cmp0)
770  .addUse(Cmp1)
771  .constrainAllUses(TII, TRI, RBI);
772 }
773 
774 bool SPIRVInstructionSelector::selectICmp(Register ResVReg,
775  const SPIRVType *ResType,
776  MachineInstr &I) const {
777  auto Pred = I.getOperand(1).getPredicate();
778  unsigned CmpOpc;
779 
780  Register CmpOperand = I.getOperand(2).getReg();
781  if (GR.isScalarOfType(CmpOperand, SPIRV::OpTypePointer))
782  CmpOpc = getPtrCmpOpcode(Pred);
783  else if (GR.isScalarOrVectorOfType(CmpOperand, SPIRV::OpTypeBool))
784  CmpOpc = getBoolCmpOpcode(Pred);
785  else
786  CmpOpc = getICmpOpcode(Pred);
787  return selectCmp(ResVReg, ResType, CmpOpc, I);
788 }
789 
790 void SPIRVInstructionSelector::renderFImm32(MachineInstrBuilder &MIB,
791  const MachineInstr &I,
792  int OpIdx) const {
793  assert(I.getOpcode() == TargetOpcode::G_FCONSTANT && OpIdx == -1 &&
794  "Expected G_FCONSTANT");
795  const ConstantFP *FPImm = I.getOperand(1).getFPImm();
796  addNumImm(FPImm->getValueAPF().bitcastToAPInt(), MIB);
797 }
798 
799 void SPIRVInstructionSelector::renderImm32(MachineInstrBuilder &MIB,
800  const MachineInstr &I,
801  int OpIdx) const {
802  assert(I.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
803  "Expected G_CONSTANT");
804  addNumImm(I.getOperand(1).getCImm()->getValue(), MIB);
805 }
806 
807 Register
808 SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &I,
809  const SPIRVType *ResType) const {
810  const SPIRVType *SpvI32Ty =
811  ResType ? ResType : GR.getOrCreateSPIRVIntegerType(32, I, TII);
812  Register NewReg;
813  NewReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
814  MachineInstr *MI;
815  MachineBasicBlock &BB = *I.getParent();
816  if (Val == 0) {
817  MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
818  .addDef(NewReg)
819  .addUse(GR.getSPIRVTypeID(SpvI32Ty));
820  } else {
821  MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
822  .addDef(NewReg)
823  .addUse(GR.getSPIRVTypeID(SpvI32Ty))
824  .addImm(APInt(32, Val).getZExtValue());
825  }
827  return NewReg;
828 }
829 
830 bool SPIRVInstructionSelector::selectFCmp(Register ResVReg,
831  const SPIRVType *ResType,
832  MachineInstr &I) const {
833  unsigned CmpOp = getFCmpOpcode(I.getOperand(1).getPredicate());
834  return selectCmp(ResVReg, ResType, CmpOp, I);
835 }
836 
// Materialize an all-zeros constant of the given type (buildI32Constant
// emits OpConstantNull for a zero value).
Register SPIRVInstructionSelector::buildZerosVal(const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  return buildI32Constant(0, I, ResType);
}
841 
842 Register SPIRVInstructionSelector::buildOnesVal(bool AllOnes,
843  const SPIRVType *ResType,
844  MachineInstr &I) const {
845  unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
846  APInt One = AllOnes ? APInt::getAllOnesValue(BitWidth)
847  : APInt::getOneBitSet(BitWidth, 0);
848  Register OneReg = buildI32Constant(One.getZExtValue(), I, ResType);
849  if (ResType->getOpcode() == SPIRV::OpTypeVector) {
850  const unsigned NumEles = ResType->getOperand(2).getImm();
851  Register OneVec = MRI->createVirtualRegister(&SPIRV::IDRegClass);
852  unsigned Opcode = SPIRV::OpConstantComposite;
853  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
854  .addDef(OneVec)
855  .addUse(GR.getSPIRVTypeID(ResType));
856  for (unsigned i = 0; i < NumEles; ++i)
857  MIB.addUse(OneReg);
859  return OneVec;
860  }
861  return OneReg;
862 }
863 
864 bool SPIRVInstructionSelector::selectSelect(Register ResVReg,
865  const SPIRVType *ResType,
866  MachineInstr &I,
867  bool IsSigned) const {
868  // To extend a bool, we need to use OpSelect between constants.
869  Register ZeroReg = buildZerosVal(ResType, I);
870  Register OneReg = buildOnesVal(IsSigned, ResType, I);
871  bool IsScalarBool =
872  GR.isScalarOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool);
873  unsigned Opcode =
874  IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectSIVCond;
875  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
876  .addDef(ResVReg)
877  .addUse(GR.getSPIRVTypeID(ResType))
878  .addUse(I.getOperand(1).getReg())
879  .addUse(OneReg)
880  .addUse(ZeroReg)
881  .constrainAllUses(TII, TRI, RBI);
882 }
883 
884 bool SPIRVInstructionSelector::selectIToF(Register ResVReg,
885  const SPIRVType *ResType,
886  MachineInstr &I, bool IsSigned,
887  unsigned Opcode) const {
888  Register SrcReg = I.getOperand(1).getReg();
889  // We can convert bool value directly to float type without OpConvert*ToF,
890  // however the translator generates OpSelect+OpConvert*ToF, so we do the same.
891  if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool)) {
892  unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
893  SPIRVType *TmpType = GR.getOrCreateSPIRVIntegerType(BitWidth, I, TII);
894  if (ResType->getOpcode() == SPIRV::OpTypeVector) {
895  const unsigned NumElts = ResType->getOperand(2).getImm();
896  TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts, I, TII);
897  }
898  SrcReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
899  selectSelect(SrcReg, TmpType, I, false);
900  }
901  return selectUnOpWithSrc(ResVReg, ResType, I, SrcReg, Opcode);
902 }
903 
904 bool SPIRVInstructionSelector::selectExt(Register ResVReg,
905  const SPIRVType *ResType,
906  MachineInstr &I, bool IsSigned) const {
907  if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool))
908  return selectSelect(ResVReg, ResType, I, IsSigned);
909  unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
910  return selectUnOp(ResVReg, ResType, I, Opcode);
911 }
912 
913 bool SPIRVInstructionSelector::selectIntToBool(Register IntReg,
914  Register ResVReg,
915  const SPIRVType *IntTy,
916  const SPIRVType *BoolTy,
917  MachineInstr &I) const {
918  // To truncate to a bool, we use OpBitwiseAnd 1 and OpINotEqual to zero.
919  Register BitIntReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
920  bool IsVectorTy = IntTy->getOpcode() == SPIRV::OpTypeVector;
921  unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
922  Register Zero = buildZerosVal(IntTy, I);
923  Register One = buildOnesVal(false, IntTy, I);
924  MachineBasicBlock &BB = *I.getParent();
925  BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
926  .addDef(BitIntReg)
927  .addUse(GR.getSPIRVTypeID(IntTy))
928  .addUse(IntReg)
929  .addUse(One)
930  .constrainAllUses(TII, TRI, RBI);
931  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
932  .addDef(ResVReg)
933  .addUse(GR.getSPIRVTypeID(BoolTy))
934  .addUse(BitIntReg)
935  .addUse(Zero)
936  .constrainAllUses(TII, TRI, RBI);
937 }
938 
939 bool SPIRVInstructionSelector::selectTrunc(Register ResVReg,
940  const SPIRVType *ResType,
941  MachineInstr &I) const {
942  if (GR.isScalarOrVectorOfType(ResVReg, SPIRV::OpTypeBool)) {
943  Register IntReg = I.getOperand(1).getReg();
944  const SPIRVType *ArgType = GR.getSPIRVTypeForVReg(IntReg);
945  return selectIntToBool(IntReg, ResVReg, ArgType, ResType, I);
946  }
947  bool IsSigned = GR.isScalarOrVectorSigned(ResType);
948  unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
949  return selectUnOp(ResVReg, ResType, I, Opcode);
950 }
951 
952 bool SPIRVInstructionSelector::selectConst(Register ResVReg,
953  const SPIRVType *ResType,
954  const APInt &Imm,
955  MachineInstr &I) const {
956  assert(ResType->getOpcode() != SPIRV::OpTypePointer || Imm.isNullValue());
957  MachineBasicBlock &BB = *I.getParent();
958  if (ResType->getOpcode() == SPIRV::OpTypePointer && Imm.isNullValue()) {
959  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
960  .addDef(ResVReg)
961  .addUse(GR.getSPIRVTypeID(ResType))
962  .constrainAllUses(TII, TRI, RBI);
963  }
964  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
965  .addDef(ResVReg)
966  .addUse(GR.getSPIRVTypeID(ResType));
967  // <=32-bit integers should be caught by the sdag pattern.
968  assert(Imm.getBitWidth() > 32);
969  addNumImm(Imm, MIB);
970  return MIB.constrainAllUses(TII, TRI, RBI);
971 }
972 
973 bool SPIRVInstructionSelector::selectOpUndef(Register ResVReg,
974  const SPIRVType *ResType,
975  MachineInstr &I) const {
976  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
977  .addDef(ResVReg)
978  .addUse(GR.getSPIRVTypeID(ResType))
979  .constrainAllUses(TII, TRI, RBI);
980 }
981 
982 static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
983  assert(MO.isReg());
984  const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
985  if (TypeInst->getOpcode() != SPIRV::ASSIGN_TYPE)
986  return false;
987  assert(TypeInst->getOperand(1).isReg());
988  MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
989  return ImmInst->getOpcode() == TargetOpcode::G_CONSTANT;
990 }
991 
992 static int64_t foldImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
993  const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
994  MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
995  assert(ImmInst->getOpcode() == TargetOpcode::G_CONSTANT);
996  return ImmInst->getOperand(1).getCImm()->getZExtValue();
997 }
998 
999 bool SPIRVInstructionSelector::selectInsertVal(Register ResVReg,
1000  const SPIRVType *ResType,
1001  MachineInstr &I) const {
1002  MachineBasicBlock &BB = *I.getParent();
1003  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeInsert))
1004  .addDef(ResVReg)
1005  .addUse(GR.getSPIRVTypeID(ResType))
1006  // object to insert
1007  .addUse(I.getOperand(3).getReg())
1008  // composite to insert into
1009  .addUse(I.getOperand(2).getReg())
1010  // TODO: support arbitrary number of indices
1011  .addImm(foldImm(I.getOperand(4), MRI))
1012  .constrainAllUses(TII, TRI, RBI);
1013 }
1014 
1015 bool SPIRVInstructionSelector::selectExtractVal(Register ResVReg,
1016  const SPIRVType *ResType,
1017  MachineInstr &I) const {
1018  MachineBasicBlock &BB = *I.getParent();
1019  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
1020  .addDef(ResVReg)
1021  .addUse(GR.getSPIRVTypeID(ResType))
1022  .addUse(I.getOperand(2).getReg())
1023  // TODO: support arbitrary number of indices
1024  .addImm(foldImm(I.getOperand(3), MRI))
1025  .constrainAllUses(TII, TRI, RBI);
1026 }
1027 
1028 bool SPIRVInstructionSelector::selectInsertElt(Register ResVReg,
1029  const SPIRVType *ResType,
1030  MachineInstr &I) const {
1031  if (isImm(I.getOperand(4), MRI))
1032  return selectInsertVal(ResVReg, ResType, I);
1033  MachineBasicBlock &BB = *I.getParent();
1034  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorInsertDynamic))
1035  .addDef(ResVReg)
1036  .addUse(GR.getSPIRVTypeID(ResType))
1037  .addUse(I.getOperand(2).getReg())
1038  .addUse(I.getOperand(3).getReg())
1039  .addUse(I.getOperand(4).getReg())
1040  .constrainAllUses(TII, TRI, RBI);
1041 }
1042 
1043 bool SPIRVInstructionSelector::selectExtractElt(Register ResVReg,
1044  const SPIRVType *ResType,
1045  MachineInstr &I) const {
1046  if (isImm(I.getOperand(3), MRI))
1047  return selectExtractVal(ResVReg, ResType, I);
1048  MachineBasicBlock &BB = *I.getParent();
1049  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorExtractDynamic))
1050  .addDef(ResVReg)
1051  .addUse(GR.getSPIRVTypeID(ResType))
1052  .addUse(I.getOperand(2).getReg())
1053  .addUse(I.getOperand(3).getReg())
1054  .constrainAllUses(TII, TRI, RBI);
1055 }
1056 
1057 bool SPIRVInstructionSelector::selectGEP(Register ResVReg,
1058  const SPIRVType *ResType,
1059  MachineInstr &I) const {
1060  // In general we should also support OpAccessChain instrs here (i.e. not
1061  // PtrAccessChain) but SPIRV-LLVM Translator doesn't emit them at all and so
1062  // do we to stay compliant with its test and more importantly consumers.
1063  unsigned Opcode = I.getOperand(2).getImm() ? SPIRV::OpInBoundsPtrAccessChain
1064  : SPIRV::OpPtrAccessChain;
1065  auto Res = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
1066  .addDef(ResVReg)
1067  .addUse(GR.getSPIRVTypeID(ResType))
1068  // Object to get a pointer to.
1069  .addUse(I.getOperand(3).getReg());
1070  // Adding indices.
1071  for (unsigned i = 4; i < I.getNumExplicitOperands(); ++i)
1072  Res.addUse(I.getOperand(i).getReg());
1073  return Res.constrainAllUses(TII, TRI, RBI);
1074 }
1075 
1076 bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
1077  const SPIRVType *ResType,
1078  MachineInstr &I) const {
1079  MachineBasicBlock &BB = *I.getParent();
1080  switch (I.getIntrinsicID()) {
1081  case Intrinsic::spv_load:
1082  return selectLoad(ResVReg, ResType, I);
1083  break;
1084  case Intrinsic::spv_store:
1085  return selectStore(I);
1086  break;
1087  case Intrinsic::spv_extractv:
1088  return selectExtractVal(ResVReg, ResType, I);
1089  break;
1090  case Intrinsic::spv_insertv:
1091  return selectInsertVal(ResVReg, ResType, I);
1092  break;
1093  case Intrinsic::spv_extractelt:
1094  return selectExtractElt(ResVReg, ResType, I);
1095  break;
1096  case Intrinsic::spv_insertelt:
1097  return selectInsertElt(ResVReg, ResType, I);
1098  break;
1099  case Intrinsic::spv_gep:
1100  return selectGEP(ResVReg, ResType, I);
1101  break;
1102  case Intrinsic::spv_unref_global:
1103  case Intrinsic::spv_init_global: {
1104  MachineInstr *MI = MRI->getVRegDef(I.getOperand(1).getReg());
1105  MachineInstr *Init = I.getNumExplicitOperands() > 2
1106  ? MRI->getVRegDef(I.getOperand(2).getReg())
1107  : nullptr;
1108  assert(MI);
1109  return selectGlobalValue(MI->getOperand(0).getReg(), *MI, Init);
1110  } break;
1111  case Intrinsic::spv_const_composite: {
1112  // If no values are attached, the composite is null constant.
1113  bool IsNull = I.getNumExplicitDefs() + 1 == I.getNumExplicitOperands();
1114  unsigned Opcode =
1115  IsNull ? SPIRV::OpConstantNull : SPIRV::OpConstantComposite;
1116  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
1117  .addDef(ResVReg)
1118  .addUse(GR.getSPIRVTypeID(ResType));
1119  // skip type MD node we already used when generated assign.type for this
1120  if (!IsNull) {
1121  for (unsigned i = I.getNumExplicitDefs() + 1;
1122  i < I.getNumExplicitOperands(); ++i) {
1123  MIB.addUse(I.getOperand(i).getReg());
1124  }
1125  }
1126  return MIB.constrainAllUses(TII, TRI, RBI);
1127  } break;
1128  case Intrinsic::spv_assign_name: {
1129  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpName));
1130  MIB.addUse(I.getOperand(I.getNumExplicitDefs() + 1).getReg());
1131  for (unsigned i = I.getNumExplicitDefs() + 2;
1132  i < I.getNumExplicitOperands(); ++i) {
1133  MIB.addImm(I.getOperand(i).getImm());
1134  }
1135  return MIB.constrainAllUses(TII, TRI, RBI);
1136  } break;
1137  case Intrinsic::spv_switch: {
1138  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSwitch));
1139  for (unsigned i = 1; i < I.getNumExplicitOperands(); ++i) {
1140  if (I.getOperand(i).isReg())
1141  MIB.addReg(I.getOperand(i).getReg());
1142  else if (I.getOperand(i).isCImm())
1143  addNumImm(I.getOperand(i).getCImm()->getValue(), MIB);
1144  else if (I.getOperand(i).isMBB())
1145  MIB.addMBB(I.getOperand(i).getMBB());
1146  else
1147  llvm_unreachable("Unexpected OpSwitch operand");
1148  }
1149  return MIB.constrainAllUses(TII, TRI, RBI);
1150  } break;
1151  default:
1152  llvm_unreachable("Intrinsic selection not implemented");
1153  }
1154  return true;
1155 }
1156 
1157 bool SPIRVInstructionSelector::selectFrameIndex(Register ResVReg,
1158  const SPIRVType *ResType,
1159  MachineInstr &I) const {
1160  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
1161  .addDef(ResVReg)
1162  .addUse(GR.getSPIRVTypeID(ResType))
1163  .addImm(static_cast<uint32_t>(SPIRV::StorageClass::Function))
1164  .constrainAllUses(TII, TRI, RBI);
1165 }
1166 
1167 bool SPIRVInstructionSelector::selectBranch(MachineInstr &I) const {
1168  // InstructionSelector walks backwards through the instructions. We can use
1169  // both a G_BR and a G_BRCOND to create an OpBranchConditional. We hit G_BR
1170  // first, so can generate an OpBranchConditional here. If there is no
1171  // G_BRCOND, we just use OpBranch for a regular unconditional branch.
1172  const MachineInstr *PrevI = I.getPrevNode();
1173  MachineBasicBlock &MBB = *I.getParent();
1174  if (PrevI != nullptr && PrevI->getOpcode() == TargetOpcode::G_BRCOND) {
1175  return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
1176  .addUse(PrevI->getOperand(0).getReg())
1177  .addMBB(PrevI->getOperand(1).getMBB())
1178  .addMBB(I.getOperand(0).getMBB())
1179  .constrainAllUses(TII, TRI, RBI);
1180  }
1181  return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranch))
1182  .addMBB(I.getOperand(0).getMBB())
1183  .constrainAllUses(TII, TRI, RBI);
1184 }
1185 
1186 bool SPIRVInstructionSelector::selectBranchCond(MachineInstr &I) const {
1187  // InstructionSelector walks backwards through the instructions. For an
1188  // explicit conditional branch with no fallthrough, we use both a G_BR and a
1189  // G_BRCOND to create an OpBranchConditional. We should hit G_BR first, and
1190  // generate the OpBranchConditional in selectBranch above.
1191  //
1192  // If an OpBranchConditional has been generated, we simply return, as the work
1193  // is alread done. If there is no OpBranchConditional, LLVM must be relying on
1194  // implicit fallthrough to the next basic block, so we need to create an
1195  // OpBranchConditional with an explicit "false" argument pointing to the next
1196  // basic block that LLVM would fall through to.
1197  const MachineInstr *NextI = I.getNextNode();
1198  // Check if this has already been successfully selected.
1199  if (NextI != nullptr && NextI->getOpcode() == SPIRV::OpBranchConditional)
1200  return true;
1201  // Must be relying on implicit block fallthrough, so generate an
1202  // OpBranchConditional with the "next" basic block as the "false" target.
1203  MachineBasicBlock &MBB = *I.getParent();
1204  unsigned NextMBBNum = MBB.getNextNode()->getNumber();
1205  MachineBasicBlock *NextMBB = I.getMF()->getBlockNumbered(NextMBBNum);
1206  return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
1207  .addUse(I.getOperand(0).getReg())
1208  .addMBB(I.getOperand(1).getMBB())
1209  .addMBB(NextMBB)
1210  .constrainAllUses(TII, TRI, RBI);
1211 }
1212 
1213 bool SPIRVInstructionSelector::selectPhi(Register ResVReg,
1214  const SPIRVType *ResType,
1215  MachineInstr &I) const {
1216  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpPhi))
1217  .addDef(ResVReg)
1218  .addUse(GR.getSPIRVTypeID(ResType));
1219  const unsigned NumOps = I.getNumOperands();
1220  for (unsigned i = 1; i < NumOps; i += 2) {
1221  MIB.addUse(I.getOperand(i + 0).getReg());
1222  MIB.addMBB(I.getOperand(i + 1).getMBB());
1223  }
1224  return MIB.constrainAllUses(TII, TRI, RBI);
1225 }
1226 
1227 bool SPIRVInstructionSelector::selectGlobalValue(
1228  Register ResVReg, MachineInstr &I, const MachineInstr *Init) const {
1229  // FIXME: don't use MachineIRBuilder here, replace it with BuildMI.
1230  MachineIRBuilder MIRBuilder(I);
1231  const GlobalValue *GV = I.getOperand(1).getGlobal();
1232  SPIRVType *ResType = GR.getOrCreateSPIRVType(
1233  GV->getType(), MIRBuilder, SPIRV::AccessQualifier::ReadWrite, false);
1234 
1235  std::string GlobalIdent = GV->getGlobalIdentifier();
1236  // TODO: suport @llvm.global.annotations.
1237  auto GlobalVar = cast<GlobalVariable>(GV);
1238 
1239  bool HasInit = GlobalVar->hasInitializer() &&
1240  !isa<UndefValue>(GlobalVar->getInitializer());
1241  // Skip empty declaration for GVs with initilaizers till we get the decl with
1242  // passed initializer.
1243  if (HasInit && !Init)
1244  return true;
1245 
1246  unsigned AddrSpace = GV->getAddressSpace();
1247  SPIRV::StorageClass Storage = addressSpaceToStorageClass(AddrSpace);
1248  bool HasLnkTy = GV->getLinkage() != GlobalValue::InternalLinkage &&
1249  Storage != SPIRV::StorageClass::Function;
1250  SPIRV::LinkageType LnkType =
1252  ? SPIRV::LinkageType::Import
1253  : SPIRV::LinkageType::Export;
1254 
1255  Register Reg = GR.buildGlobalVariable(ResVReg, ResType, GlobalIdent, GV,
1256  Storage, Init, GlobalVar->isConstant(),
1257  HasLnkTy, LnkType, MIRBuilder, true);
1258  return Reg.isValid();
1259 }
1260 
1261 namespace llvm {
1264  const SPIRVSubtarget &Subtarget,
1265  const RegisterBankInfo &RBI) {
1266  return new SPIRVInstructionSelector(TM, Subtarget, RBI);
1267 }
1268 } // namespace llvm
i
i
Definition: README.txt:29
llvm::GlobalValue::getGlobalIdentifier
static std::string getGlobalIdentifier(StringRef Name, GlobalValue::LinkageTypes Linkage, StringRef FileName)
Return the modified name for a global value suitable to be used as the key for a global lookup (e....
Definition: Globals.cpp:142
llvm::SPIRV::Scope
Scope
Definition: SPIRVBaseInfo.h:676
GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_PREDICATES_INIT
getName
static StringRef getName(Value *V)
Definition: ProvenanceAnalysisEvaluator.cpp:42
MI
IRTranslator LLVM IR MI
Definition: IRTranslator.cpp:104
llvm::MachineInstrBuilder::addImm
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
Definition: MachineInstrBuilder.h:131
llvm
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:17
llvm::tgtok::Def
@ Def
Definition: TGLexer.h:50
llvm::SPIRV::LinkageType
LinkageType
Definition: SPIRVBaseInfo.h:431
llvm::MachineRegisterInfo::createVirtualRegister
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
Definition: MachineRegisterInfo.cpp:156
llvm::CmpInst::Predicate
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:719
llvm::GISelKnownBits
Definition: GISelKnownBits.h:29
llvm::MachineRegisterInfo
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Definition: MachineRegisterInfo.h:50
llvm::GlobalValue::getLinkage
LinkageTypes getLinkage() const
Definition: GlobalValue.h:509
llvm::ilist_node_with_parent::getNextNode
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition: ilist_node.h:289
llvm::isPreISelGenericOpcode
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
Definition: TargetOpcodes.h:30
isTypeFoldingSupported
bool isTypeFoldingSupported(unsigned Opcode)
Definition: SPIRVLegalizerInfo.cpp:53
llvm::X86Disassembler::Reg
Reg
All possible values of the reg field in the ModR/M byte.
Definition: X86DisassemblerDecoder.h:462
llvm::MemOp
Definition: TargetLowering.h:111
llvm::MachineMemOperand
A description of a memory reference used in the backend.
Definition: MachineMemOperand.h:127
llvm::SPIRVSubtarget
Definition: SPIRVSubtarget.h:36
llvm::ConstantFP::getValueAPF
const APFloat & getValueAPF() const
Definition: Constants.h:298
llvm::errs
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
Definition: raw_ostream.cpp:893
TRI
unsigned const TargetRegisterInfo * TRI
Definition: MachineSink.cpp:1628
GET_GLOBALISEL_TEMPORARIES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
LLVM_DEBUG
#define LLVM_DEBUG(X)
Definition: Debug.h:101
llvm::constrainSelectedInstRegOperands
bool constrainSelectedInstRegOperands(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Mutate the newly-selected instruction I to constrain its (possibly generic) virtual register operands...
Definition: Utils.cpp:150
isImm
static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI)
Definition: SPIRVInstructionSelector.cpp:982
MachineRegisterInfo.h
include
include(LLVM-Build) add_subdirectory(IR) add_subdirectory(FuzzMutate) add_subdirectory(FileCheck) add_subdirectory(InterfaceStub) add_subdirectory(IRReader) add_subdirectory(CodeGen) add_subdirectory(BinaryFormat) add_subdirectory(Bitcode) add_subdirectory(Bitstream) add_subdirectory(DWARFLinker) add_subdirectory(Extensions) add_subdirectory(Frontend) add_subdirectory(Transforms) add_subdirectory(Linker) add_subdirectory(Analysis) add_subdirectory(LTO) add_subdirectory(MC) add_subdirectory(MCA) add_subdirectory(ObjCopy) add_subdirectory(Object) add_subdirectory(ObjectYAML) add_subdirectory(Option) add_subdirectory(Remarks) add_subdirectory(Debuginfod) add_subdirectory(DebugInfo) add_subdirectory(DWP) add_subdirectory(ExecutionEngine) add_subdirectory(Target) add_subdirectory(AsmParser) add_subdirectory(LineEditor) add_subdirectory(ProfileData) add_subdirectory(Passes) add_subdirectory(TextAPI) add_subdirectory(ToolDrivers) add_subdirectory(XRay) if(LLVM_INCLUDE_TESTS) add_subdirectory(Testing) endif() add_subdirectory(WindowsDriver) add_subdirectory(WindowsManifest) set(LLVMCONFIGLIBRARYDEPENDENCIESINC "$
Definition: CMakeLists.txt:1
llvm::MachineInstrBuilder::addDef
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Definition: MachineInstrBuilder.h:116
llvm::all_of
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1617
llvm::BlockFrequencyInfo
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Definition: BlockFrequencyInfo.h:37
llvm::MachineFunction::getRegInfo
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Definition: MachineFunction.h:666
select
into xmm2 addss xmm2 xmm1 xmm3 addss xmm3 movaps xmm0 unpcklps xmm0 ret seems silly when it could just be one addps Expand libm rounding functions main should enable SSE DAZ mode and other fast SSE modes Think about doing i64 math in SSE regs on x86 This testcase should have no SSE instructions in and only one load from a constant double ret double C the select is being which prevents the dag combiner from turning select(load CPI1)
llvm::MachineInstrBuilder::addMBB
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
Definition: MachineInstrBuilder.h:146
llvm::GlobalValue::isDeclaration
bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
Definition: Globals.cpp:264
llvm::PPCISD::SC
@ SC
CHAIN = SC CHAIN, Imm128 - System call.
Definition: PPCISelLowering.h:418
llvm::MachineRegisterInfo::setType
void setType(Register VReg, LLT Ty)
Set the low-level type of VReg to Ty.
Definition: MachineRegisterInfo.cpp:180
llvm::MachineOperand::getImm
int64_t getImm() const
Definition: MachineOperand.h:546
llvm::MachineInstr::getOperand
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:501
llvm::ms_demangle::QualifierMangleMode::Result
@ Result
TII
const HexagonInstrInfo * TII
Definition: HexagonCopyToCombine.cpp:125
llvm::MachineOperand
MachineOperand class - Representation of each machine instruction operand.
Definition: MachineOperand.h:48
SPIRVRegisterInfo.h
SPIRVUtils.h
llvm::APInt::getZExtValue
uint64_t getZExtValue() const
Get zero extended value.
Definition: APInt.h:1466
llvm::ConstantFP
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:257
APFloat.h
llvm::SyncScope::System
@ System
Synchronized with respect to all concurrently executing threads.
Definition: LLVMContext.h:58
llvm::APFloat::bitcastToAPInt
APInt bitcastToAPInt() const
Definition: APFloat.h:1130
Register
Promote Memory to Register
Definition: Mem2Reg.cpp:110
llvm::MachineRegisterInfo::getVRegDef
MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
Definition: MachineRegisterInfo.cpp:396
llvm::None
const NoneType None
Definition: None.h:24
foldImm
static int64_t foldImm(const MachineOperand &MO, MachineRegisterInfo *MRI)
Definition: SPIRVInstructionSelector.cpp:992
llvm::MachineBasicBlock
Definition: MachineBasicBlock.h:94
llvm::ProfileSummaryInfo
Analysis providing profile information.
Definition: ProfileSummaryInfo.h:39
getMemSemanticsForStorageClass
SPIRV::MemorySemantics getMemSemanticsForStorageClass(SPIRV::StorageClass SC)
Definition: SPIRVUtils.cpp:167
llvm::SPIRV::MemorySemantics
MemorySemantics
Definition: SPIRVBaseInfo.h:647
InstructionSelector.h
SPIRVTargetMachine.h
SPIRVInstrInfo.h
llvm::AtomicOrdering
AtomicOrdering
Atomic ordering for LLVM's memory model.
Definition: AtomicOrdering.h:56
llvm::SPIRVRegisterInfo
Definition: SPIRVRegisterInfo.h:23
llvm::GlobalValue::hasAvailableExternallyLinkage
bool hasAvailableExternallyLinkage() const
Definition: GlobalValue.h:477
addressSpaceToStorageClass
SPIRV::StorageClass addressSpaceToStorageClass(unsigned AddrSpace)
Definition: SPIRVUtils.cpp:148
llvm::RegisterBankInfo
Holds all the information related to register banks.
Definition: RegisterBankInfo.h:39
llvm::GlobalValue
Definition: GlobalValue.h:44
llvm::InstructionSelector
Provides the logic to select generic machine instructions.
Definition: InstructionSelector.h:424
llvm::MachineIRBuilder
Helper class to build MachineInstr.
Definition: MachineIRBuilder.h:219
llvm::CodeGenCoverage
Definition: CodeGenCoverage.h:19
isGenericCastablePtr
static bool isGenericCastablePtr(SPIRV::StorageClass SC)
Definition: SPIRVInstructionSelector.cpp:576
llvm::MachineOperand::isReg
bool isReg() const
isReg - Tests if this is a MO_Register operand.
Definition: MachineOperand.h:320
llvm::MachineOperand::getCImm
const ConstantInt * getCImm() const
Definition: MachineOperand.h:551
llvm::MachineInstr
Representation of each machine instruction.
Definition: MachineInstr.h:66
llvm::MachineInstrBuilder
Definition: MachineInstrBuilder.h:69
SPIRVGlobalRegistry.h
llvm::SPIRV::StorageClass
StorageClass
Definition: SPIRVBaseInfo.h:255
uint64_t
llvm::ARM_MB::ST
@ ST
Definition: ARMBaseInfo.h:73
DEBUG_TYPE
#define DEBUG_TYPE
Definition: SPIRVInstructionSelector.cpp:30
Generic
@ Generic
Definition: AArch64MCAsmInfo.cpp:23
llvm::SyncScope::ID
uint8_t ID
Definition: LLVMContext.h:47
I
#define I(x, y, z)
Definition: MD5.cpp:58
addNumImm
void addNumImm(const APInt &Imm, MachineInstrBuilder &MIB)
Definition: SPIRVUtils.cpp:69
SPIRV.h
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
getMemSemantics
static SPIRV::MemorySemantics getMemSemantics(AtomicOrdering Ord)
Definition: SPIRVInstructionSelector.cpp:390
llvm::MachineRegisterInfo::createGenericVirtualRegister
Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
Definition: MachineRegisterInfo.cpp:186
llvm::SPIRVGlobalRegistry
Definition: SPIRVGlobalRegistry.h:26
llvm::MachineInstrBuilder::addReg
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Definition: MachineInstrBuilder.h:97
llvm::MachineInstrBuilder::addUse
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
Definition: MachineInstrBuilder.h:123
llvm::MachineOperand::getReg
Register getReg() const
getReg - Returns the register number.
Definition: MachineOperand.h:359
llvm::MachineInstrBuilder::constrainAllUses
bool constrainAllUses(const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI) const
Definition: MachineInstrBuilder.h:320
llvm::APInt
Class for arbitrary precision integers.
Definition: APInt.h:75
llvm::MachineFunction
Definition: MachineFunction.h:257
addMemoryOperands
static void addMemoryOperands(MachineMemOperand *MemOp, MachineInstrBuilder &MIB)
Definition: SPIRVInstructionSelector.cpp:418
llvm::SPIRVInstrInfo
Definition: SPIRVInstrInfo.h:24
llvm::MachineOperand::getMBB
MachineBasicBlock * getMBB() const
Definition: MachineOperand.h:561
llvm::MachineBasicBlock::getNumber
int getNumber() const
MachineBasicBlocks are uniquely numbered at the function level, unless they're not in a MachineFuncti...
Definition: MachineBasicBlock.h:1088
llvm::MachineInstr::getOpcode
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:491
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:143
uint32_t
DL
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Definition: AArch64SLSHardening.cpp:76
llvm::AMDGPUISD::BFI
@ BFI
Definition: AMDGPUISelLowering.h:429
InstructionSelectorImpl.h
llvm::Init
Definition: Record.h:281
MRI
unsigned const MachineRegisterInfo * MRI
Definition: AArch64AdvSIMDScalarPass.cpp:105
llvm::SPIRVTargetMachine
Definition: SPIRVTargetMachine.h:20
llvm::Register
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
llvm::ConstantInt::getZExtValue
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition: Constants.h:142
llvm::MachineRegisterInfo::replaceRegWith
void replaceRegWith(Register FromReg, Register ToReg)
replaceRegWith - Replace all instances of FromReg with ToReg in the machine function.
Definition: MachineRegisterInfo.cpp:378
MBB
MachineBasicBlock & MBB
Definition: AArch64SLSHardening.cpp:74
getPtrCmpOpcode
static unsigned getPtrCmpOpcode(unsigned Pred)
Definition: SPIRVInstructionSelector.cpp:693
llvm::X86::FirstMacroFusionInstKind::Cmp
@ Cmp
Success
#define Success
Definition: AArch64Disassembler.cpp:280
getFCmpOpcode
static unsigned getFCmpOpcode(unsigned PredNum)
Definition: SPIRVInstructionSelector.cpp:629
llvm::BitWidth
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:147
SPIRVRegisterBankInfo.h
Unordered
QP Compare Ordered Unordered
Definition: README_P9.txt:299
llvm::GlobalValue::getAddressSpace
unsigned getAddressSpace() const
Definition: Globals.cpp:121
getICmpOpcode
static unsigned getICmpOpcode(unsigned PredNum)
Definition: SPIRVInstructionSelector.cpp:665
llvm::RISCVMatInt::Imm
@ Imm
Definition: RISCVMatInt.h:23
MachineInstrBuilder.h
llvm::BuildMI
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
Definition: MachineInstrBuilder.h:328
llvm::Register::isValid
bool isValid() const
Definition: Register.h:126
getBoolCmpOpcode
static unsigned getBoolCmpOpcode(unsigned PredNum)
Definition: SPIRVInstructionSelector.cpp:705
llvm::createSPIRVInstructionSelector
InstructionSelector * createSPIRVInstructionSelector(const SPIRVTargetMachine &TM, const SPIRVSubtarget &Subtarget, const RegisterBankInfo &RBI)
Definition: SPIRVInstructionSelector.cpp:1263
llvm::lltok::GlobalVar
@ GlobalVar
Definition: LLToken.h:416
llvm::GlobalValue::getType
PointerType * getType() const
Global values are always pointers.
Definition: GlobalValue.h:276
TM
const char LLVMTargetMachineRef TM
Definition: PassBuilderBindings.cpp:47
BB
Common register allocation spilling lr str ldr sxth r3 ldr mla r4 can lr mov lr str ldr sxth r3 mla r4 and then merge mul and lr str ldr sxth r3 mla r4 It also increase the likelihood the store may become dead bb27 Successors according to LLVM BB
Definition: README.txt:39
llvm::DebugLoc
A debug info location.
Definition: DebugLoc.h:33
llvm::mca::selectImpl
static uint64_t selectImpl(uint64_t CandidateMask, uint64_t &NextInSequenceMask)
Definition: ResourceManager.cpp:26
Debug.h
getScope
static SPIRV::Scope getScope(SyncScope::ID Ord)
Definition: SPIRVInstructionSelector.cpp:407
llvm::SyncScope::SingleThread
@ SingleThread
Synchronized with respect to signal handlers executing in the same thread.
Definition: LLVMContext.h:55
llvm::Intrinsic::ID
unsigned ID
Definition: TargetTransformInfo.h:38