1 //===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements the targeting of the InstructionSelector class for
10 /// AMDGPU.
11 /// \todo This should be generated by TableGen.
12 //===----------------------------------------------------------------------===//
13 
15 #include "AMDGPUInstrInfo.h"
16 #include "AMDGPURegisterBankInfo.h"
17 #include "AMDGPURegisterInfo.h"
18 #include "AMDGPUSubtarget.h"
19 #include "AMDGPUTargetMachine.h"
21 #include "SIMachineFunctionInfo.h"
33 #include "llvm/IR/Type.h"
34 #include "llvm/Support/Debug.h"
36 
37 #define DEBUG_TYPE "amdgpu-isel"
38 
39 using namespace llvm;
40 using namespace MIPatternMatch;
41 
42 #define GET_GLOBALISEL_IMPL
43 #define AMDGPUSubtarget GCNSubtarget
44 #include "AMDGPUGenGlobalISel.inc"
45 #undef GET_GLOBALISEL_IMPL
46 #undef AMDGPUSubtarget
47 
48 AMDGPUInstructionSelector::AMDGPUInstructionSelector(
49  const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI,
50  const AMDGPUTargetMachine &TM)
51  : InstructionSelector(), TII(*STI.getInstrInfo()),
52  TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
53  STI(STI),
54  EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG),
55 #define GET_GLOBALISEL_PREDICATES_INIT
56 #include "AMDGPUGenGlobalISel.inc"
57 #undef GET_GLOBALISEL_PREDICATES_INIT
58 #define GET_GLOBALISEL_TEMPORARIES_INIT
59 #include "AMDGPUGenGlobalISel.inc"
60 #undef GET_GLOBALISEL_TEMPORARIES_INIT
61 {
62 }
63 
64 const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }
65 
66 void AMDGPUInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits &KB,
67  CodeGenCoverage &CoverageInfo) {
68  MRI = &MF.getRegInfo();
69  InstructionSelector::setupMF(MF, KB, CoverageInfo);
70 }
71 
72 static bool isSCC(Register Reg, const MachineRegisterInfo &MRI) {
73  if (Register::isPhysicalRegister(Reg))
74  return Reg == AMDGPU::SCC;
75 
76  auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
77  const TargetRegisterClass *RC =
78  RegClassOrBank.dyn_cast<const TargetRegisterClass*>();
79  if (RC) {
80  // FIXME: This is ambiguous for wave32. This could be SCC or VCC, but the
81  // context of the register bank has been lost.
82  if (RC->getID() != AMDGPU::SReg_32_XM0RegClassID)
83  return false;
84  const LLT Ty = MRI.getType(Reg);
85  return Ty.isValid() && Ty.getSizeInBits() == 1;
86  }
87 
88  const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>();
89  return RB->getID() == AMDGPU::SCCRegBankID;
90 }
91 
92 bool AMDGPUInstructionSelector::isVCC(Register Reg,
93  const MachineRegisterInfo &MRI) const {
94  if (Register::isPhysicalRegister(Reg))
95  return Reg == TRI.getVCC();
96 
97  auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
98  const TargetRegisterClass *RC =
99  RegClassOrBank.dyn_cast<const TargetRegisterClass*>();
100  if (RC) {
101  const LLT Ty = MRI.getType(Reg);
102  return RC->hasSuperClassEq(TRI.getBoolRC()) &&
103  Ty.isValid() && Ty.getSizeInBits() == 1;
104  }
105 
106  const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>();
107  return RB->getID() == AMDGPU::VCCRegBankID;
108 }
109 
110 bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
111  const DebugLoc &DL = I.getDebugLoc();
112  MachineBasicBlock *BB = I.getParent();
113  I.setDesc(TII.get(TargetOpcode::COPY));
114 
115  const MachineOperand &Src = I.getOperand(1);
116  MachineOperand &Dst = I.getOperand(0);
117  Register DstReg = Dst.getReg();
118  Register SrcReg = Src.getReg();
119 
120  if (isVCC(DstReg, *MRI)) {
121  if (SrcReg == AMDGPU::SCC) {
122  const TargetRegisterClass *RC
123  = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
124  if (!RC)
125  return true;
126  return RBI.constrainGenericRegister(DstReg, *RC, *MRI);
127  }
128 
129  if (!isVCC(SrcReg, *MRI)) {
130  // TODO: Should probably leave the copy and let copyPhysReg expand it.
131  if (!RBI.constrainGenericRegister(DstReg, *TRI.getBoolRC(), *MRI))
132  return false;
133 
134  BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CMP_NE_U32_e64), DstReg)
135  .addImm(0)
136  .addReg(SrcReg);
137 
138  if (!MRI->getRegClassOrNull(SrcReg))
139  MRI->setRegClass(SrcReg, TRI.getConstrainedRegClassForOperand(Src, *MRI));
140  I.eraseFromParent();
141  return true;
142  }
143 
144  const TargetRegisterClass *RC =
145  TRI.getConstrainedRegClassForOperand(Dst, *MRI);
146  if (RC && !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
147  return false;
148 
149  // Don't constrain the source register to a class so the def instruction
150  // handles it (unless it's undef).
151  //
152  // FIXME: This is a hack. When selecting the def, we need to know
153  // specifically that the result is VCCRegBank, and not just an SGPR
154  // with size 1. An SReg_32 with size 1 is ambiguous with wave32.
155  if (Src.isUndef()) {
156  const TargetRegisterClass *SrcRC =
157  TRI.getConstrainedRegClassForOperand(Src, *MRI);
158  if (SrcRC && !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
159  return false;
160  }
161 
162  return true;
163  }
164 
165  for (const MachineOperand &MO : I.operands()) {
166  if (Register::isPhysicalRegister(MO.getReg()))
167  continue;
168 
169  const TargetRegisterClass *RC =
170  TRI.getConstrainedRegClassForOperand(MO, *MRI);
171  if (!RC)
172  continue;
173  RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI);
174  }
175  return true;
176 }
177 
178 bool AMDGPUInstructionSelector::selectPHI(MachineInstr &I) const {
179  const Register DefReg = I.getOperand(0).getReg();
180  const LLT DefTy = MRI->getType(DefReg);
181 
182  // TODO: Verify this doesn't have insane operands (i.e. VGPR to SGPR copy)
183 
184  const RegClassOrRegBank &RegClassOrBank =
185  MRI->getRegClassOrRegBank(DefReg);
186 
187  const TargetRegisterClass *DefRC
188  = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
189  if (!DefRC) {
190  if (!DefTy.isValid()) {
191  LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
192  return false;
193  }
194 
195  const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
196  if (RB.getID() == AMDGPU::SCCRegBankID) {
197  LLVM_DEBUG(dbgs() << "illegal scc phi\n");
198  return false;
199  }
200 
201  DefRC = TRI.getRegClassForTypeOnBank(DefTy, RB, *MRI);
202  if (!DefRC) {
203  LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
204  return false;
205  }
206  }
207 
208  I.setDesc(TII.get(TargetOpcode::PHI));
209  return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
210 }
211 
212 MachineOperand
213 AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
214  const TargetRegisterClass &SubRC,
215  unsigned SubIdx) const {
216 
217  MachineInstr *MI = MO.getParent();
218  MachineBasicBlock *BB = MO.getParent()->getParent();
219  Register DstReg = MRI->createVirtualRegister(&SubRC);
220 
221  if (MO.isReg()) {
222  unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
223  Register Reg = MO.getReg();
224  BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
225  .addReg(Reg, 0, ComposedSubIdx);
226 
227  return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
228  MO.isKill(), MO.isDead(), MO.isUndef(),
229  MO.isEarlyClobber(), 0, MO.isDebug(),
230  MO.isInternalRead());
231  }
232 
233  assert(MO.isImm());
234 
235  APInt Imm(64, MO.getImm());
236 
237  switch (SubIdx) {
238  default:
239  llvm_unreachable("do not know how to split immediate with this sub index.");
240  case AMDGPU::sub0:
241  return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
242  case AMDGPU::sub1:
243  return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
244  }
245 }
246 
247 static unsigned getLogicalBitOpcode(unsigned Opc, bool Is64) {
248  switch (Opc) {
249  case AMDGPU::G_AND:
250  return Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
251  case AMDGPU::G_OR:
252  return Is64 ? AMDGPU::S_OR_B64 : AMDGPU::S_OR_B32;
253  case AMDGPU::G_XOR:
254  return Is64 ? AMDGPU::S_XOR_B64 : AMDGPU::S_XOR_B32;
255  default:
256  llvm_unreachable("not a bit op");
257  }
258 }
259 
260 bool AMDGPUInstructionSelector::selectG_AND_OR_XOR(MachineInstr &I) const {
261  MachineOperand &Dst = I.getOperand(0);
262  MachineOperand &Src0 = I.getOperand(1);
263  MachineOperand &Src1 = I.getOperand(2);
264  Register DstReg = Dst.getReg();
265  unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
266 
267  const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
268  if (DstRB->getID() == AMDGPU::VCCRegBankID) {
269  const TargetRegisterClass *RC = TRI.getBoolRC();
270  unsigned InstOpc = getLogicalBitOpcode(I.getOpcode(),
271  RC == &AMDGPU::SReg_64RegClass);
272  I.setDesc(TII.get(InstOpc));
273 
274  // FIXME: Hack to avoid turning the register bank into a register class.
275  // The selector for G_ICMP relies on seeing that the register bank for the result
276  // is VCC. In wave32 if we constrain the registers to SReg_32 here, it will
277  // be ambiguous whether it's a scalar or vector bool.
278  if (Src0.isUndef() && !MRI->getRegClassOrNull(Src0.getReg()))
279  MRI->setRegClass(Src0.getReg(), RC);
280  if (Src1.isUndef() && !MRI->getRegClassOrNull(Src1.getReg()))
281  MRI->setRegClass(Src1.getReg(), RC);
282 
283  return RBI.constrainGenericRegister(DstReg, *RC, *MRI);
284  }
285 
286  // TODO: Should this allow an SCC bank result, and produce a copy from SCC for
287  // the result?
288  if (DstRB->getID() == AMDGPU::SGPRRegBankID) {
289  unsigned InstOpc = getLogicalBitOpcode(I.getOpcode(), Size > 32);
290  I.setDesc(TII.get(InstOpc));
291  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
292  }
293 
294  return false;
295 }
296 
297 bool AMDGPUInstructionSelector::selectG_ADD_SUB(MachineInstr &I) const {
298  MachineBasicBlock *BB = I.getParent();
299  MachineFunction *MF = BB->getParent();
300  Register DstReg = I.getOperand(0).getReg();
301  const DebugLoc &DL = I.getDebugLoc();
302  unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
303  const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
304  const bool IsSALU = DstRB->getID() == AMDGPU::SGPRRegBankID;
305  const bool Sub = I.getOpcode() == TargetOpcode::G_SUB;
306 
307  if (Size == 32) {
308  if (IsSALU) {
309  const unsigned Opc = Sub ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
310  MachineInstr *Add =
311  BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
312  .add(I.getOperand(1))
313  .add(I.getOperand(2));
314  I.eraseFromParent();
315  return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
316  }
317 
318  if (STI.hasAddNoCarry()) {
319  const unsigned Opc = Sub ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_ADD_U32_e64;
320  I.setDesc(TII.get(Opc));
321  I.addOperand(*MF, MachineOperand::CreateImm(0));
322  I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
323  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
324  }
325 
326  const unsigned Opc = Sub ? AMDGPU::V_SUB_I32_e64 : AMDGPU::V_ADD_I32_e64;
327 
328  Register UnusedCarry = MRI->createVirtualRegister(TRI.getWaveMaskRegClass());
329  MachineInstr *Add
330  = BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
331  .addDef(UnusedCarry, RegState::Dead)
332  .add(I.getOperand(1))
333  .add(I.getOperand(2))
334  .addImm(0);
335  I.eraseFromParent();
336  return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
337  }
338 
339  assert(!Sub && "illegal sub should not reach here");
340 
341  const TargetRegisterClass &RC
342  = IsSALU ? AMDGPU::SReg_64_XEXECRegClass : AMDGPU::VReg_64RegClass;
343  const TargetRegisterClass &HalfRC
344  = IsSALU ? AMDGPU::SReg_32RegClass : AMDGPU::VGPR_32RegClass;
345 
346  MachineOperand Lo1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub0));
347  MachineOperand Lo2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub0));
348  MachineOperand Hi1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub1));
349  MachineOperand Hi2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub1));
350 
351  Register DstLo = MRI->createVirtualRegister(&HalfRC);
352  Register DstHi = MRI->createVirtualRegister(&HalfRC);
353 
354  if (IsSALU) {
355  BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo)
356  .add(Lo1)
357  .add(Lo2);
358  BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi)
359  .add(Hi1)
360  .add(Hi2);
361  } else {
362  const TargetRegisterClass *CarryRC = TRI.getWaveMaskRegClass();
363  Register CarryReg = MRI->createVirtualRegister(CarryRC);
364  BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADD_I32_e64), DstLo)
365  .addDef(CarryReg)
366  .add(Lo1)
367  .add(Lo2)
368  .addImm(0);
369  MachineInstr *Addc = BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADDC_U32_e64), DstHi)
370  .addDef(MRI->createVirtualRegister(CarryRC), RegState::Dead)
371  .add(Hi1)
372  .add(Hi2)
373  .addReg(CarryReg, RegState::Kill)
374  .addImm(0);
375 
376  if (!constrainSelectedInstRegOperands(*Addc, TII, TRI, RBI))
377  return false;
378  }
379 
380  BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
381  .addReg(DstLo)
382  .addImm(AMDGPU::sub0)
383  .addReg(DstHi)
384  .addImm(AMDGPU::sub1);
385 
386 
387  if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
388  return false;
389 
390  I.eraseFromParent();
391  return true;
392 }
393 
394 bool AMDGPUInstructionSelector::selectG_UADDO_USUBO(MachineInstr &I) const {
395  MachineBasicBlock *BB = I.getParent();
396  MachineFunction *MF = BB->getParent();
397  MachineRegisterInfo &MRI = MF->getRegInfo();
398  const DebugLoc &DL = I.getDebugLoc();
399  Register Dst0Reg = I.getOperand(0).getReg();
400  Register Dst1Reg = I.getOperand(1).getReg();
401  const bool IsAdd = I.getOpcode() == AMDGPU::G_UADDO;
402 
403  if (!isSCC(Dst1Reg, MRI)) {
404  // The names of the opcodes are misleading. v_add_i32/v_sub_i32 have unsigned
405  // carry out despite the _i32 name. These were renamed in VI to _U32.
406  // FIXME: We should probably rename the opcodes here.
407  unsigned NewOpc = IsAdd ? AMDGPU::V_ADD_I32_e64 : AMDGPU::V_SUB_I32_e64;
408  I.setDesc(TII.get(NewOpc));
409  I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
411  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
412  }
413 
414  Register Src0Reg = I.getOperand(2).getReg();
415  Register Src1Reg = I.getOperand(3).getReg();
416  unsigned NewOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
417  BuildMI(*BB, &I, DL, TII.get(NewOpc), Dst0Reg)
418  .add(I.getOperand(2))
419  .add(I.getOperand(3));
420  BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), Dst1Reg)
421  .addReg(AMDGPU::SCC);
422 
423  if (!MRI.getRegClassOrNull(Dst1Reg))
424  MRI.setRegClass(Dst1Reg, &AMDGPU::SReg_32RegClass);
425 
426  if (!RBI.constrainGenericRegister(Dst0Reg, AMDGPU::SReg_32RegClass, MRI) ||
427  !RBI.constrainGenericRegister(Src0Reg, AMDGPU::SReg_32RegClass, MRI) ||
428  !RBI.constrainGenericRegister(Src1Reg, AMDGPU::SReg_32RegClass, MRI))
429  return false;
430 
431  I.eraseFromParent();
432  return true;
433 }
434 
435 bool AMDGPUInstructionSelector::selectG_EXTRACT(MachineInstr &I) const {
436  MachineBasicBlock *BB = I.getParent();
437  unsigned Offset = I.getOperand(2).getImm();
438  if (Offset % 32 != 0)
439  return false;
440 
441  unsigned SubReg = TRI.getSubRegFromChannel(Offset / 32);
442  const DebugLoc &DL = I.getDebugLoc();
443  MachineInstr *Copy = BuildMI(*BB, &I, DL, TII.get(TargetOpcode::COPY),
444  I.getOperand(0).getReg())
445  .addReg(I.getOperand(1).getReg(), 0, SubReg);
446 
447  for (const MachineOperand &MO : Copy->operands()) {
448  const TargetRegisterClass *RC =
449  TRI.getConstrainedRegClassForOperand(MO, *MRI);
450  if (!RC)
451  continue;
452  RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI);
453  }
454  I.eraseFromParent();
455  return true;
456 }
457 
458 bool AMDGPUInstructionSelector::selectG_MERGE_VALUES(MachineInstr &MI) const {
459  MachineBasicBlock *BB = MI.getParent();
460  Register DstReg = MI.getOperand(0).getReg();
461  LLT DstTy = MRI->getType(DstReg);
462  LLT SrcTy = MRI->getType(MI.getOperand(1).getReg());
463 
464  const unsigned SrcSize = SrcTy.getSizeInBits();
465  if (SrcSize < 32)
466  return false;
467 
468  const DebugLoc &DL = MI.getDebugLoc();
469  const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
470  const unsigned DstSize = DstTy.getSizeInBits();
471  const TargetRegisterClass *DstRC =
472  TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
473  if (!DstRC)
474  return false;
475 
476  ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(DstRC, SrcSize / 8);
477  MachineInstrBuilder MIB =
478  BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::REG_SEQUENCE), DstReg);
479  for (int I = 0, E = MI.getNumOperands() - 1; I != E; ++I) {
480  MachineOperand &Src = MI.getOperand(I + 1);
481  MIB.addReg(Src.getReg(), getUndefRegState(Src.isUndef()));
482  MIB.addImm(SubRegs[I]);
483 
484  const TargetRegisterClass *SrcRC
485  = TRI.getConstrainedRegClassForOperand(Src, *MRI);
486  if (SrcRC && !RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI))
487  return false;
488  }
489 
490  if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
491  return false;
492 
493  MI.eraseFromParent();
494  return true;
495 }
496 
497 bool AMDGPUInstructionSelector::selectG_UNMERGE_VALUES(MachineInstr &MI) const {
498  MachineBasicBlock *BB = MI.getParent();
499  const int NumDst = MI.getNumOperands() - 1;
500 
501  MachineOperand &Src = MI.getOperand(NumDst);
502 
503  Register SrcReg = Src.getReg();
504  Register DstReg0 = MI.getOperand(0).getReg();
505  LLT DstTy = MRI->getType(DstReg0);
506  LLT SrcTy = MRI->getType(SrcReg);
507 
508  const unsigned DstSize = DstTy.getSizeInBits();
509  const unsigned SrcSize = SrcTy.getSizeInBits();
510  const DebugLoc &DL = MI.getDebugLoc();
511  const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
512 
513  const TargetRegisterClass *SrcRC =
514  TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI);
515  if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
516  return false;
517 
518  const unsigned SrcFlags = getUndefRegState(Src.isUndef());
519 
520  // Note we could have mixed SGPR and VGPR destination banks for an SGPR
521  // source, and this relies on the fact that the same subregister indices are
522  // used for both.
523  ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SrcRC, DstSize / 8);
524  for (int I = 0, E = NumDst; I != E; ++I) {
525  MachineOperand &Dst = MI.getOperand(I);
526  BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::COPY), Dst.getReg())
527  .addReg(SrcReg, SrcFlags, SubRegs[I]);
528 
529  const TargetRegisterClass *DstRC =
530  TRI.getConstrainedRegClassForOperand(Dst, *MRI);
531  if (DstRC && !RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI))
532  return false;
533  }
534 
535  MI.eraseFromParent();
536  return true;
537 }
538 
539 bool AMDGPUInstructionSelector::selectG_GEP(MachineInstr &I) const {
540  return selectG_ADD_SUB(I);
541 }
542 
543 bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const {
544  const MachineOperand &MO = I.getOperand(0);
545 
546  // FIXME: Interface for getConstrainedRegClassForOperand needs work. The
547  // regbank check here is to know why getConstrainedRegClassForOperand failed.
548  const TargetRegisterClass *RC = TRI.getConstrainedRegClassForOperand(MO, *MRI);
549  if ((!RC && !MRI->getRegBankOrNull(MO.getReg())) ||
550  (RC && RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI))) {
551  I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
552  return true;
553  }
554 
555  return false;
556 }
557 
558 bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const {
559  MachineBasicBlock *BB = I.getParent();
560 
561  Register DstReg = I.getOperand(0).getReg();
562  Register Src0Reg = I.getOperand(1).getReg();
563  Register Src1Reg = I.getOperand(2).getReg();
564  LLT Src1Ty = MRI->getType(Src1Reg);
565 
566  unsigned DstSize = MRI->getType(DstReg).getSizeInBits();
567  unsigned InsSize = Src1Ty.getSizeInBits();
568 
569  int64_t Offset = I.getOperand(3).getImm();
570  if (Offset % 32 != 0)
571  return false;
572 
573  unsigned SubReg = TRI.getSubRegFromChannel(Offset / 32, InsSize / 32);
574  if (SubReg == AMDGPU::NoSubRegister)
575  return false;
576 
577  const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
578  const TargetRegisterClass *DstRC =
579  TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
580  if (!DstRC)
581  return false;
582 
583  const RegisterBank *Src0Bank = RBI.getRegBank(Src0Reg, *MRI, TRI);
584  const RegisterBank *Src1Bank = RBI.getRegBank(Src1Reg, *MRI, TRI);
585  const TargetRegisterClass *Src0RC =
586  TRI.getRegClassForSizeOnBank(DstSize, *Src0Bank, *MRI);
587  const TargetRegisterClass *Src1RC =
588  TRI.getRegClassForSizeOnBank(InsSize, *Src1Bank, *MRI);
589 
590  // Deal with weird cases where the class only partially supports the subreg
591  // index.
592  Src0RC = TRI.getSubClassWithSubReg(Src0RC, SubReg);
593  if (!Src0RC)
594  return false;
595 
596  if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
597  !RBI.constrainGenericRegister(Src0Reg, *Src0RC, *MRI) ||
598  !RBI.constrainGenericRegister(Src1Reg, *Src1RC, *MRI))
599  return false;
600 
601  const DebugLoc &DL = I.getDebugLoc();
602  BuildMI(*BB, &I, DL, TII.get(TargetOpcode::INSERT_SUBREG), DstReg)
603  .addReg(Src0Reg)
604  .addReg(Src1Reg)
605  .addImm(SubReg);
606 
607  I.eraseFromParent();
608  return true;
609 }
610 
611 bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I) const {
612  unsigned IntrinsicID = I.getIntrinsicID();
613  switch (IntrinsicID) {
614  case Intrinsic::amdgcn_if_break: {
615  MachineBasicBlock *BB = I.getParent();
616 
617  // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
618  // SelectionDAG uses for wave32 vs wave64.
619  BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::SI_IF_BREAK))
620  .add(I.getOperand(0))
621  .add(I.getOperand(2))
622  .add(I.getOperand(3));
623 
624  Register DstReg = I.getOperand(0).getReg();
625  Register Src0Reg = I.getOperand(2).getReg();
626  Register Src1Reg = I.getOperand(3).getReg();
627 
628  I.eraseFromParent();
629 
630  for (Register Reg : { DstReg, Src0Reg, Src1Reg }) {
631  if (!MRI->getRegClassOrNull(Reg))
632  MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
633  }
634 
635  return true;
636  }
637  default:
638  return selectImpl(I, *CoverageInfo);
639  }
640 }
641 
642 static int getV_CMPOpcode(CmpInst::Predicate P, unsigned Size) {
643  if (Size != 32 && Size != 64)
644  return -1;
645  switch (P) {
646  default:
647  llvm_unreachable("Unknown condition code!");
648  case CmpInst::ICMP_NE:
649  return Size == 32 ? AMDGPU::V_CMP_NE_U32_e64 : AMDGPU::V_CMP_NE_U64_e64;
650  case CmpInst::ICMP_EQ:
651  return Size == 32 ? AMDGPU::V_CMP_EQ_U32_e64 : AMDGPU::V_CMP_EQ_U64_e64;
652  case CmpInst::ICMP_SGT:
653  return Size == 32 ? AMDGPU::V_CMP_GT_I32_e64 : AMDGPU::V_CMP_GT_I64_e64;
654  case CmpInst::ICMP_SGE:
655  return Size == 32 ? AMDGPU::V_CMP_GE_I32_e64 : AMDGPU::V_CMP_GE_I64_e64;
656  case CmpInst::ICMP_SLT:
657  return Size == 32 ? AMDGPU::V_CMP_LT_I32_e64 : AMDGPU::V_CMP_LT_I64_e64;
658  case CmpInst::ICMP_SLE:
659  return Size == 32 ? AMDGPU::V_CMP_LE_I32_e64 : AMDGPU::V_CMP_LE_I64_e64;
660  case CmpInst::ICMP_UGT:
661  return Size == 32 ? AMDGPU::V_CMP_GT_U32_e64 : AMDGPU::V_CMP_GT_U64_e64;
662  case CmpInst::ICMP_UGE:
663  return Size == 32 ? AMDGPU::V_CMP_GE_U32_e64 : AMDGPU::V_CMP_GE_U64_e64;
664  case CmpInst::ICMP_ULT:
665  return Size == 32 ? AMDGPU::V_CMP_LT_U32_e64 : AMDGPU::V_CMP_LT_U64_e64;
666  case CmpInst::ICMP_ULE:
667  return Size == 32 ? AMDGPU::V_CMP_LE_U32_e64 : AMDGPU::V_CMP_LE_U64_e64;
668  }
669 }
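// Note: only the ordering predicates need separate signed (_I) and unsigned (_U)
// VALU compares; equality and inequality are bitwise, so ICMP_EQ/ICMP_NE map to
// the _U forms for both signed and unsigned comparisons.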
670 
671 int AMDGPUInstructionSelector::getS_CMPOpcode(CmpInst::Predicate P,
672  unsigned Size) const {
673  if (Size == 64) {
674  if (!STI.hasScalarCompareEq64())
675  return -1;
676 
677  switch (P) {
678  case CmpInst::ICMP_NE:
679  return AMDGPU::S_CMP_LG_U64;
680  case CmpInst::ICMP_EQ:
681  return AMDGPU::S_CMP_EQ_U64;
682  default:
683  return -1;
684  }
685  }
686 
687  if (Size != 32)
688  return -1;
689 
690  switch (P) {
691  case CmpInst::ICMP_NE:
692  return AMDGPU::S_CMP_LG_U32;
693  case CmpInst::ICMP_EQ:
694  return AMDGPU::S_CMP_EQ_U32;
695  case CmpInst::ICMP_SGT:
696  return AMDGPU::S_CMP_GT_I32;
697  case CmpInst::ICMP_SGE:
698  return AMDGPU::S_CMP_GE_I32;
699  case CmpInst::ICMP_SLT:
700  return AMDGPU::S_CMP_LT_I32;
701  case CmpInst::ICMP_SLE:
702  return AMDGPU::S_CMP_LE_I32;
703  case CmpInst::ICMP_UGT:
704  return AMDGPU::S_CMP_GT_U32;
705  case CmpInst::ICMP_UGE:
706  return AMDGPU::S_CMP_GE_U32;
707  case CmpInst::ICMP_ULT:
708  return AMDGPU::S_CMP_LT_U32;
709  case CmpInst::ICMP_ULE:
710  return AMDGPU::S_CMP_LE_U32;
711  default:
712  llvm_unreachable("Unknown condition code!");
713  }
714 }
715 
716 bool AMDGPUInstructionSelector::selectG_ICMP(MachineInstr &I) const {
717  MachineBasicBlock *BB = I.getParent();
718  const DebugLoc &DL = I.getDebugLoc();
719 
720  Register SrcReg = I.getOperand(2).getReg();
721  unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
722 
723  auto Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate();
724 
725  Register CCReg = I.getOperand(0).getReg();
726  if (isSCC(CCReg, *MRI)) {
727  int Opcode = getS_CMPOpcode(Pred, Size);
728  if (Opcode == -1)
729  return false;
730  MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode))
731  .add(I.getOperand(2))
732  .add(I.getOperand(3));
733  BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CCReg)
734  .addReg(AMDGPU::SCC);
735  bool Ret =
736  constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI) &&
737  RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32RegClass, *MRI);
738  I.eraseFromParent();
739  return Ret;
740  }
741 
742  int Opcode = getV_CMPOpcode(Pred, Size);
743  if (Opcode == -1)
744  return false;
745 
746  MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode),
747  I.getOperand(0).getReg())
748  .add(I.getOperand(2))
749  .add(I.getOperand(3));
750  RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
751  *TRI.getBoolRC(), *MRI);
752  bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
753  I.eraseFromParent();
754  return Ret;
755 }
756 
757 static MachineInstr *
758 buildEXP(const TargetInstrInfo &TII, MachineInstr *Insert, unsigned Tgt,
759  unsigned Reg0, unsigned Reg1, unsigned Reg2, unsigned Reg3,
760  unsigned VM, bool Compr, unsigned Enabled, bool Done) {
761  const DebugLoc &DL = Insert->getDebugLoc();
762  MachineBasicBlock &BB = *Insert->getParent();
763  unsigned Opcode = Done ? AMDGPU::EXP_DONE : AMDGPU::EXP;
764  return BuildMI(BB, Insert, DL, TII.get(Opcode))
765  .addImm(Tgt)
766  .addReg(Reg0)
767  .addReg(Reg1)
768  .addReg(Reg2)
769  .addReg(Reg3)
770  .addImm(VM)
771  .addImm(Compr)
772  .addImm(Enabled);
773 }
774 
775 static bool isZero(Register Reg, MachineRegisterInfo &MRI) {
776  int64_t C;
777  if (mi_match(Reg, MRI, m_ICst(C)) && C == 0)
778  return true;
779 
780  // FIXME: matcher should ignore copies
781  return mi_match(Reg, MRI, m_Copy(m_ICst(C))) && C == 0;
782 }
783 
784 static unsigned extractGLC(unsigned AuxiliaryData) {
785  return AuxiliaryData & 1;
786 }
787 
788 static unsigned extractSLC(unsigned AuxiliaryData) {
789  return (AuxiliaryData >> 1) & 1;
790 }
791 
792 static unsigned extractDLC(unsigned AuxiliaryData) {
793  return (AuxiliaryData >> 2) & 1;
794 }
795 
796 static unsigned extractSWZ(unsigned AuxiliaryData) {
797  return (AuxiliaryData >> 3) & 1;
798 }
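// Taken together, the helpers above decode the buffer intrinsics' auxiliary
// (cachepolicy) immediate: glc in bit 0, slc in bit 1, dlc in bit 2, swz in bit 3,
// which become individual operands on the MUBUF instruction below.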
799 
800 // Returns Base register, constant offset, and offset def point.
801 static std::tuple<Register, unsigned, MachineInstr *>
802 getBaseWithConstantOffset(MachineRegisterInfo &MRI, Register Reg) {
803  MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
804  if (!Def)
805  return std::make_tuple(Reg, 0, nullptr);
806 
807  if (Def->getOpcode() == AMDGPU::G_CONSTANT) {
808  unsigned Offset;
809  const MachineOperand &Op = Def->getOperand(1);
810  if (Op.isImm())
811  Offset = Op.getImm();
812  else
813  Offset = Op.getCImm()->getZExtValue();
814 
815  return std::make_tuple(Register(), Offset, Def);
816  }
817 
818  int64_t Offset;
819  if (Def->getOpcode() == AMDGPU::G_ADD) {
820  // TODO: Handle G_OR used for add case
821  if (mi_match(Def->getOperand(1).getReg(), MRI, m_ICst(Offset)))
822  return std::make_tuple(Def->getOperand(2).getReg(), Offset, Def);
823 
824  // FIXME: matcher should ignore copies
825  if (mi_match(Def->getOperand(1).getReg(), MRI, m_Copy(m_ICst(Offset))))
826  return std::make_tuple(Def->getOperand(2).getReg(), Offset, Def);
827  }
828 
829  return std::make_tuple(Reg, 0, Def);
830 }
831 
832 static unsigned getBufferStoreOpcode(LLT Ty,
833  const unsigned MemSize,
834  const bool Offen) {
835  const int Size = Ty.getSizeInBits();
836  switch (8 * MemSize) {
837  case 8:
838  return Offen ? AMDGPU::BUFFER_STORE_BYTE_OFFEN_exact :
839  AMDGPU::BUFFER_STORE_BYTE_OFFSET_exact;
840  case 16:
841  return Offen ? AMDGPU::BUFFER_STORE_SHORT_OFFEN_exact :
842  AMDGPU::BUFFER_STORE_SHORT_OFFSET_exact;
843  default:
844  unsigned Opc = Offen ? AMDGPU::BUFFER_STORE_DWORD_OFFEN_exact :
845  AMDGPU::BUFFER_STORE_DWORD_OFFSET_exact;
846  if (Size > 32)
847  Opc = AMDGPU::getMUBUFOpcode(Opc, Size / 32);
848  return Opc;
849  }
850 }
851 
852 static unsigned getBufferStoreFormatOpcode(LLT Ty,
853  const unsigned MemSize,
854  const bool Offen) {
855  bool IsD16Packed = Ty.getScalarSizeInBits() == 16;
856  bool IsD16Unpacked = 8 * MemSize < Ty.getSizeInBits();
857  int NumElts = Ty.isVector() ? Ty.getNumElements() : 1;
858 
859  if (IsD16Packed) {
860  switch (NumElts) {
861  case 1:
862  return Offen ? AMDGPU::BUFFER_STORE_FORMAT_D16_X_OFFEN_exact :
863  AMDGPU::BUFFER_STORE_FORMAT_D16_X_OFFSET_exact;
864  case 2:
865  return Offen ? AMDGPU::BUFFER_STORE_FORMAT_D16_XY_OFFEN_exact :
866  AMDGPU::BUFFER_STORE_FORMAT_D16_XY_OFFSET_exact;
867  case 3:
868  return Offen ? AMDGPU::BUFFER_STORE_FORMAT_D16_XYZ_OFFEN_exact :
869  AMDGPU::BUFFER_STORE_FORMAT_D16_XYZ_OFFSET_exact;
870  case 4:
871  return Offen ? AMDGPU::BUFFER_STORE_FORMAT_D16_XYZW_OFFEN_exact :
872  AMDGPU::BUFFER_STORE_FORMAT_D16_XYZW_OFFSET_exact;
873  default:
874  return -1;
875  }
876  }
877 
878  if (IsD16Unpacked) {
879  switch (NumElts) {
880  case 1:
881  return Offen ? AMDGPU::BUFFER_STORE_FORMAT_D16_X_OFFEN_exact :
882  AMDGPU::BUFFER_STORE_FORMAT_D16_X_OFFSET_exact;
883  case 2:
884  return Offen ? AMDGPU::BUFFER_STORE_FORMAT_D16_XY_gfx80_OFFEN_exact :
885  AMDGPU::BUFFER_STORE_FORMAT_D16_XY_gfx80_OFFSET_exact;
886  case 3:
887  return Offen ? AMDGPU::BUFFER_STORE_FORMAT_D16_XYZ_gfx80_OFFEN_exact :
888  AMDGPU::BUFFER_STORE_FORMAT_D16_XYZ_gfx80_OFFSET_exact;
889  case 4:
890  return Offen ? AMDGPU::BUFFER_STORE_FORMAT_D16_XYZW_gfx80_OFFEN_exact :
891  AMDGPU::BUFFER_STORE_FORMAT_D16_XYZW_gfx80_OFFSET_exact;
892  default:
893  return -1;
894  }
895  }
896 
897  switch (NumElts) {
898  case 1:
899  return Offen ? AMDGPU::BUFFER_STORE_FORMAT_X_OFFEN_exact :
900  AMDGPU::BUFFER_STORE_FORMAT_X_OFFSET_exact;
901  case 2:
902  return Offen ? AMDGPU::BUFFER_STORE_FORMAT_XY_OFFEN_exact :
903  AMDGPU::BUFFER_STORE_FORMAT_XY_OFFSET_exact;
904  case 3:
905  return Offen ? AMDGPU::BUFFER_STORE_FORMAT_XYZ_OFFEN_exact :
906  AMDGPU::BUFFER_STORE_FORMAT_XYZ_OFFSET_exact;
907  case 4:
908  return Offen ? AMDGPU::BUFFER_STORE_FORMAT_XYZW_OFFEN_exact :
909  AMDGPU::BUFFER_STORE_FORMAT_XYZW_OFFSET_exact;
910  default:
911  return -1;
912  }
913 
914  llvm_unreachable("unhandled buffer store");
915 }
916 
917 // TODO: Move this to combiner
918 // Returns base register, imm offset, total constant offset.
919 std::tuple<Register, unsigned, unsigned>
920 AMDGPUInstructionSelector::splitBufferOffsets(MachineIRBuilder &B,
921  Register OrigOffset) const {
922  const unsigned MaxImm = 4095;
923  Register BaseReg;
924  unsigned TotalConstOffset;
925  MachineInstr *OffsetDef;
926 
927  std::tie(BaseReg, TotalConstOffset, OffsetDef)
928  = getBaseWithConstantOffset(*MRI, OrigOffset);
929 
930  unsigned ImmOffset = TotalConstOffset;
931 
932  // If the immediate value is too big for the immoffset field, put the value
933  // and -4096 into the immoffset field so that the value that is copied/added
934  // for the voffset field is a multiple of 4096, and it stands more chance
935  // of being CSEd with the copy/add for another similar load/store.
936  // However, do not do that rounding down to a multiple of 4096 if that is a
937  // negative number, as it appears to be illegal to have a negative offset
938  // in the vgpr, even if adding the immediate offset makes it positive.
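  // For illustration: with MaxImm = 4095, a TotalConstOffset of 8200 gives
  // Overflow = 8200 & ~4095 = 8192 (moved into the voffset register below) and
  // ImmOffset = 8 for the 12-bit field; if Overflow comes out negative as an
  // int32_t, the whole offset stays in the register and ImmOffset becomes 0.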
939  unsigned Overflow = ImmOffset & ~MaxImm;
940  ImmOffset -= Overflow;
941  if ((int32_t)Overflow < 0) {
942  Overflow += ImmOffset;
943  ImmOffset = 0;
944  }
945 
946  if (Overflow != 0) {
947  // In case this is in a waterfall loop, insert offset code at the def point
948  // of the offset, not inside the loop.
949  MachineBasicBlock::iterator OldInsPt = B.getInsertPt();
950  MachineBasicBlock &OldMBB = B.getMBB();
951  B.setInstr(*OffsetDef);
952 
953  if (!BaseReg) {
954  BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
955  B.buildInstr(AMDGPU::V_MOV_B32_e32)
956  .addDef(BaseReg)
957  .addImm(Overflow);
958  } else {
959  Register OverflowVal = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
960  B.buildInstr(AMDGPU::V_MOV_B32_e32)
961  .addDef(OverflowVal)
962  .addImm(Overflow);
963 
964  Register NewBaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
965  TII.getAddNoCarry(B.getMBB(), B.getInsertPt(), B.getDebugLoc(), NewBaseReg)
966  .addReg(BaseReg)
967  .addReg(OverflowVal, RegState::Kill)
968  .addImm(0);
969  BaseReg = NewBaseReg;
970  }
971 
972  B.setInsertPt(OldMBB, OldInsPt);
973  }
974 
975  return std::make_tuple(BaseReg, ImmOffset, TotalConstOffset);
976 }
977 
978 bool AMDGPUInstructionSelector::selectStoreIntrinsic(MachineInstr &MI,
979  bool IsFormat) const {
980  MachineIRBuilder B(MI);
981  MachineFunction &MF = B.getMF();
982  Register VData = MI.getOperand(1).getReg();
983  LLT Ty = MRI->getType(VData);
984 
985  int Size = Ty.getSizeInBits();
986  if (Size % 32 != 0)
987  return false;
988 
989  // FIXME: Verifier should enforce 1 MMO for these intrinsics.
990  MachineMemOperand *MMO = *MI.memoperands_begin();
991  const int MemSize = MMO->getSize();
992 
993  Register RSrc = MI.getOperand(2).getReg();
994  Register VOffset = MI.getOperand(3).getReg();
995  Register SOffset = MI.getOperand(4).getReg();
996  unsigned AuxiliaryData = MI.getOperand(5).getImm();
997  unsigned ImmOffset;
998  unsigned TotalOffset;
999 
1000  std::tie(VOffset, ImmOffset, TotalOffset) = splitBufferOffsets(B, VOffset);
1001  if (TotalOffset != 0)
1002  MMO = MF.getMachineMemOperand(MMO, TotalOffset, MemSize);
1003 
1004  const bool Offen = !isZero(VOffset, *MRI);
1005 
1006  int Opc = IsFormat ? getBufferStoreFormatOpcode(Ty, MemSize, Offen) :
1007  getBufferStoreOpcode(Ty, MemSize, Offen);
1008  if (Opc == -1)
1009  return false;
1010 
1011  MachineInstrBuilder MIB = B.buildInstr(Opc)
1012  .addUse(VData);
1013 
1014  if (Offen)
1015  MIB.addUse(VOffset);
1016 
1017  MIB.addUse(RSrc)
1018  .addUse(SOffset)
1019  .addImm(ImmOffset)
1020  .addImm(extractGLC(AuxiliaryData))
1021  .addImm(extractSLC(AuxiliaryData))
1022  .addImm(0) // tfe: FIXME: Remove from inst
1023  .addImm(extractDLC(AuxiliaryData))
1024  .addImm(extractSWZ(AuxiliaryData))
1025  .addMemOperand(MMO);
1026 
1027  MI.eraseFromParent();
1028 
1029  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1030 }
1031 
1032 bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
1033  MachineInstr &I) const {
1034  MachineBasicBlock *BB = I.getParent();
1035  unsigned IntrinsicID = I.getIntrinsicID();
1036  switch (IntrinsicID) {
1037  case Intrinsic::amdgcn_exp: {
1038  int64_t Tgt = I.getOperand(1).getImm();
1039  int64_t Enabled = I.getOperand(2).getImm();
1040  int64_t Done = I.getOperand(7).getImm();
1041  int64_t VM = I.getOperand(8).getImm();
1042 
1043  MachineInstr *Exp = buildEXP(TII, &I, Tgt, I.getOperand(3).getReg(),
1044  I.getOperand(4).getReg(),
1045  I.getOperand(5).getReg(),
1046  I.getOperand(6).getReg(),
1047  VM, false, Enabled, Done);
1048 
1049  I.eraseFromParent();
1050  return constrainSelectedInstRegOperands(*Exp, TII, TRI, RBI);
1051  }
1052  case Intrinsic::amdgcn_exp_compr: {
1053  const DebugLoc &DL = I.getDebugLoc();
1054  int64_t Tgt = I.getOperand(1).getImm();
1055  int64_t Enabled = I.getOperand(2).getImm();
1056  Register Reg0 = I.getOperand(3).getReg();
1057  Register Reg1 = I.getOperand(4).getReg();
1058  Register Undef = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1059  int64_t Done = I.getOperand(5).getImm();
1060  int64_t VM = I.getOperand(6).getImm();
1061 
1062  BuildMI(*BB, &I, DL, TII.get(AMDGPU::IMPLICIT_DEF), Undef);
1063  MachineInstr *Exp = buildEXP(TII, &I, Tgt, Reg0, Reg1, Undef, Undef, VM,
1064  true, Enabled, Done);
1065 
1066  I.eraseFromParent();
1067  return constrainSelectedInstRegOperands(*Exp, TII, TRI, RBI);
1068  }
1069  case Intrinsic::amdgcn_end_cf: {
1070  // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
1071  // SelectionDAG uses for wave32 vs wave64.
1072  BuildMI(*BB, &I, I.getDebugLoc(),
1073  TII.get(AMDGPU::SI_END_CF))
1074  .add(I.getOperand(1));
1075 
1076  Register Reg = I.getOperand(1).getReg();
1077  I.eraseFromParent();
1078 
1079  if (!MRI->getRegClassOrNull(Reg))
1080  MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
1081  return true;
1082  }
1083  case Intrinsic::amdgcn_raw_buffer_store:
1084  return selectStoreIntrinsic(I, false);
1085  case Intrinsic::amdgcn_raw_buffer_store_format:
1086  return selectStoreIntrinsic(I, true);
1087  default:
1088  return selectImpl(I, *CoverageInfo);
1089  }
1090 }
1091 
1092 bool AMDGPUInstructionSelector::selectG_SELECT(MachineInstr &I) const {
1093  MachineBasicBlock *BB = I.getParent();
1094  const DebugLoc &DL = I.getDebugLoc();
1095 
1096  Register DstReg = I.getOperand(0).getReg();
1097  unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
1098  assert(Size <= 32 || Size == 64);
1099  const MachineOperand &CCOp = I.getOperand(1);
1100  Register CCReg = CCOp.getReg();
1101  if (isSCC(CCReg, *MRI)) {
1102  unsigned SelectOpcode = Size == 64 ? AMDGPU::S_CSELECT_B64 :
1103  AMDGPU::S_CSELECT_B32;
1104  MachineInstr *CopySCC = BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
1105  .addReg(CCReg);
1106 
1107  // The generic constrainSelectedInstRegOperands doesn't work for the scc register
1108  // bank, because it does not cover the register class that we use to
1109  // represent it. So we need to manually set the register class here.
1110  if (!MRI->getRegClassOrNull(CCReg))
1111  MRI->setRegClass(CCReg, TRI.getConstrainedRegClassForOperand(CCOp, *MRI));
1112  MachineInstr *Select = BuildMI(*BB, &I, DL, TII.get(SelectOpcode), DstReg)
1113  .add(I.getOperand(2))
1114  .add(I.getOperand(3));
1115 
1116  bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI) |
1117  constrainSelectedInstRegOperands(*CopySCC, TII, TRI, RBI);
1118  I.eraseFromParent();
1119  return Ret;
1120  }
1121 
1122  // Wide VGPR select should have been split in RegBankSelect.
1123  if (Size > 32)
1124  return false;
1125 
1126  MachineInstr *Select =
1127  BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1128  .addImm(0)
1129  .add(I.getOperand(3))
1130  .addImm(0)
1131  .add(I.getOperand(2))
1132  .add(I.getOperand(1));
1133 
1134  bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
1135  I.eraseFromParent();
1136  return Ret;
1137 }
1138 
1139 bool AMDGPUInstructionSelector::selectG_STORE(MachineInstr &I) const {
1140  initM0(I);
1141  return selectImpl(I, *CoverageInfo);
1142 }
1143 
1144 static int sizeToSubRegIndex(unsigned Size) {
1145  switch (Size) {
1146  case 32:
1147  return AMDGPU::sub0;
1148  case 64:
1149  return AMDGPU::sub0_sub1;
1150  case 96:
1151  return AMDGPU::sub0_sub1_sub2;
1152  case 128:
1153  return AMDGPU::sub0_sub1_sub2_sub3;
1154  case 256:
1155  return AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7;
1156  default:
1157  if (Size < 32)
1158  return AMDGPU::sub0;
1159  if (Size > 256)
1160  return -1;
1161  return sizeToSubRegIndex(PowerOf2Ceil(Size));
1162  }
1163 }
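// Note: sizes without a direct entry are rounded up, e.g. a 48-bit value recurses
// with PowerOf2Ceil(48) == 64 and yields AMDGPU::sub0_sub1.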
1164 
1165 bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const {
1166  Register DstReg = I.getOperand(0).getReg();
1167  Register SrcReg = I.getOperand(1).getReg();
1168  const LLT DstTy = MRI->getType(DstReg);
1169  const LLT SrcTy = MRI->getType(SrcReg);
1170  if (!DstTy.isScalar())
1171  return false;
1172 
1173  const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1174  const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
1175  if (SrcRB != DstRB)
1176  return false;
1177 
1178  unsigned DstSize = DstTy.getSizeInBits();
1179  unsigned SrcSize = SrcTy.getSizeInBits();
1180 
1181  const TargetRegisterClass *SrcRC
1182  = TRI.getRegClassForSizeOnBank(SrcSize, *SrcRB, *MRI);
1183  const TargetRegisterClass *DstRC
1184  = TRI.getRegClassForSizeOnBank(DstSize, *DstRB, *MRI);
1185 
1186  if (SrcSize > 32) {
1187  int SubRegIdx = sizeToSubRegIndex(DstSize);
1188  if (SubRegIdx == -1)
1189  return false;
1190 
1191  // Deal with weird cases where the class only partially supports the subreg
1192  // index.
1193  SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubRegIdx);
1194  if (!SrcRC)
1195  return false;
1196 
1197  I.getOperand(1).setSubReg(SubRegIdx);
1198  }
1199 
1200  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
1201  !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
1202  LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
1203  return false;
1204  }
1205 
1206  I.setDesc(TII.get(TargetOpcode::COPY));
1207  return true;
1208 }
1209 
1210 /// \returns true if a bitmask for \p Size bits will be an inline immediate.
1211 static bool shouldUseAndMask(unsigned Size, unsigned &Mask) {
1212  Mask = maskTrailingOnes<unsigned>(Size);
1213  int SignedMask = static_cast<int>(Mask);
1214  return SignedMask >= -16 && SignedMask <= 64;
1215 }
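// For illustration: Size = 4 gives Mask = 0xF (15, an inline immediate, so the
// AND form is used); Size = 8 gives Mask = 0xFF (255, not inline, so a BFE is
// preferred); Size = 32 gives Mask = 0xFFFFFFFF, i.e. -1 when signed, which is
// also an inline immediate.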
1216 
1217 bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const {
1218  bool Signed = I.getOpcode() == AMDGPU::G_SEXT;
1219  const DebugLoc &DL = I.getDebugLoc();
1220  MachineBasicBlock &MBB = *I.getParent();
1221  const Register DstReg = I.getOperand(0).getReg();
1222  const Register SrcReg = I.getOperand(1).getReg();
1223 
1224  const LLT DstTy = MRI->getType(DstReg);
1225  const LLT SrcTy = MRI->getType(SrcReg);
1226  const LLT S1 = LLT::scalar(1);
1227  const unsigned SrcSize = SrcTy.getSizeInBits();
1228  const unsigned DstSize = DstTy.getSizeInBits();
1229  if (!DstTy.isScalar())
1230  return false;
1231 
1232  const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
1233 
1234  if (SrcBank->getID() == AMDGPU::SCCRegBankID) {
1235  if (SrcTy != S1 || DstSize > 64) // Invalid
1236  return false;
1237 
1238  unsigned Opcode =
1239  DstSize > 32 ? AMDGPU::S_CSELECT_B64 : AMDGPU::S_CSELECT_B32;
1240  const TargetRegisterClass *DstRC =
1241  DstSize > 32 ? &AMDGPU::SReg_64RegClass : &AMDGPU::SReg_32RegClass;
1242 
1243  // FIXME: Create an extra copy to avoid incorrectly constraining the result
1244  // of the scc producer.
1245  Register TmpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1246  BuildMI(MBB, I, DL, TII.get(AMDGPU::COPY), TmpReg)
1247  .addReg(SrcReg);
1248  BuildMI(MBB, I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
1249  .addReg(TmpReg);
1250 
1251  // The instruction operands are backwards from what you would expect.
1252  BuildMI(MBB, I, DL, TII.get(Opcode), DstReg)
1253  .addImm(0)
1254  .addImm(Signed ? -1 : 1);
1255  I.eraseFromParent();
1256  return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI);
1257  }
1258 
1259  if (SrcBank->getID() == AMDGPU::VCCRegBankID && DstSize <= 32) {
1260  if (SrcTy != S1) // Invalid
1261  return false;
1262 
1263  MachineInstr *ExtI =
1264  BuildMI(MBB, I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1265  .addImm(0) // src0_modifiers
1266  .addImm(0) // src0
1267  .addImm(0) // src1_modifiers
1268  .addImm(Signed ? -1 : 1) // src1
1269  .addUse(SrcReg);
1270  I.eraseFromParent();
1271  return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
1272  }
1273 
1274  if (I.getOpcode() == AMDGPU::G_ANYEXT)
1275  return selectCOPY(I);
1276 
1277  if (SrcBank->getID() == AMDGPU::VGPRRegBankID && DstSize <= 32) {
1278  // 64-bit should have been split up in RegBankSelect
1279 
1280  // Try to use an and with a mask if it will save code size.
1281  unsigned Mask;
1282  if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
1283  MachineInstr *ExtI =
1284  BuildMI(MBB, I, DL, TII.get(AMDGPU::V_AND_B32_e32), DstReg)
1285  .addImm(Mask)
1286  .addReg(SrcReg);
1287  I.eraseFromParent();
1288  return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
1289  }
1290 
1291  const unsigned BFE = Signed ? AMDGPU::V_BFE_I32 : AMDGPU::V_BFE_U32;
1292  MachineInstr *ExtI =
1293  BuildMI(MBB, I, DL, TII.get(BFE), DstReg)
1294  .addReg(SrcReg)
1295  .addImm(0) // Offset
1296  .addImm(SrcSize); // Width
1297  I.eraseFromParent();
1298  return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
1299  }
1300 
1301  if (SrcBank->getID() == AMDGPU::SGPRRegBankID && DstSize <= 64) {
1302  if (!RBI.constrainGenericRegister(SrcReg, AMDGPU::SReg_32RegClass, *MRI))
1303  return false;
1304 
1305  if (Signed && DstSize == 32 && (SrcSize == 8 || SrcSize == 16)) {
1306  const unsigned SextOpc = SrcSize == 8 ?
1307  AMDGPU::S_SEXT_I32_I8 : AMDGPU::S_SEXT_I32_I16;
1308  BuildMI(MBB, I, DL, TII.get(SextOpc), DstReg)
1309  .addReg(SrcReg);
1310  I.eraseFromParent();
1311  return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
1312  }
1313 
1314  const unsigned BFE64 = Signed ? AMDGPU::S_BFE_I64 : AMDGPU::S_BFE_U64;
1315  const unsigned BFE32 = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;
1316 
1317  // Scalar BFE is encoded as S1[5:0] = offset, S1[22:16] = width.
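  // For example, a zero offset with an 8-bit width is encoded as (8 << 16) | 0
  // = 0x80000, which is what the .addImm(SrcSize << 16) calls below build.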
1318  if (DstSize > 32 && SrcSize <= 32) {
1319  // We need a 64-bit register source, but the high bits don't matter.
1320  Register ExtReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
1321  Register UndefReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1322  BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
1323  BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), ExtReg)
1324  .addReg(SrcReg)
1325  .addImm(AMDGPU::sub0)
1326  .addReg(UndefReg)
1327  .addImm(AMDGPU::sub1);
1328 
1329  BuildMI(MBB, I, DL, TII.get(BFE64), DstReg)
1330  .addReg(ExtReg)
1331  .addImm(SrcSize << 16);
1332 
1333  I.eraseFromParent();
1334  return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, *MRI);
1335  }
1336 
1337  unsigned Mask;
1338  if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
1339  BuildMI(MBB, I, DL, TII.get(AMDGPU::S_AND_B32), DstReg)
1340  .addReg(SrcReg)
1341  .addImm(Mask);
1342  } else {
1343  BuildMI(MBB, I, DL, TII.get(BFE32), DstReg)
1344  .addReg(SrcReg)
1345  .addImm(SrcSize << 16);
1346  }
1347 
1348  I.eraseFromParent();
1349  return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
1350  }
1351 
1352  return false;
1353 }
1354 
1355 static int64_t getFPTrueImmVal(unsigned Size, bool Signed) {
1356  switch (Size) {
1357  case 16:
1358  return Signed ? 0xBC00 : 0x3C00;
1359  case 32:
1360  return Signed ? 0xbf800000 : 0x3f800000;
1361  case 64:
1362  return Signed ? 0xbff0000000000000 : 0x3ff0000000000000;
1363  default:
1364  llvm_unreachable("Invalid FP type size");
1365  }
1366 }
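// Note: these are the IEEE-754 bit patterns of +1.0 and -1.0 in half, float and
// double, so a "true" boolean is converted to -1.0 by G_SITOFP and to +1.0 by
// G_UITOFP.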
1367 
1368 bool AMDGPUInstructionSelector::selectG_SITOFP_UITOFP(MachineInstr &I) const {
1369  MachineBasicBlock *MBB = I.getParent();
1370  MachineFunction *MF = MBB->getParent();
1371  MachineRegisterInfo &MRI = MF->getRegInfo();
1372  Register Src = I.getOperand(1).getReg();
1373  if (!isSCC(Src, MRI))
1374  return selectImpl(I, *CoverageInfo);
1375 
1376  bool Signed = I.getOpcode() == AMDGPU::G_SITOFP;
1377  Register DstReg = I.getOperand(0).getReg();
1378  const LLT DstTy = MRI.getType(DstReg);
1379  const unsigned DstSize = DstTy.getSizeInBits();
1380  const DebugLoc &DL = I.getDebugLoc();
1381 
1382  BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
1383  .addReg(Src);
1384 
1385  unsigned NewOpc =
1386  DstSize > 32 ? AMDGPU::S_CSELECT_B64 : AMDGPU::S_CSELECT_B32;
1387  auto MIB = BuildMI(*MBB, I, DL, TII.get(NewOpc), DstReg)
1388  .addImm(0)
1389  .addImm(getFPTrueImmVal(DstSize, Signed));
1390 
1391  if (!constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI))
1392  return false;
1393 
1394  I.eraseFromParent();
1395  return true;
1396 }
1397 
1398 bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
1399  MachineBasicBlock *BB = I.getParent();
1400  MachineOperand &ImmOp = I.getOperand(1);
1401 
1402  // The AMDGPU backend only supports Imm operands and not CImm or FPImm.
1403  if (ImmOp.isFPImm()) {
1404  const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt();
1405  ImmOp.ChangeToImmediate(Imm.getZExtValue());
1406  } else if (ImmOp.isCImm()) {
1407  ImmOp.ChangeToImmediate(ImmOp.getCImm()->getZExtValue());
1408  }
1409 
1410  Register DstReg = I.getOperand(0).getReg();
1411  unsigned Size;
1412  bool IsSgpr;
1413  const RegisterBank *RB = MRI->getRegBankOrNull(I.getOperand(0).getReg());
1414  if (RB) {
1415  IsSgpr = RB->getID() == AMDGPU::SGPRRegBankID;
1416  Size = MRI->getType(DstReg).getSizeInBits();
1417  } else {
1418  const TargetRegisterClass *RC = TRI.getRegClassForReg(*MRI, DstReg);
1419  IsSgpr = TRI.isSGPRClass(RC);
1420  Size = TRI.getRegSizeInBits(*RC);
1421  }
1422 
1423  if (Size != 32 && Size != 64)
1424  return false;
1425 
1426  unsigned Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
1427  if (Size == 32) {
1428  I.setDesc(TII.get(Opcode));
1430  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1431  }
1432 
1433  const DebugLoc &DL = I.getDebugLoc();
1434 
1435  APInt Imm(Size, I.getOperand(1).getImm());
1436 
1437  MachineInstr *ResInst;
1438  if (IsSgpr && TII.isInlineConstant(Imm)) {
1439  ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
1440  .addImm(I.getOperand(1).getImm());
1441  } else {
1442  const TargetRegisterClass *RC = IsSgpr ?
1443  &AMDGPU::SReg_32_XM0RegClass : &AMDGPU::VGPR_32RegClass;
1444  Register LoReg = MRI->createVirtualRegister(RC);
1445  Register HiReg = MRI->createVirtualRegister(RC);
1446 
1447  BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg)
1448  .addImm(Imm.trunc(32).getZExtValue());
1449 
1450  BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg)
1451  .addImm(Imm.ashr(32).getZExtValue());
1452 
1453  ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
1454  .addReg(LoReg)
1455  .addImm(AMDGPU::sub0)
1456  .addReg(HiReg)
1457  .addImm(AMDGPU::sub1);
1458  }
1459 
1460  // We can't call constrainSelectedInstRegOperands here, because it doesn't
1461  // work for target independent opcodes
1462  I.eraseFromParent();
1463  const TargetRegisterClass *DstRC =
1464  TRI.getConstrainedRegClassForOperand(ResInst->getOperand(0), *MRI);
1465  if (!DstRC)
1466  return true;
1467  return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI);
1468 }
1469 
1470 static bool isConstant(const MachineInstr &MI) {
1471  return MI.getOpcode() == TargetOpcode::G_CONSTANT;
1472 }
1473 
1474 void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
1475  const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {
1476 
1477  const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg());
1478 
1479  assert(PtrMI);
1480 
1481  if (PtrMI->getOpcode() != TargetOpcode::G_GEP)
1482  return;
1483 
1484  GEPInfo GEPInfo(*PtrMI);
1485 
1486  for (unsigned i = 1; i != 3; ++i) {
1487  const MachineOperand &GEPOp = PtrMI->getOperand(i);
1488  const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg());
1489  assert(OpDef);
1490  if (i == 2 && isConstant(*OpDef)) {
1491  // TODO: Could handle constant base + variable offset, but a combine
1492  // probably should have commuted it.
1493  assert(GEPInfo.Imm == 0);
1494  GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue();
1495  continue;
1496  }
1497  const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI);
1498  if (OpBank->getID() == AMDGPU::SGPRRegBankID)
1499  GEPInfo.SgprParts.push_back(GEPOp.getReg());
1500  else
1501  GEPInfo.VgprParts.push_back(GEPOp.getReg());
1502  }
1503 
1504  AddrInfo.push_back(GEPInfo);
1505  getAddrModeInfo(*PtrMI, MRI, AddrInfo);
1506 }
1507 
1508 bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const {
1509  if (!MI.hasOneMemOperand())
1510  return false;
1511 
1512  const MachineMemOperand *MMO = *MI.memoperands_begin();
1513  const Value *Ptr = MMO->getValue();
1514 
1515  // UndefValue means this is a load of a kernel input. These are uniform.
1516  // Sometimes LDS instructions have constant pointers.
1517  // If Ptr is null, then that means this mem operand contains a
1518  // PseudoSourceValue like GOT.
1519  if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
1520  isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
1521  return true;
1522 
1523  if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
1524  return true;
1525 
1526  const Instruction *I = dyn_cast<Instruction>(Ptr);
1527  return I && I->getMetadata("amdgpu.uniform");
1528 }
1529 
1530 bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {
1531  for (const GEPInfo &GEPInfo : AddrInfo) {
1532  if (!GEPInfo.VgprParts.empty())
1533  return true;
1534  }
1535  return false;
1536 }
1537 
1538 void AMDGPUInstructionSelector::initM0(MachineInstr &I) const {
1539  MachineBasicBlock *BB = I.getParent();
1540 
1541  const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
1542  unsigned AS = PtrTy.getAddressSpace();
1543  if ((AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) &&
1544  STI.ldsRequiresM0Init()) {
1545  // If DS instructions require M0 initialization, insert it before selecting.
1546  BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
1547  .addImm(-1);
1548  }
1549 }
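// Note (an assumption about the hardware, not stated in this file): on subtargets
// where DS instructions still consult M0, it bounds the LDS/GDS address range, so
// it is written with -1 (all ones) to make the whole range accessible.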
1550 
1551 bool AMDGPUInstructionSelector::selectG_LOAD_ATOMICRMW(MachineInstr &I) const {
1552  initM0(I);
1553  return selectImpl(I, *CoverageInfo);
1554 }
1555 
1556 bool AMDGPUInstructionSelector::selectG_BRCOND(MachineInstr &I) const {
1557  MachineBasicBlock *BB = I.getParent();
1558  MachineOperand &CondOp = I.getOperand(0);
1559  Register CondReg = CondOp.getReg();
1560  const DebugLoc &DL = I.getDebugLoc();
1561 
1562  unsigned BrOpcode;
1563  Register CondPhysReg;
1564  const TargetRegisterClass *ConstrainRC;
1565 
1566  // In SelectionDAG, we inspect the IR block for uniformity metadata to decide
1567  // whether the branch is uniform when selecting the instruction. In
1568  // GlobalISel, we should push that decision into RegBankSelect. Assume for now
1569  // RegBankSelect knows what it's doing if the branch condition is scc, even
1570  // though it currently does not.
1571  if (isSCC(CondReg, *MRI)) {
1572  CondPhysReg = AMDGPU::SCC;
1573  BrOpcode = AMDGPU::S_CBRANCH_SCC1;
1574  ConstrainRC = &AMDGPU::SReg_32_XM0RegClass;
1575  } else if (isVCC(CondReg, *MRI)) {
1576  // FIXME: Do we have to insert an and with exec here, like in SelectionDAG?
1577  // We sort of know, based on the register bank, that a VCC producer ands
1578  // inactive lanes with 0. What if there was a logical operation with vcc
1579  // producers in different blocks/with different exec masks?
1580  // FIXME: Should scc->vcc copies and with exec?
1581  CondPhysReg = TRI.getVCC();
1582  BrOpcode = AMDGPU::S_CBRANCH_VCCNZ;
1583  ConstrainRC = TRI.getBoolRC();
1584  } else
1585  return false;
1586 
1587  if (!MRI->getRegClassOrNull(CondReg))
1588  MRI->setRegClass(CondReg, ConstrainRC);
1589 
1590  BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CondPhysReg)
1591  .addReg(CondReg);
1592  BuildMI(*BB, &I, DL, TII.get(BrOpcode))
1593  .addMBB(I.getOperand(1).getMBB());
1594 
1595  I.eraseFromParent();
1596  return true;
1597 }
1598 
1599 bool AMDGPUInstructionSelector::selectG_FRAME_INDEX(MachineInstr &I) const {
1600  Register DstReg = I.getOperand(0).getReg();
1601  const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1602  const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
1603  I.setDesc(TII.get(IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32));
1604  if (IsVGPR)
1605  I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
1606 
1607  return RBI.constrainGenericRegister(
1608  DstReg, IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass, *MRI);
1609 }
1610 
1611 bool AMDGPUInstructionSelector::selectG_PTR_MASK(MachineInstr &I) const {
1612  uint64_t Align = I.getOperand(2).getImm();
1613  const uint64_t Mask = ~((UINT64_C(1) << Align) - 1);
1614 
1615  MachineBasicBlock *BB = I.getParent();
1616 
1617  Register DstReg = I.getOperand(0).getReg();
1618  Register SrcReg = I.getOperand(1).getReg();
1619 
1620  const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1621  const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
1622  const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
1623  unsigned NewOpc = IsVGPR ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
1624  unsigned MovOpc = IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
1625  const TargetRegisterClass &RegRC
1626  = IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
1627 
1628  LLT Ty = MRI->getType(DstReg);
1629 
1630  const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(Ty, *DstRB,
1631  *MRI);
1632  const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(Ty, *SrcRB,
1633  *MRI);
1634  if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
1635  !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
1636  return false;
1637 
1638  const DebugLoc &DL = I.getDebugLoc();
1639  Register ImmReg = MRI->createVirtualRegister(&RegRC);
1640  BuildMI(*BB, &I, DL, TII.get(MovOpc), ImmReg)
1641  .addImm(Mask);
1642 
1643  if (Ty.getSizeInBits() == 32) {
1644  BuildMI(*BB, &I, DL, TII.get(NewOpc), DstReg)
1645  .addReg(SrcReg)
1646  .addReg(ImmReg);
1647  I.eraseFromParent();
1648  return true;
1649  }
1650 
1651  Register HiReg = MRI->createVirtualRegister(&RegRC);
1652  Register LoReg = MRI->createVirtualRegister(&RegRC);
1653  Register MaskLo = MRI->createVirtualRegister(&RegRC);
1654 
1655  BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), LoReg)
1656  .addReg(SrcReg, 0, AMDGPU::sub0);
1657  BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), HiReg)
1658  .addReg(SrcReg, 0, AMDGPU::sub1);
1659 
1660  BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskLo)
1661  .addReg(LoReg)
1662  .addReg(ImmReg);
1663  BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
1664  .addReg(MaskLo)
1665  .addImm(AMDGPU::sub0)
1666  .addReg(HiReg)
1667  .addImm(AMDGPU::sub1);
1668  I.eraseFromParent();
1669  return true;
1670 }
1671 
1672 bool AMDGPUInstructionSelector::select(MachineInstr &I) {
1673  if (I.isPHI())
1674  return selectPHI(I);
1675 
1676  if (!I.isPreISelOpcode()) {
1677  if (I.isCopy())
1678  return selectCOPY(I);
1679  return true;
1680  }
1681 
1682  switch (I.getOpcode()) {
1683  case TargetOpcode::G_AND:
1684  case TargetOpcode::G_OR:
1685  case TargetOpcode::G_XOR:
1686  if (selectG_AND_OR_XOR(I))
1687  return true;
1688  return selectImpl(I, *CoverageInfo);
1689  case TargetOpcode::G_ADD:
1690  case TargetOpcode::G_SUB:
1691  if (selectImpl(I, *CoverageInfo))
1692  return true;
1693  return selectG_ADD_SUB(I);
1694  case TargetOpcode::G_UADDO:
1695  case TargetOpcode::G_USUBO:
1696  return selectG_UADDO_USUBO(I);
1697  case TargetOpcode::G_INTTOPTR:
1698  case TargetOpcode::G_BITCAST:
1699  case TargetOpcode::G_PTRTOINT:
1700  return selectCOPY(I);
1701  case TargetOpcode::G_CONSTANT:
1702  case TargetOpcode::G_FCONSTANT:
1703  return selectG_CONSTANT(I);
1704  case TargetOpcode::G_EXTRACT:
1705  return selectG_EXTRACT(I);
1706  case TargetOpcode::G_MERGE_VALUES:
1707  case TargetOpcode::G_BUILD_VECTOR:
1708  case TargetOpcode::G_CONCAT_VECTORS:
1709  return selectG_MERGE_VALUES(I);
1710  case TargetOpcode::G_UNMERGE_VALUES:
1711  return selectG_UNMERGE_VALUES(I);
1712  case TargetOpcode::G_GEP:
1713  return selectG_GEP(I);
1714  case TargetOpcode::G_IMPLICIT_DEF:
1715  return selectG_IMPLICIT_DEF(I);
1716  case TargetOpcode::G_INSERT:
1717  return selectG_INSERT(I);
1718  case TargetOpcode::G_INTRINSIC:
1719  return selectG_INTRINSIC(I);
1720  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
1721  return selectG_INTRINSIC_W_SIDE_EFFECTS(I);
1722  case TargetOpcode::G_ICMP:
1723  if (selectG_ICMP(I))
1724  return true;
1725  return selectImpl(I, *CoverageInfo);
1726  case TargetOpcode::G_LOAD:
1727  case TargetOpcode::G_ATOMIC_CMPXCHG:
1728  case TargetOpcode::G_ATOMICRMW_XCHG:
1729  case TargetOpcode::G_ATOMICRMW_ADD:
1730  case TargetOpcode::G_ATOMICRMW_SUB:
1731  case TargetOpcode::G_ATOMICRMW_AND:
1732  case TargetOpcode::G_ATOMICRMW_OR:
1733  case TargetOpcode::G_ATOMICRMW_XOR:
1734  case TargetOpcode::G_ATOMICRMW_MIN:
1735  case TargetOpcode::G_ATOMICRMW_MAX:
1736  case TargetOpcode::G_ATOMICRMW_UMIN:
1737  case TargetOpcode::G_ATOMICRMW_UMAX:
1738  case TargetOpcode::G_ATOMICRMW_FADD:
1739  return selectG_LOAD_ATOMICRMW(I);
1740  case TargetOpcode::G_SELECT:
1741  return selectG_SELECT(I);
1742  case TargetOpcode::G_STORE:
1743  return selectG_STORE(I);
1744  case TargetOpcode::G_TRUNC:
1745  return selectG_TRUNC(I);
1746  case TargetOpcode::G_SEXT:
1747  case TargetOpcode::G_ZEXT:
1748  case TargetOpcode::G_ANYEXT:
1749  return selectG_SZA_EXT(I);
1750  case TargetOpcode::G_SITOFP:
1751  case TargetOpcode::G_UITOFP:
1752  return selectG_SITOFP_UITOFP(I);
1753  case TargetOpcode::G_BRCOND:
1754  return selectG_BRCOND(I);
1755  case TargetOpcode::G_FRAME_INDEX:
1756  return selectG_FRAME_INDEX(I);
1757  case TargetOpcode::G_FENCE:
1758  // FIXME: Tablegen importer doesn't handle the imm operands correctly, and
1759  // is checking for G_CONSTANT
1760  I.setDesc(TII.get(AMDGPU::ATOMIC_FENCE));
1761  return true;
1762  case TargetOpcode::G_PTR_MASK:
1763  return selectG_PTR_MASK(I);
1764  default:
1765  return selectImpl(I, *CoverageInfo);
1766  }
1767  return false;
1768 }
1769 
1770 InstructionSelector::ComplexRendererFns
1771 AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const {
1772  return {{
1773  [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
1774  }};
1775 
1776 }
1777 
1778 std::pair<Register, unsigned>
1779 AMDGPUInstructionSelector::selectVOP3ModsImpl(
1780  Register Src) const {
1781  unsigned Mods = 0;
1782  MachineInstr *MI = MRI->getVRegDef(Src);
1783 
1784  if (MI && MI->getOpcode() == AMDGPU::G_FNEG) {
1785  Src = MI->getOperand(1).getReg();
1786  Mods |= SISrcMods::NEG;
1787  MI = MRI->getVRegDef(Src);
1788  }
1789 
1790  if (MI && MI->getOpcode() == AMDGPU::G_FABS) {
1791  Src = MI->getOperand(1).getReg();
1792  Mods |= SISrcMods::ABS;
1793  }
1794 
1795  return std::make_pair(Src, Mods);
1796 }
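// Illustrative folding (hypothetical vregs): given
//   %a = G_FABS %x
//   %b = G_FNEG %a
// a query on %b returns {%x, SISrcMods::NEG | SISrcMods::ABS}, so the
// negation and absolute value are encoded as VOP3 source modifiers rather
// than as separate instructions.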
1797 
1798 ///
1799 /// This will select either an SGPR or VGPR operand and will save us from
1800 /// having to write an extra tablegen pattern.
1801 InstructionSelector::ComplexRendererFns
1802 AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const {
1803  return {{
1804  [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
1805  }};
1806 }
1807 
1808 InstructionSelector::ComplexRendererFns
1809 AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const {
1810  Register Src;
1811  unsigned Mods;
1812  std::tie(Src, Mods) = selectVOP3ModsImpl(Root.getReg());
1813 
1814  return {{
1815  [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
1816  [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
1817  [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
1818  [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // omod
1819  }};
1820 }
1821 
1822 InstructionSelector::ComplexRendererFns
1823 AMDGPUInstructionSelector::selectVOP3Mods0Clamp0OMod(MachineOperand &Root) const {
1824  Register Src;
1825  unsigned Mods;
1826  std::tie(Src, Mods) = selectVOP3ModsImpl(Root.getReg());
1827 
1828  return {{
1829  [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
1830  [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
1831  [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
1832  [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // omod
1833  }};
1834 }
1835 
1836 InstructionSelector::ComplexRendererFns
1837 AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const {
1838  return {{
1839  [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
1840  [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
1841  [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // omod
1842  }};
1843 }
1844 
1845 InstructionSelector::ComplexRendererFns
1846 AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const {
1847  Register Src;
1848  unsigned Mods;
1849  std::tie(Src, Mods) = selectVOP3ModsImpl(Root.getReg());
1850 
1851  return {{
1852  [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
1853  [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
1854  }};
1855 }
1856 
1857 InstructionSelector::ComplexRendererFns
1858 AMDGPUInstructionSelector::selectVOP3OpSelMods0(MachineOperand &Root) const {
1859  // FIXME: Handle clamp and op_sel
1860  return {{
1861  [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
1862  [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // src_mods
1863  [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // clamp
1864  }};
1865 }
1866 
1867 InstructionSelector::ComplexRendererFns
1868 AMDGPUInstructionSelector::selectVOP3OpSelMods(MachineOperand &Root) const {
1869  // FIXME: Handle op_sel
1870  return {{
1871  [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
1872  [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // src_mods
1873  }};
1874 }
1875 
1876 InstructionSelector::ComplexRendererFns
1877 AMDGPUInstructionSelector::selectSmrdImm(MachineOperand &Root) const {
1878  SmallVector<GEPInfo, 4> AddrInfo;
1879  getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
1880 
1881  if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
1882  return None;
1883 
1884  const GEPInfo &GEPInfo = AddrInfo[0];
1885 
1886  if (!AMDGPU::isLegalSMRDImmOffset(STI, GEPInfo.Imm))
1887  return None;
1888 
1889  unsigned PtrReg = GEPInfo.SgprParts[0];
1890  int64_t EncodedImm = AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm);
1891  return {{
1892  [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
1893  [=](MachineInstrBuilder &MIB) { MIB.addImm(EncodedImm); }
1894  }};
1895 }
1896 
1897 InstructionSelector::ComplexRendererFns
1898 AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const {
1899  SmallVector<GEPInfo, 4> AddrInfo;
1900  getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
1901 
1902  if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
1903  return None;
1904 
1905  const GEPInfo &GEPInfo = AddrInfo[0];
1906  unsigned PtrReg = GEPInfo.SgprParts[0];
1907  int64_t EncodedImm = AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm);
1908  if (!isUInt<32>(EncodedImm))
1909  return None;
1910 
1911  return {{
1912  [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
1913  [=](MachineInstrBuilder &MIB) { MIB.addImm(EncodedImm); }
1914  }};
1915 }
1916 
1917 InstructionSelector::ComplexRendererFns
1918 AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const {
1919  MachineInstr *MI = Root.getParent();
1920  MachineBasicBlock *MBB = MI->getParent();
1921 
1922  SmallVector<GEPInfo, 4> AddrInfo;
1923  getAddrModeInfo(*MI, *MRI, AddrInfo);
1924 
1925  // FIXME: We should shrink the GEP if the offset is known to be <= 32 bits;
1926  // then we can select all ptr + 32-bit offsets, not just immediate offsets.
1927  if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
1928  return None;
1929 
1930  const GEPInfo &GEPInfo = AddrInfo[0];
1931  if (!GEPInfo.Imm || !isUInt<32>(GEPInfo.Imm))
1932  return None;
1933 
1934  // If we make it this far we have a load with a 32-bit immediate offset.
1935  // It is OK to select this using an sgpr offset, because we have already
1936  // failed trying to select this load into one of the _IMM variants since
1937  // the _IMM patterns are considered before the _SGPR patterns.
1938  unsigned PtrReg = GEPInfo.SgprParts[0];
1939  Register OffsetReg = MRI->createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
1940  BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), OffsetReg)
1941  .addImm(GEPInfo.Imm);
1942  return {{
1943  [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
1944  [=](MachineInstrBuilder &MIB) { MIB.addReg(OffsetReg); }
1945  }};
1946 }
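// Illustrative outcome: when the 32-bit constant offset did not match the
// _IMM patterns, it is materialized into OffsetReg with S_MOV_B32 here so
// the load can still match one of the _SGPR pattern variants.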
1947 
1948 template <bool Signed>
1949 InstructionSelector::ComplexRendererFns
1950 AMDGPUInstructionSelector::selectFlatOffsetImpl(MachineOperand &Root) const {
1951  MachineInstr *MI = Root.getParent();
1952 
1953  InstructionSelector::ComplexRendererFns Default = {{
1954  [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
1955  [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // offset
1956  [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // slc
1957  }};
1958 
1959  if (!STI.hasFlatInstOffsets())
1960  return Default;
1961 
1962  const MachineInstr *OpDef = MRI->getVRegDef(Root.getReg());
1963  if (!OpDef || OpDef->getOpcode() != AMDGPU::G_GEP)
1964  return Default;
1965 
1967  getConstantVRegVal(OpDef->getOperand(2).getReg(), *MRI);
1968  if (!Offset.hasValue())
1969  return Default;
1970 
1971  unsigned AddrSpace = (*MI->memoperands_begin())->getAddrSpace();
1972  if (!TII.isLegalFLATOffset(Offset.getValue(), AddrSpace, Signed))
1973  return Default;
1974 
1975  Register BasePtr = OpDef->getOperand(1).getReg();
1976 
1977  return {{
1978  [=](MachineInstrBuilder &MIB) { MIB.addReg(BasePtr); },
1979  [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset.getValue()); },
1980  [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // slc
1981  }};
1982 }
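// Illustrative case (hypothetical vregs): for
//   %ptr = G_GEP %base, %cst
// where %cst is a G_CONSTANT whose value is a legal FLAT offset for the
// access's address space, the renderers above yield (%base, the constant
// value, slc=0) instead of the default (%root, 0, 0), folding the constant
// into the instruction's offset field.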
1983 
1984 InstructionSelector::ComplexRendererFns
1985 AMDGPUInstructionSelector::selectFlatOffset(MachineOperand &Root) const {
1986  return selectFlatOffsetImpl<false>(Root);
1987 }
1988 
1989 InstructionSelector::ComplexRendererFns
1990 AMDGPUInstructionSelector::selectFlatOffsetSigned(MachineOperand &Root) const {
1991  return selectFlatOffsetImpl<true>(Root);
1992 }
1993 
1994 static bool isStackPtrRelative(const MachinePointerInfo &PtrInfo) {
1995  auto PSV = PtrInfo.V.dyn_cast<const PseudoSourceValue *>();
1996  return PSV && PSV->isStack();
1997 }
1998 
1999 InstructionSelector::ComplexRendererFns
2000 AMDGPUInstructionSelector::selectMUBUFScratchOffen(MachineOperand &Root) const {
2001  MachineInstr *MI = Root.getParent();
2002  MachineBasicBlock *MBB = MI->getParent();
2003  MachineFunction *MF = MBB->getParent();
2004  const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
2005 
2006  int64_t Offset = 0;
2007  if (mi_match(Root.getReg(), *MRI, m_ICst(Offset))) {
2008  Register HighBits = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
2009 
2010  // TODO: Should this be inside the render function? The iterator seems to
2011  // move.
2012  BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
2013  HighBits)
2014  .addImm(Offset & ~4095);
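    // Worked example: a constant address of 8200 materializes 8192 here and
    // leaves 8 (8200 & 4095) for the immediate offset field below, keeping
    // the immediate within MUBUF's 12-bit range.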
2015 
2016  return {{[=](MachineInstrBuilder &MIB) { // rsrc
2017  MIB.addReg(Info->getScratchRSrcReg());
2018  },
2019  [=](MachineInstrBuilder &MIB) { // vaddr
2020  MIB.addReg(HighBits);
2021  },
2022  [=](MachineInstrBuilder &MIB) { // soffset
2023  const MachineMemOperand *MMO = *MI->memoperands_begin();
2024  const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();
2025 
2026  Register SOffsetReg = isStackPtrRelative(PtrInfo)
2027  ? Info->getStackPtrOffsetReg()
2028  : Info->getScratchWaveOffsetReg();
2029  MIB.addReg(SOffsetReg);
2030  },
2031  [=](MachineInstrBuilder &MIB) { // offset
2032  MIB.addImm(Offset & 4095);
2033  }}};
2034  }
2035 
2036  assert(Offset == 0);
2037 
2038  // Try to fold a frame index directly into the MUBUF vaddr field, and any
2039  // offsets.
2040  Optional<int> FI;
2041  Register VAddr = Root.getReg();
2042  if (const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg())) {
2043  if (isBaseWithConstantOffset(Root, *MRI)) {
2044  const MachineOperand &LHS = RootDef->getOperand(1);
2045  const MachineOperand &RHS = RootDef->getOperand(2);
2046  const MachineInstr *LHSDef = MRI->getVRegDef(LHS.getReg());
2047  const MachineInstr *RHSDef = MRI->getVRegDef(RHS.getReg());
2048  if (LHSDef && RHSDef) {
2049  int64_t PossibleOffset =
2050  RHSDef->getOperand(1).getCImm()->getSExtValue();
2051  if (SIInstrInfo::isLegalMUBUFImmOffset(PossibleOffset) &&
2052  (!STI.privateMemoryResourceIsRangeChecked() ||
2053  KnownBits->signBitIsZero(LHS.getReg()))) {
2054  if (LHSDef->getOpcode() == AMDGPU::G_FRAME_INDEX)
2055  FI = LHSDef->getOperand(1).getIndex();
2056  else
2057  VAddr = LHS.getReg();
2058  Offset = PossibleOffset;
2059  }
2060  }
2061  } else if (RootDef->getOpcode() == AMDGPU::G_FRAME_INDEX) {
2062  FI = RootDef->getOperand(1).getIndex();
2063  }
2064  }
2065 
2066  // If we don't know this private access is a local stack object, it needs to
2067  // be relative to the entry point's scratch wave offset register.
2068  // TODO: Should split large offsets that don't fit like above.
2069  // TODO: Don't use scratch wave offset just because the offset didn't fit.
2070  Register SOffset = FI.hasValue() ? Info->getStackPtrOffsetReg()
2071  : Info->getScratchWaveOffsetReg();
2072 
2073  return {{[=](MachineInstrBuilder &MIB) { // rsrc
2074  MIB.addReg(Info->getScratchRSrcReg());
2075  },
2076  [=](MachineInstrBuilder &MIB) { // vaddr
2077  if (FI.hasValue())
2078  MIB.addFrameIndex(FI.getValue());
2079  else
2080  MIB.addReg(VAddr);
2081  },
2082  [=](MachineInstrBuilder &MIB) { // soffset
2083  MIB.addReg(SOffset);
2084  },
2085  [=](MachineInstrBuilder &MIB) { // offset
2086  MIB.addImm(Offset);
2087  }}};
2088 }
2089 
2090 bool AMDGPUInstructionSelector::isDSOffsetLegal(const MachineRegisterInfo &MRI,
2091  const MachineOperand &Base,
2092  int64_t Offset,
2093  unsigned OffsetBits) const {
2094  if ((OffsetBits == 16 && !isUInt<16>(Offset)) ||
2095  (OffsetBits == 8 && !isUInt<8>(Offset)))
2096  return false;
2097 
2098  if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
2099  return true;
2100 
2101  // On Southern Islands, instructions with a negative base value and an
2102  // offset don't seem to work.
2103  return KnownBits->signBitIsZero(Base.getReg());
2104 }
2105 
2106 InstructionSelector::ComplexRendererFns
2107 AMDGPUInstructionSelector::selectMUBUFScratchOffset(
2108  MachineOperand &Root) const {
2109  MachineInstr *MI = Root.getParent();
2110  MachineBasicBlock *MBB = MI->getParent();
2111 
2112  int64_t Offset = 0;
2113  if (!mi_match(Root.getReg(), *MRI, m_ICst(Offset)) ||
2114  !SIInstrInfo::isLegalMUBUFImmOffset(Offset))
2115  return {};
2116 
2117  const MachineFunction *MF = MBB->getParent();
2118  const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
2119  const MachineMemOperand *MMO = *MI->memoperands_begin();
2120  const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();
2121 
2122  Register SOffsetReg = isStackPtrRelative(PtrInfo)
2123  ? Info->getStackPtrOffsetReg()
2124  : Info->getScratchWaveOffsetReg();
2125  return {{
2126  [=](MachineInstrBuilder &MIB) {
2127  MIB.addReg(Info->getScratchRSrcReg());
2128  }, // rsrc
2129  [=](MachineInstrBuilder &MIB) { MIB.addReg(SOffsetReg); }, // soffset
2130  [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset
2131  }};
2132 }
2133 
2134 InstructionSelector::ComplexRendererFns
2135 AMDGPUInstructionSelector::selectDS1Addr1Offset(MachineOperand &Root) const {
2136  const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
2137  if (!RootDef) {
2138  return {{
2139  [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
2140  [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }
2141  }};
2142  }
2143 
2144  int64_t ConstAddr = 0;
2145  if (isBaseWithConstantOffset(Root, *MRI)) {
2146  const MachineOperand &LHS = RootDef->getOperand(1);
2147  const MachineOperand &RHS = RootDef->getOperand(2);
2148  const MachineInstr *LHSDef = MRI->getVRegDef(LHS.getReg());
2149  const MachineInstr *RHSDef = MRI->getVRegDef(RHS.getReg());
2150  if (LHSDef && RHSDef) {
2151  int64_t PossibleOffset =
2152  RHSDef->getOperand(1).getCImm()->getSExtValue();
2153  if (isDSOffsetLegal(*MRI, LHS, PossibleOffset, 16)) {
2154  // (add n0, c0)
2155  return {{
2156  [=](MachineInstrBuilder &MIB) { MIB.add(LHS); },
2157  [=](MachineInstrBuilder &MIB) { MIB.addImm(PossibleOffset); }
2158  }};
2159  }
2160  }
2161  } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
2162 
2163 
2164 
2165  } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
2166 
2167 
2168  }
2169 
2170  return {{
2171  [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
2172  [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }
2173  }};
2174 }
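// Illustrative case (hypothetical vregs): for an address formed as
//   %addr = G_GEP %base, 64
// the constant fits the 16-bit DS offset field, so (%base, 64) is rendered;
// addresses that do not match any pattern here fall back to (%addr, 0).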
2175 
2176 void AMDGPUInstructionSelector::renderTruncImm32(MachineInstrBuilder &MIB,
2177  const MachineInstr &MI) const {
2178  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
2179  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT");
2180  Optional<int64_t> CstVal = getConstantVRegVal(MI.getOperand(0).getReg(), MRI);
2181  assert(CstVal && "Expected constant value");
2182  MIB.addImm(CstVal.getValue());
2183 }