//===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// AMDGPU.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AMDGPUInstructionSelector.h"
#include "AMDGPU.h"
#include "AMDGPUGlobalISelUtils.h"
#include "AMDGPUInstrInfo.h"
#include "AMDGPURegisterBankInfo.h"
#include "AMDGPUTargetMachine.h"
#include "SIMachineFunctionInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"

#define DEBUG_TYPE "amdgpu-isel"

using namespace llvm;
using namespace MIPatternMatch;

static cl::opt<bool> AllowRiskySelect(
  "amdgpu-global-isel-risky-select",
  cl::desc("Allow GlobalISel to select cases that are likely to not work yet"),
  cl::init(false),
  cl::ReallyHidden);

#define GET_GLOBALISEL_IMPL
#define AMDGPUSubtarget GCNSubtarget
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL
#undef AMDGPUSubtarget

AMDGPUInstructionSelector::AMDGPUInstructionSelector(
    const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI,
    const AMDGPUTargetMachine &TM)
    : TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
      STI(STI),
      EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }

void AMDGPUInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
                                        CodeGenCoverage &CoverageInfo,
                                        ProfileSummaryInfo *PSI,
                                        BlockFrequencyInfo *BFI) {
  MRI = &MF.getRegInfo();
  Subtarget = &MF.getSubtarget<GCNSubtarget>();
  InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
}

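// Return true if Reg holds a 1-bit value that lives in the wave-sized VCC
// register bank (i.e. a lane mask), as opposed to an ordinary SGPR/VGPR
// boolean.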
bool AMDGPUInstructionSelector::isVCC(Register Reg,
                                      const MachineRegisterInfo &MRI) const {
  // The verifier is oblivious to s1 being a valid value for wavesize registers.
  if (Reg.isPhysical())
    return false;

  auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
  const TargetRegisterClass *RC =
      RegClassOrBank.dyn_cast<const TargetRegisterClass*>();
  if (RC) {
    const LLT Ty = MRI.getType(Reg);
    if (!Ty.isValid() || Ty.getSizeInBits() != 1)
      return false;
    // G_TRUNC s1 result is never vcc.
    return MRI.getVRegDef(Reg)->getOpcode() != AMDGPU::G_TRUNC &&
           RC->hasSuperClassEq(TRI.getBoolRC());
  }

  const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>();
  return RB->getID() == AMDGPU::VCCRegBankID;
}

bool AMDGPUInstructionSelector::constrainCopyLikeIntrin(MachineInstr &MI,
                                                        unsigned NewOpc) const {
  MI.setDesc(TII.get(NewOpc));
  MI.removeOperand(1); // Remove intrinsic ID.
  MI.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));

  MachineOperand &Dst = MI.getOperand(0);
  MachineOperand &Src = MI.getOperand(1);

  // TODO: This should be legalized to s32 if needed
  if (MRI->getType(Dst.getReg()) == LLT::scalar(1))
    return false;

  const TargetRegisterClass *DstRC
    = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
  const TargetRegisterClass *SrcRC
    = TRI.getConstrainedRegClassForOperand(Src, *MRI);
  if (!DstRC || DstRC != SrcRC)
    return false;

  return RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI) &&
         RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI);
}

bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
  const DebugLoc &DL = I.getDebugLoc();
  MachineBasicBlock *BB = I.getParent();
  I.setDesc(TII.get(TargetOpcode::COPY));

  const MachineOperand &Src = I.getOperand(1);
  MachineOperand &Dst = I.getOperand(0);
  Register DstReg = Dst.getReg();
  Register SrcReg = Src.getReg();

  if (isVCC(DstReg, *MRI)) {
    if (SrcReg == AMDGPU::SCC) {
      const TargetRegisterClass *RC
        = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
      if (!RC)
        return true;
      return RBI.constrainGenericRegister(DstReg, *RC, *MRI);
    }

    if (!isVCC(SrcReg, *MRI)) {
      // TODO: Should probably leave the copy and let copyPhysReg expand it.
      if (!RBI.constrainGenericRegister(DstReg, *TRI.getBoolRC(), *MRI))
        return false;

      const TargetRegisterClass *SrcRC
        = TRI.getConstrainedRegClassForOperand(Src, *MRI);

      Optional<ValueAndVReg> ConstVal =
          getIConstantVRegValWithLookThrough(SrcReg, *MRI, true);
      if (ConstVal) {
        unsigned MovOpc =
            STI.isWave64() ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
        BuildMI(*BB, &I, DL, TII.get(MovOpc), DstReg)
            .addImm(ConstVal->Value.getBoolValue() ? -1 : 0);
      } else {
        Register MaskedReg = MRI->createVirtualRegister(SrcRC);

        // We can't trust the high bits at this point, so clear them.

        // TODO: Skip masking high bits if def is known boolean.

        unsigned AndOpc =
            TRI.isSGPRClass(SrcRC) ? AMDGPU::S_AND_B32 : AMDGPU::V_AND_B32_e32;
        BuildMI(*BB, &I, DL, TII.get(AndOpc), MaskedReg)
            .addImm(1)
            .addReg(SrcReg);
        BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CMP_NE_U32_e64), DstReg)
            .addImm(0)
            .addReg(MaskedReg);
      }

      if (!MRI->getRegClassOrNull(SrcReg))
        MRI->setRegClass(SrcReg, SrcRC);
      I.eraseFromParent();
      return true;
    }

    const TargetRegisterClass *RC =
        TRI.getConstrainedRegClassForOperand(Dst, *MRI);
    if (RC && !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
      return false;

    return true;
  }

  for (const MachineOperand &MO : I.operands()) {
    if (MO.getReg().isPhysical())
      continue;

    const TargetRegisterClass *RC =
        TRI.getConstrainedRegClassForOperand(MO, *MRI);
    if (!RC)
      continue;
    RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI);
  }
  return true;
}

bool AMDGPUInstructionSelector::selectPHI(MachineInstr &I) const {
  const Register DefReg = I.getOperand(0).getReg();
  const LLT DefTy = MRI->getType(DefReg);
  if (DefTy == LLT::scalar(1)) {
    if (!AllowRiskySelect) {
      LLVM_DEBUG(dbgs() << "Skipping risky boolean phi\n");
      return false;
    }

    LLVM_DEBUG(dbgs() << "Selecting risky boolean phi\n");
  }

  // TODO: Verify this doesn't have insane operands (i.e. VGPR to SGPR copy)

  const RegClassOrRegBank &RegClassOrBank =
    MRI->getRegClassOrRegBank(DefReg);

  const TargetRegisterClass *DefRC
    = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
  if (!DefRC) {
    if (!DefTy.isValid()) {
      LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
      return false;
    }

    const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
    DefRC = TRI.getRegClassForTypeOnBank(DefTy, RB);
    if (!DefRC) {
      LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
      return false;
    }
  }

  // TODO: Verify that all registers have the same bank
  I.setDesc(TII.get(TargetOpcode::PHI));
  return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
}

MachineOperand
AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
                                           const TargetRegisterClass &SubRC,
                                           unsigned SubIdx) const {

  MachineInstr *MI = MO.getParent();
  MachineBasicBlock *BB = MO.getParent()->getParent();
  Register DstReg = MRI->createVirtualRegister(&SubRC);

  if (MO.isReg()) {
    unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
    Register Reg = MO.getReg();
    BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
            .addReg(Reg, 0, ComposedSubIdx);

    return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
                                     MO.isKill(), MO.isDead(), MO.isUndef(),
                                     MO.isEarlyClobber(), 0, MO.isDebug(),
                                     MO.isInternalRead());
  }

  assert(MO.isImm());

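  // Split the 64-bit immediate into 32-bit halves, selected by the requested
  // subregister index; e.g. sub0 of the immediate 0x100000002 is 2 and sub1
  // is 1.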
  APInt Imm(64, MO.getImm());

  switch (SubIdx) {
  default:
    llvm_unreachable("do not know how to split immediate with this sub index.");
  case AMDGPU::sub0:
    return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
  case AMDGPU::sub1:
    return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
  }
}

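// Map a generic G_AND/G_OR/G_XOR opcode to the scalar ALU instruction of the
// requested width.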
static unsigned getLogicalBitOpcode(unsigned Opc, bool Is64) {
  switch (Opc) {
  case AMDGPU::G_AND:
    return Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
  case AMDGPU::G_OR:
    return Is64 ? AMDGPU::S_OR_B64 : AMDGPU::S_OR_B32;
  case AMDGPU::G_XOR:
    return Is64 ? AMDGPU::S_XOR_B64 : AMDGPU::S_XOR_B32;
  default:
    llvm_unreachable("not a bit op");
  }
}

bool AMDGPUInstructionSelector::selectG_AND_OR_XOR(MachineInstr &I) const {
  Register DstReg = I.getOperand(0).getReg();
  unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);

  const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
  if (DstRB->getID() != AMDGPU::SGPRRegBankID &&
      DstRB->getID() != AMDGPU::VCCRegBankID)
    return false;

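  // A 1-bit value on the VCC bank is really a wave-sized lane mask, so it
  // needs the 64-bit opcode on wave64 targets even though its LLT is s1.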
  bool Is64 = Size > 32 || (DstRB->getID() == AMDGPU::VCCRegBankID &&
                            STI.isWave64());
  I.setDesc(TII.get(getLogicalBitOpcode(I.getOpcode(), Is64)));

  // Dead implicit-def of scc
  I.addOperand(MachineOperand::CreateReg(AMDGPU::SCC, true, // isDef
                                         true, // isImp
                                         false, // isKill
                                         true)); // isDead
  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

bool AMDGPUInstructionSelector::selectG_ADD_SUB(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  Register DstReg = I.getOperand(0).getReg();
  const DebugLoc &DL = I.getDebugLoc();
  LLT Ty = MRI->getType(DstReg);
  if (Ty.isVector())
    return false;

  unsigned Size = Ty.getSizeInBits();
  const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
  const bool IsSALU = DstRB->getID() == AMDGPU::SGPRRegBankID;
  const bool Sub = I.getOpcode() == TargetOpcode::G_SUB;

  if (Size == 32) {
    if (IsSALU) {
      const unsigned Opc = Sub ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
      MachineInstr *Add =
        BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
        .add(I.getOperand(1))
        .add(I.getOperand(2));
      I.eraseFromParent();
      return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
    }

    if (STI.hasAddNoCarry()) {
      const unsigned Opc = Sub ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_ADD_U32_e64;
      I.setDesc(TII.get(Opc));
      I.addOperand(*MF, MachineOperand::CreateImm(0));
      I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
      return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
    }

    const unsigned Opc = Sub ? AMDGPU::V_SUB_CO_U32_e64 : AMDGPU::V_ADD_CO_U32_e64;

    Register UnusedCarry = MRI->createVirtualRegister(TRI.getWaveMaskRegClass());
    MachineInstr *Add
      = BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
      .addDef(UnusedCarry, RegState::Dead)
      .add(I.getOperand(1))
      .add(I.getOperand(2))
      .addImm(0);
    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
  }

  assert(!Sub && "illegal sub should not reach here");

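  // A 64-bit add is expanded into a 32-bit lo add that defines a carry and a
  // 32-bit hi add that consumes it, then the halves are recombined with a
  // REG_SEQUENCE.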
  const TargetRegisterClass &RC
    = IsSALU ? AMDGPU::SReg_64_XEXECRegClass : AMDGPU::VReg_64RegClass;
  const TargetRegisterClass &HalfRC
    = IsSALU ? AMDGPU::SReg_32RegClass : AMDGPU::VGPR_32RegClass;

  MachineOperand Lo1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub0));
  MachineOperand Lo2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub0));
  MachineOperand Hi1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub1));
  MachineOperand Hi2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub1));

  Register DstLo = MRI->createVirtualRegister(&HalfRC);
  Register DstHi = MRI->createVirtualRegister(&HalfRC);

  if (IsSALU) {
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo)
      .add(Lo1)
      .add(Lo2);
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi)
      .add(Hi1)
      .add(Hi2);
  } else {
    const TargetRegisterClass *CarryRC = TRI.getWaveMaskRegClass();
    Register CarryReg = MRI->createVirtualRegister(CarryRC);
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADD_CO_U32_e64), DstLo)
      .addDef(CarryReg)
      .add(Lo1)
      .add(Lo2)
      .addImm(0);
    MachineInstr *Addc = BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADDC_U32_e64), DstHi)
      .addDef(MRI->createVirtualRegister(CarryRC), RegState::Dead)
      .add(Hi1)
      .add(Hi2)
      .addReg(CarryReg, RegState::Kill)
      .addImm(0);

    if (!constrainSelectedInstRegOperands(*Addc, TII, TRI, RBI))
      return false;
  }

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
    .addReg(DstLo)
    .addImm(AMDGPU::sub0)
    .addReg(DstHi)
    .addImm(AMDGPU::sub1);


  if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
    return false;

  I.eraseFromParent();
  return true;
}

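// G_UADDO/G_USUBO produce a result plus a carry-out, and G_UADDE/G_USUBE
// additionally consume a carry-in. Use the VALU forms when the carry is a
// VCC lane mask; otherwise route the carry through SCC with scalar opcodes.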
bool AMDGPUInstructionSelector::selectG_UADDO_USUBO_UADDE_USUBE(
  MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  const DebugLoc &DL = I.getDebugLoc();
  Register Dst0Reg = I.getOperand(0).getReg();
  Register Dst1Reg = I.getOperand(1).getReg();
  const bool IsAdd = I.getOpcode() == AMDGPU::G_UADDO ||
                     I.getOpcode() == AMDGPU::G_UADDE;
  const bool HasCarryIn = I.getOpcode() == AMDGPU::G_UADDE ||
                          I.getOpcode() == AMDGPU::G_USUBE;

  if (isVCC(Dst1Reg, *MRI)) {
    unsigned NoCarryOpc =
        IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64;
    unsigned CarryOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
    I.setDesc(TII.get(HasCarryIn ? CarryOpc : NoCarryOpc));
    I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
    I.addOperand(*MF, MachineOperand::CreateImm(0));
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  Register Src0Reg = I.getOperand(2).getReg();
  Register Src1Reg = I.getOperand(3).getReg();

  if (HasCarryIn) {
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
      .addReg(I.getOperand(4).getReg());
  }

  unsigned NoCarryOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
  unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;

  BuildMI(*BB, &I, DL, TII.get(HasCarryIn ? CarryOpc : NoCarryOpc), Dst0Reg)
    .add(I.getOperand(2))
    .add(I.getOperand(3));
  BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), Dst1Reg)
    .addReg(AMDGPU::SCC);

  if (!MRI->getRegClassOrNull(Dst1Reg))
    MRI->setRegClass(Dst1Reg, &AMDGPU::SReg_32RegClass);

  if (!RBI.constrainGenericRegister(Dst0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
      !RBI.constrainGenericRegister(Src0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
      !RBI.constrainGenericRegister(Src1Reg, AMDGPU::SReg_32RegClass, *MRI))
    return false;

  if (HasCarryIn &&
      !RBI.constrainGenericRegister(I.getOperand(4).getReg(),
                                    AMDGPU::SReg_32RegClass, *MRI))
    return false;

  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectG_AMDGPU_MAD_64_32(
  MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  const bool IsUnsigned = I.getOpcode() == AMDGPU::G_AMDGPU_MAD_U64_U32;

  unsigned Opc;
  if (Subtarget->getGeneration() == AMDGPUSubtarget::GFX11)
    Opc = IsUnsigned ? AMDGPU::V_MAD_U64_U32_gfx11_e64
                     : AMDGPU::V_MAD_I64_I32_gfx11_e64;
  else
    Opc = IsUnsigned ? AMDGPU::V_MAD_U64_U32_e64 : AMDGPU::V_MAD_I64_I32_e64;
  I.setDesc(TII.get(Opc));
  I.addOperand(*MF, MachineOperand::CreateImm(0));
  I.addImplicitDefUseOperands(*MF);
  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

// TODO: We should probably legalize these to only using 32-bit results.
bool AMDGPUInstructionSelector::selectG_EXTRACT(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  Register DstReg = I.getOperand(0).getReg();
  Register SrcReg = I.getOperand(1).getReg();
  LLT DstTy = MRI->getType(DstReg);
  LLT SrcTy = MRI->getType(SrcReg);
  const unsigned SrcSize = SrcTy.getSizeInBits();
  unsigned DstSize = DstTy.getSizeInBits();

  // TODO: Should handle any multiple of 32 offset.
  unsigned Offset = I.getOperand(2).getImm();
  if (Offset % 32 != 0 || DstSize > 128)
    return false;

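  // A 32-bit-aligned extract is selected as a plain subregister copy; e.g.
  // extracting bits [63:32] of a 128-bit source is a copy of sub1.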
  // 16-bit operations really use 32-bit registers.
  // FIXME: Probably should not allow 16-bit G_EXTRACT results.
  if (DstSize == 16)
    DstSize = 32;

  const TargetRegisterClass *DstRC =
    TRI.getConstrainedRegClassForOperand(I.getOperand(0), *MRI);
  if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
    return false;

  const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
  const TargetRegisterClass *SrcRC =
    TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank);
  if (!SrcRC)
    return false;
  unsigned SubReg = SIRegisterInfo::getSubRegFromChannel(Offset / 32,
                                                         DstSize / 32);
  SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubReg);
  if (!SrcRC)
    return false;

  SrcReg = constrainOperandRegClass(*MF, TRI, *MRI, TII, RBI, I,
                                    *SrcRC, I.getOperand(1));
  const DebugLoc &DL = I.getDebugLoc();
  BuildMI(*BB, &I, DL, TII.get(TargetOpcode::COPY), DstReg)
    .addReg(SrcReg, 0, SubReg);

  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectG_MERGE_VALUES(MachineInstr &MI) const {
  MachineBasicBlock *BB = MI.getParent();
  Register DstReg = MI.getOperand(0).getReg();
  LLT DstTy = MRI->getType(DstReg);
  LLT SrcTy = MRI->getType(MI.getOperand(1).getReg());

  const unsigned SrcSize = SrcTy.getSizeInBits();
  if (SrcSize < 32)
    return selectImpl(MI, *CoverageInfo);

  const DebugLoc &DL = MI.getDebugLoc();
  const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
  const unsigned DstSize = DstTy.getSizeInBits();
  const TargetRegisterClass *DstRC =
    TRI.getRegClassForSizeOnBank(DstSize, *DstBank);
  if (!DstRC)
    return false;

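  // Build the wide value with a REG_SEQUENCE, tagging each source with the
  // appropriately sized subregister index within the destination class.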
  ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(DstRC, SrcSize / 8);
  MachineInstrBuilder MIB =
    BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::REG_SEQUENCE), DstReg);
  for (int I = 0, E = MI.getNumOperands() - 1; I != E; ++I) {
    MachineOperand &Src = MI.getOperand(I + 1);
    MIB.addReg(Src.getReg(), getUndefRegState(Src.isUndef()));
    MIB.addImm(SubRegs[I]);

    const TargetRegisterClass *SrcRC
      = TRI.getConstrainedRegClassForOperand(Src, *MRI);
    if (SrcRC && !RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI))
      return false;
  }

  if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
    return false;

  MI.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectG_UNMERGE_VALUES(MachineInstr &MI) const {
  MachineBasicBlock *BB = MI.getParent();
  const int NumDst = MI.getNumOperands() - 1;

  MachineOperand &Src = MI.getOperand(NumDst);

  Register SrcReg = Src.getReg();
  Register DstReg0 = MI.getOperand(0).getReg();
  LLT DstTy = MRI->getType(DstReg0);
  LLT SrcTy = MRI->getType(SrcReg);

  const unsigned DstSize = DstTy.getSizeInBits();
  const unsigned SrcSize = SrcTy.getSizeInBits();
  const DebugLoc &DL = MI.getDebugLoc();
  const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);

  const TargetRegisterClass *SrcRC =
    TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank);
  if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
    return false;

  // Note we could have mixed SGPR and VGPR destination banks for an SGPR
  // source, and this relies on the fact that the same subregister indices are
  // used for both.
  ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SrcRC, DstSize / 8);
  for (int I = 0, E = NumDst; I != E; ++I) {
    MachineOperand &Dst = MI.getOperand(I);
    BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::COPY), Dst.getReg())
      .addReg(SrcReg, 0, SubRegs[I]);

    // Make sure the subregister index is valid for the source register.
    SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubRegs[I]);
    if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
      return false;

    const TargetRegisterClass *DstRC =
      TRI.getConstrainedRegClassForOperand(Dst, *MRI);
    if (DstRC && !RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI))
      return false;
  }

  MI.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectG_BUILD_VECTOR_TRUNC(
  MachineInstr &MI) const {
  if (selectImpl(MI, *CoverageInfo))
    return true;

  const LLT S32 = LLT::scalar(32);
  const LLT V2S16 = LLT::fixed_vector(2, 16);

  Register Dst = MI.getOperand(0).getReg();
  if (MRI->getType(Dst) != V2S16)
    return false;

  const RegisterBank *DstBank = RBI.getRegBank(Dst, *MRI, TRI);
  if (DstBank->getID() != AMDGPU::SGPRRegBankID)
    return false;

  Register Src0 = MI.getOperand(1).getReg();
  Register Src1 = MI.getOperand(2).getReg();
  if (MRI->getType(Src0) != S32)
    return false;

  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock *BB = MI.getParent();

  auto ConstSrc1 = getAnyConstantVRegValWithLookThrough(Src1, *MRI, true, true);
  if (ConstSrc1) {
    auto ConstSrc0 =
        getAnyConstantVRegValWithLookThrough(Src0, *MRI, true, true);
    if (ConstSrc0) {
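      // Both operands are constants: fold them into a single packed 32-bit
      // move with src0 in the low half and src1 in the high half.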
      const int64_t K0 = ConstSrc0->Value.getSExtValue();
      const int64_t K1 = ConstSrc1->Value.getSExtValue();
      uint32_t Lo16 = static_cast<uint32_t>(K0) & 0xffff;
      uint32_t Hi16 = static_cast<uint32_t>(K1) & 0xffff;

      BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), Dst)
        .addImm(Lo16 | (Hi16 << 16));
      MI.eraseFromParent();
      return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI);
    }
  }

  // TODO: This should probably be a combine somewhere
  // (build_vector_trunc $src0, undef) -> (copy $src0)
  MachineInstr *Src1Def = getDefIgnoringCopies(Src1, *MRI);
  if (Src1Def && Src1Def->getOpcode() == AMDGPU::G_IMPLICIT_DEF) {
    MI.setDesc(TII.get(AMDGPU::COPY));
    MI.removeOperand(2);
    return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI) &&
           RBI.constrainGenericRegister(Src0, AMDGPU::SReg_32RegClass, *MRI);
  }

  Register ShiftSrc0;
  Register ShiftSrc1;

  // With multiple uses of the shift, this will duplicate the shift and
  // increase register pressure.
  //
  // (build_vector_trunc (lshr_oneuse $src0, 16), (lshr_oneuse $src1, 16))
  //  => (S_PACK_HH_B32_B16 $src0, $src1)
  // (build_vector_trunc $src0, (lshr_oneuse SReg_32:$src1, 16))
  //  => (S_PACK_LH_B32_B16 $src0, $src1)
  // (build_vector_trunc $src0, $src1)
  //  => (S_PACK_LL_B32_B16 $src0, $src1)

  bool Shift0 = mi_match(
      Src0, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc0), m_SpecificICst(16))));

  bool Shift1 = mi_match(
      Src1, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc1), m_SpecificICst(16))));

  unsigned Opc = AMDGPU::S_PACK_LL_B32_B16;
  if (Shift0 && Shift1) {
    Opc = AMDGPU::S_PACK_HH_B32_B16;
    MI.getOperand(1).setReg(ShiftSrc0);
    MI.getOperand(2).setReg(ShiftSrc1);
  } else if (Shift1) {
    Opc = AMDGPU::S_PACK_LH_B32_B16;
    MI.getOperand(2).setReg(ShiftSrc1);
  } else if (Shift0 && ConstSrc1 && ConstSrc1->Value == 0) {
    // build_vector_trunc (lshr $src0, 16), 0 -> s_lshr_b32 $src0, 16
    auto MIB = BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_LSHR_B32), Dst)
                 .addReg(ShiftSrc0)
                 .addImm(16);

    MI.eraseFromParent();
    return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
  }

  MI.setDesc(TII.get(Opc));
  return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
}

bool AMDGPUInstructionSelector::selectG_PTR_ADD(MachineInstr &I) const {
  return selectG_ADD_SUB(I);
}

bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const {
  const MachineOperand &MO = I.getOperand(0);

  // FIXME: Interface for getConstrainedRegClassForOperand needs work. The
  // regbank check here is to know why getConstrainedRegClassForOperand failed.
  const TargetRegisterClass *RC = TRI.getConstrainedRegClassForOperand(MO, *MRI);
  if ((!RC && !MRI->getRegBankOrNull(MO.getReg())) ||
      (RC && RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI))) {
    I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
    return true;
  }

  return false;
}

bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();

  Register DstReg = I.getOperand(0).getReg();
  Register Src0Reg = I.getOperand(1).getReg();
  Register Src1Reg = I.getOperand(2).getReg();
  LLT Src1Ty = MRI->getType(Src1Reg);

  unsigned DstSize = MRI->getType(DstReg).getSizeInBits();
  unsigned InsSize = Src1Ty.getSizeInBits();

  int64_t Offset = I.getOperand(3).getImm();

  // FIXME: These cases should have been illegal and unnecessary to check here.
  if (Offset % 32 != 0 || InsSize % 32 != 0)
    return false;

  // Currently not handled by getSubRegFromChannel.
  if (InsSize > 128)
    return false;

  unsigned SubReg = TRI.getSubRegFromChannel(Offset / 32, InsSize / 32);
  if (SubReg == AMDGPU::NoSubRegister)
    return false;

  const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
  const TargetRegisterClass *DstRC =
    TRI.getRegClassForSizeOnBank(DstSize, *DstBank);
  if (!DstRC)
    return false;

  const RegisterBank *Src0Bank = RBI.getRegBank(Src0Reg, *MRI, TRI);
  const RegisterBank *Src1Bank = RBI.getRegBank(Src1Reg, *MRI, TRI);
  const TargetRegisterClass *Src0RC =
    TRI.getRegClassForSizeOnBank(DstSize, *Src0Bank);
  const TargetRegisterClass *Src1RC =
    TRI.getRegClassForSizeOnBank(InsSize, *Src1Bank);

  // Deal with weird cases where the class only partially supports the subreg
  // index.
  Src0RC = TRI.getSubClassWithSubReg(Src0RC, SubReg);
  if (!Src0RC || !Src1RC)
    return false;

  if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
      !RBI.constrainGenericRegister(Src0Reg, *Src0RC, *MRI) ||
      !RBI.constrainGenericRegister(Src1Reg, *Src1RC, *MRI))
    return false;

  const DebugLoc &DL = I.getDebugLoc();
  BuildMI(*BB, &I, DL, TII.get(TargetOpcode::INSERT_SUBREG), DstReg)
    .addReg(Src0Reg)
    .addReg(Src1Reg)
    .addImm(SubReg);

  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectG_SBFX_UBFX(MachineInstr &MI) const {
  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();
  Register OffsetReg = MI.getOperand(2).getReg();
  Register WidthReg = MI.getOperand(3).getReg();

  assert(RBI.getRegBank(DstReg, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID &&
         "scalar BFX instructions are expanded in regbankselect");
  assert(MRI->getType(MI.getOperand(0).getReg()).getSizeInBits() == 32 &&
         "64-bit vector BFX instructions are expanded in regbankselect");

  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock *MBB = MI.getParent();

  bool IsSigned = MI.getOpcode() == TargetOpcode::G_SBFX;
  unsigned Opc = IsSigned ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64;
  auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), DstReg)
                 .addReg(SrcReg)
                 .addReg(OffsetReg)
                 .addReg(WidthReg);
  MI.eraseFromParent();
  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

bool AMDGPUInstructionSelector::selectInterpP1F16(MachineInstr &MI) const {
  if (STI.getLDSBankCount() != 16)
    return selectImpl(MI, *CoverageInfo);

  Register Dst = MI.getOperand(0).getReg();
  Register Src0 = MI.getOperand(2).getReg();
  Register M0Val = MI.getOperand(6).getReg();
  if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI) ||
      !RBI.constrainGenericRegister(Dst, AMDGPU::VGPR_32RegClass, *MRI) ||
      !RBI.constrainGenericRegister(Src0, AMDGPU::VGPR_32RegClass, *MRI))
    return false;

  // This requires 2 instructions. It is possible to write a pattern to support
  // this, but the generated isel emitter doesn't correctly deal with multiple
  // output instructions using the same physical register input. The copy to m0
  // is incorrectly placed before the second instruction.
  //
  // TODO: Match source modifiers.

  Register InterpMov = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock *MBB = MI.getParent();

  BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
    .addReg(M0Val);
  BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_MOV_F32), InterpMov)
    .addImm(2)
    .addImm(MI.getOperand(4).getImm())  // $attr
    .addImm(MI.getOperand(3).getImm()); // $attrchan

  BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_P1LV_F16), Dst)
    .addImm(0)                          // $src0_modifiers
    .addReg(Src0)                       // $src0
    .addImm(MI.getOperand(4).getImm())  // $attr
    .addImm(MI.getOperand(3).getImm())  // $attrchan
    .addImm(0)                          // $src2_modifiers
    .addReg(InterpMov)                  // $src2 - 2 f16 values selected by high
    .addImm(MI.getOperand(5).getImm())  // $high
    .addImm(0)                          // $clamp
    .addImm(0);                         // $omod

  MI.eraseFromParent();
  return true;
}

// Writelane is special in that it can use SGPR and M0 (which would normally
// count as using the constant bus twice - but in this case it is allowed since
// the lane selector doesn't count as a use of the constant bus). However, it is
// still required to abide by the 1 SGPR rule. Fix this up if we might have
// multiple SGPRs.
bool AMDGPUInstructionSelector::selectWritelane(MachineInstr &MI) const {
  // With a constant bus limit of at least 2, there's no issue.
  if (STI.getConstantBusLimit(AMDGPU::V_WRITELANE_B32) > 1)
    return selectImpl(MI, *CoverageInfo);

  MachineBasicBlock *MBB = MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();
  Register VDst = MI.getOperand(0).getReg();
  Register Val = MI.getOperand(2).getReg();
  Register LaneSelect = MI.getOperand(3).getReg();
  Register VDstIn = MI.getOperand(4).getReg();

  auto MIB = BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_WRITELANE_B32), VDst);

  Optional<ValueAndVReg> ConstSelect =
      getIConstantVRegValWithLookThrough(LaneSelect, *MRI);
  if (ConstSelect) {
    // The selector has to be an inline immediate, so we can use whatever for
    // the other operands.
    MIB.addReg(Val);
    MIB.addImm(ConstSelect->Value.getSExtValue() &
               maskTrailingOnes<uint64_t>(STI.getWavefrontSizeLog2()));
  } else {
    Optional<ValueAndVReg> ConstVal =
        getIConstantVRegValWithLookThrough(Val, *MRI);

    // If the value written is an inline immediate, we can get away without a
    // copy to m0.
    if (ConstVal && AMDGPU::isInlinableLiteral32(ConstVal->Value.getSExtValue(),
                                                 STI.hasInv2PiInlineImm())) {
      MIB.addImm(ConstVal->Value.getSExtValue());
      MIB.addReg(LaneSelect);
    } else {
      MIB.addReg(Val);

      // If the lane selector was originally in a VGPR and copied with
      // readfirstlane, there's a hazard to read the same SGPR from the
      // VALU. Constrain to a different SGPR to help avoid needing a nop later.
      RBI.constrainGenericRegister(LaneSelect, AMDGPU::SReg_32_XM0RegClass, *MRI);

      BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
        .addReg(LaneSelect);
      MIB.addReg(AMDGPU::M0);
    }
  }

  MIB.addReg(VDstIn);

  MI.eraseFromParent();
  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

// We need to handle this here because tablegen doesn't support matching
// instructions with multiple outputs.
bool AMDGPUInstructionSelector::selectDivScale(MachineInstr &MI) const {
  Register Dst0 = MI.getOperand(0).getReg();
  Register Dst1 = MI.getOperand(1).getReg();

  LLT Ty = MRI->getType(Dst0);
  unsigned Opc;
  if (Ty == LLT::scalar(32))
    Opc = AMDGPU::V_DIV_SCALE_F32_e64;
  else if (Ty == LLT::scalar(64))
    Opc = AMDGPU::V_DIV_SCALE_F64_e64;
  else
    return false;

  // TODO: Match source modifiers.

  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock *MBB = MI.getParent();

  Register Numer = MI.getOperand(3).getReg();
  Register Denom = MI.getOperand(4).getReg();
  unsigned ChooseDenom = MI.getOperand(5).getImm();

  Register Src0 = ChooseDenom != 0 ? Numer : Denom;

  auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), Dst0)
    .addDef(Dst1)
    .addImm(0)     // $src0_modifiers
    .addUse(Src0)  // $src0
    .addImm(0)     // $src1_modifiers
    .addUse(Denom) // $src1
    .addImm(0)     // $src2_modifiers
    .addUse(Numer) // $src2
    .addImm(0)     // $clamp
    .addImm(0);    // $omod

  MI.eraseFromParent();
  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I) const {
  unsigned IntrinsicID = I.getIntrinsicID();
  switch (IntrinsicID) {
  case Intrinsic::amdgcn_if_break: {
    MachineBasicBlock *BB = I.getParent();

    // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
    // SelectionDAG uses for wave32 vs wave64.
    BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::SI_IF_BREAK))
      .add(I.getOperand(0))
      .add(I.getOperand(2))
      .add(I.getOperand(3));

    Register DstReg = I.getOperand(0).getReg();
    Register Src0Reg = I.getOperand(2).getReg();
    Register Src1Reg = I.getOperand(3).getReg();

    I.eraseFromParent();

    for (Register Reg : { DstReg, Src0Reg, Src1Reg })
      MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());

    return true;
  }
  case Intrinsic::amdgcn_interp_p1_f16:
    return selectInterpP1F16(I);
  case Intrinsic::amdgcn_wqm:
    return constrainCopyLikeIntrin(I, AMDGPU::WQM);
  case Intrinsic::amdgcn_softwqm:
    return constrainCopyLikeIntrin(I, AMDGPU::SOFT_WQM);
  case Intrinsic::amdgcn_strict_wwm:
  case Intrinsic::amdgcn_wwm:
    return constrainCopyLikeIntrin(I, AMDGPU::STRICT_WWM);
  case Intrinsic::amdgcn_strict_wqm:
    return constrainCopyLikeIntrin(I, AMDGPU::STRICT_WQM);
  case Intrinsic::amdgcn_writelane:
    return selectWritelane(I);
  case Intrinsic::amdgcn_div_scale:
    return selectDivScale(I);
  case Intrinsic::amdgcn_icmp:
    return selectIntrinsicIcmp(I);
  case Intrinsic::amdgcn_ballot:
    return selectBallot(I);
  case Intrinsic::amdgcn_reloc_constant:
    return selectRelocConstant(I);
  case Intrinsic::amdgcn_groupstaticsize:
    return selectGroupStaticSize(I);
  case Intrinsic::returnaddress:
    return selectReturnAddress(I);
  case Intrinsic::amdgcn_smfmac_f32_16x16x32_f16:
  case Intrinsic::amdgcn_smfmac_f32_32x32x16_f16:
  case Intrinsic::amdgcn_smfmac_f32_16x16x32_bf16:
  case Intrinsic::amdgcn_smfmac_f32_32x32x16_bf16:
  case Intrinsic::amdgcn_smfmac_i32_16x16x64_i8:
  case Intrinsic::amdgcn_smfmac_i32_32x32x32_i8:
    return selectSMFMACIntrin(I);
  default:
    return selectImpl(I, *CoverageInfo);
  }
}

static int getV_CMPOpcode(CmpInst::Predicate P, unsigned Size) {
  if (Size != 32 && Size != 64)
    return -1;
  switch (P) {
  default:
    llvm_unreachable("Unknown condition code!");
  case CmpInst::ICMP_NE:
    return Size == 32 ? AMDGPU::V_CMP_NE_U32_e64 : AMDGPU::V_CMP_NE_U64_e64;
  case CmpInst::ICMP_EQ:
    return Size == 32 ? AMDGPU::V_CMP_EQ_U32_e64 : AMDGPU::V_CMP_EQ_U64_e64;
  case CmpInst::ICMP_SGT:
    return Size == 32 ? AMDGPU::V_CMP_GT_I32_e64 : AMDGPU::V_CMP_GT_I64_e64;
  case CmpInst::ICMP_SGE:
    return Size == 32 ? AMDGPU::V_CMP_GE_I32_e64 : AMDGPU::V_CMP_GE_I64_e64;
  case CmpInst::ICMP_SLT:
    return Size == 32 ? AMDGPU::V_CMP_LT_I32_e64 : AMDGPU::V_CMP_LT_I64_e64;
  case CmpInst::ICMP_SLE:
    return Size == 32 ? AMDGPU::V_CMP_LE_I32_e64 : AMDGPU::V_CMP_LE_I64_e64;
  case CmpInst::ICMP_UGT:
    return Size == 32 ? AMDGPU::V_CMP_GT_U32_e64 : AMDGPU::V_CMP_GT_U64_e64;
  case CmpInst::ICMP_UGE:
    return Size == 32 ? AMDGPU::V_CMP_GE_U32_e64 : AMDGPU::V_CMP_GE_U64_e64;
  case CmpInst::ICMP_ULT:
    return Size == 32 ? AMDGPU::V_CMP_LT_U32_e64 : AMDGPU::V_CMP_LT_U64_e64;
  case CmpInst::ICMP_ULE:
    return Size == 32 ? AMDGPU::V_CMP_LE_U32_e64 : AMDGPU::V_CMP_LE_U64_e64;
  }
}

int AMDGPUInstructionSelector::getS_CMPOpcode(CmpInst::Predicate P,
                                              unsigned Size) const {
  if (Size == 64) {
    if (!STI.hasScalarCompareEq64())
      return -1;

    switch (P) {
    case CmpInst::ICMP_NE:
      return AMDGPU::S_CMP_LG_U64;
    case CmpInst::ICMP_EQ:
      return AMDGPU::S_CMP_EQ_U64;
    default:
      return -1;
    }
  }

  if (Size != 32)
    return -1;

  switch (P) {
  case CmpInst::ICMP_NE:
    return AMDGPU::S_CMP_LG_U32;
  case CmpInst::ICMP_EQ:
    return AMDGPU::S_CMP_EQ_U32;
  case CmpInst::ICMP_SGT:
    return AMDGPU::S_CMP_GT_I32;
  case CmpInst::ICMP_SGE:
    return AMDGPU::S_CMP_GE_I32;
  case CmpInst::ICMP_SLT:
    return AMDGPU::S_CMP_LT_I32;
  case CmpInst::ICMP_SLE:
    return AMDGPU::S_CMP_LE_I32;
  case CmpInst::ICMP_UGT:
    return AMDGPU::S_CMP_GT_U32;
  case CmpInst::ICMP_UGE:
    return AMDGPU::S_CMP_GE_U32;
  case CmpInst::ICMP_ULT:
    return AMDGPU::S_CMP_LT_U32;
  case CmpInst::ICMP_ULE:
    return AMDGPU::S_CMP_LE_U32;
  default:
    llvm_unreachable("Unknown condition code!");
  }
}

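// Integer compares producing a scalar boolean are selected to S_CMP* plus a
// copy of SCC; compares producing a VCC lane mask use the VALU V_CMP* forms.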
bool AMDGPUInstructionSelector::selectG_ICMP(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  const DebugLoc &DL = I.getDebugLoc();

  Register SrcReg = I.getOperand(2).getReg();
  unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);

  auto Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate();

  Register CCReg = I.getOperand(0).getReg();
  if (!isVCC(CCReg, *MRI)) {
    int Opcode = getS_CMPOpcode(Pred, Size);
    if (Opcode == -1)
      return false;
    MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode))
            .add(I.getOperand(2))
            .add(I.getOperand(3));
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CCReg)
      .addReg(AMDGPU::SCC);
    bool Ret =
        constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI) &&
        RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32RegClass, *MRI);
    I.eraseFromParent();
    return Ret;
  }

  int Opcode = getV_CMPOpcode(Pred, Size);
  if (Opcode == -1)
    return false;

  MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode),
            I.getOperand(0).getReg())
            .add(I.getOperand(2))
            .add(I.getOperand(3));
  RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
                               *TRI.getBoolRC(), *MRI);
  bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
  I.eraseFromParent();
  return Ret;
}

bool AMDGPUInstructionSelector::selectIntrinsicIcmp(MachineInstr &I) const {
  Register Dst = I.getOperand(0).getReg();
  if (isVCC(Dst, *MRI))
    return false;

  if (MRI->getType(Dst).getSizeInBits() != STI.getWavefrontSize())
    return false;

  MachineBasicBlock *BB = I.getParent();
  const DebugLoc &DL = I.getDebugLoc();
  Register SrcReg = I.getOperand(2).getReg();
  unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);

  auto Pred = static_cast<CmpInst::Predicate>(I.getOperand(4).getImm());
  if (!ICmpInst::isIntPredicate(static_cast<ICmpInst::Predicate>(Pred))) {
    MachineInstr *ICmp =
        BuildMI(*BB, &I, DL, TII.get(AMDGPU::IMPLICIT_DEF), Dst);

    if (!RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
                                      *TRI.getBoolRC(), *MRI))
      return false;
    I.eraseFromParent();
    return true;
  }

  int Opcode = getV_CMPOpcode(Pred, Size);
  if (Opcode == -1)
    return false;

  MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode), Dst)
                           .add(I.getOperand(2))
                           .add(I.getOperand(3));
  RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(), *TRI.getBoolRC(),
                               *MRI);
  bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
  I.eraseFromParent();
  return Ret;
}

bool AMDGPUInstructionSelector::selectBallot(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  const DebugLoc &DL = I.getDebugLoc();
  Register DstReg = I.getOperand(0).getReg();
  const unsigned Size = MRI->getType(DstReg).getSizeInBits();
  const bool Is64 = Size == 64;

  if (Size != STI.getWavefrontSize())
    return false;

  Optional<ValueAndVReg> Arg =
      getIConstantVRegValWithLookThrough(I.getOperand(2).getReg(), *MRI);

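  // Fold a constant ballot operand: ballot(false) is an all-zero mask and
  // ballot(true) is just a copy of EXEC.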
  if (Arg.hasValue()) {
    const int64_t Value = Arg.getValue().Value.getSExtValue();
    if (Value == 0) {
      unsigned Opcode = Is64 ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
      BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg).addImm(0);
    } else if (Value == -1) { // all ones
      Register SrcReg = Is64 ? AMDGPU::EXEC : AMDGPU::EXEC_LO;
      BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg);
    } else
      return false;
  } else {
    Register SrcReg = I.getOperand(2).getReg();
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg);
  }

  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectRelocConstant(MachineInstr &I) const {
  Register DstReg = I.getOperand(0).getReg();
  const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
  const TargetRegisterClass *DstRC = TRI.getRegClassForSizeOnBank(32, *DstBank);
  if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
    return false;

  const bool IsVALU = DstBank->getID() == AMDGPU::VGPRRegBankID;

  Module *M = MF->getFunction().getParent();
  const MDNode *Metadata = I.getOperand(2).getMetadata();
  auto SymbolName = cast<MDString>(Metadata->getOperand(0))->getString();
  auto RelocSymbol = cast<GlobalVariable>(
    M->getOrInsertGlobal(SymbolName, Type::getInt32Ty(M->getContext())));

  MachineBasicBlock *BB = I.getParent();
  BuildMI(*BB, &I, I.getDebugLoc(),
          TII.get(IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32), DstReg)
    .addGlobalAddress(RelocSymbol, 0, SIInstrInfo::MO_ABS32_LO);

  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectGroupStaticSize(MachineInstr &I) const {
  Triple::OSType OS = MF->getTarget().getTargetTriple().getOS();

  Register DstReg = I.getOperand(0).getReg();
  const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
  unsigned Mov = DstRB->getID() == AMDGPU::SGPRRegBankID ?
    AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;

  MachineBasicBlock *MBB = I.getParent();
  const DebugLoc &DL = I.getDebugLoc();

  auto MIB = BuildMI(*MBB, &I, DL, TII.get(Mov), DstReg);

  if (OS == Triple::AMDHSA || OS == Triple::AMDPAL) {
    const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
    MIB.addImm(MFI->getLDSSize());
  } else {
    Module *M = MF->getFunction().getParent();
    const GlobalValue *GV
      = Intrinsic::getDeclaration(M, Intrinsic::amdgcn_groupstaticsize);
    MIB.addGlobalAddress(GV, 0, SIInstrInfo::MO_ABS32_LO);
  }

  I.eraseFromParent();
  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

bool AMDGPUInstructionSelector::selectReturnAddress(MachineInstr &I) const {
  MachineBasicBlock *MBB = I.getParent();
  MachineFunction &MF = *MBB->getParent();
  const DebugLoc &DL = I.getDebugLoc();

  MachineOperand &Dst = I.getOperand(0);
  Register DstReg = Dst.getReg();
  unsigned Depth = I.getOperand(2).getImm();

  const TargetRegisterClass *RC
    = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
  if (!RC->hasSubClassEq(&AMDGPU::SGPR_64RegClass) ||
      !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
    return false;

  // Check for kernel and shader functions
  if (Depth != 0 ||
      MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction()) {
    BuildMI(*MBB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
      .addImm(0);
    I.eraseFromParent();
    return true;
  }

  MachineFrameInfo &MFI = MF.getFrameInfo();
  // There is a call to @llvm.returnaddress in this function
  MFI.setReturnAddressIsTaken(true);

  // Get the return address reg and mark it as an implicit live-in
  Register ReturnAddrReg = TRI.getReturnAddressReg(MF);
  Register LiveIn = getFunctionLiveInPhysReg(MF, TII, ReturnAddrReg,
                                             AMDGPU::SReg_64RegClass, DL);
  BuildMI(*MBB, &I, DL, TII.get(AMDGPU::COPY), DstReg)
    .addReg(LiveIn);
  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectEndCfIntrinsic(MachineInstr &MI) const {
  // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
  // SelectionDAG uses for wave32 vs wave64.
  MachineBasicBlock *BB = MI.getParent();
  BuildMI(*BB, &MI, MI.getDebugLoc(), TII.get(AMDGPU::SI_END_CF))
      .add(MI.getOperand(1));

  Register Reg = MI.getOperand(1).getReg();
  MI.eraseFromParent();

  if (!MRI->getRegClassOrNull(Reg))
    MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
  return true;
}

bool AMDGPUInstructionSelector::selectDSOrderedIntrinsic(
  MachineInstr &MI, Intrinsic::ID IntrID) const {
  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction *MF = MBB->getParent();
  const DebugLoc &DL = MI.getDebugLoc();

  unsigned IndexOperand = MI.getOperand(7).getImm();
  bool WaveRelease = MI.getOperand(8).getImm() != 0;
  bool WaveDone = MI.getOperand(9).getImm() != 0;

  if (WaveDone && !WaveRelease)
    report_fatal_error("ds_ordered_count: wave_done requires wave_release");

  unsigned OrderedCountIndex = IndexOperand & 0x3f;
  IndexOperand &= ~0x3f;
  unsigned CountDw = 0;

  if (STI.getGeneration() >= AMDGPUSubtarget::GFX10) {
    CountDw = (IndexOperand >> 24) & 0xf;
    IndexOperand &= ~(0xf << 24);

    if (CountDw < 1 || CountDw > 4) {
      report_fatal_error(
        "ds_ordered_count: dword count must be between 1 and 4");
    }
  }

  if (IndexOperand)
    report_fatal_error("ds_ordered_count: bad index operand");

  unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 0 : 1;
  unsigned ShaderType = SIInstrInfo::getDSShaderTypeValue(*MF);

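  // Pack the DS_ORDERED_COUNT offset field: offset0[7:2] holds the ordered
  // count index, and offset1 packs wave_release, wave_done, the instruction
  // kind, the shader type (pre-GFX11) and the dword count (GFX10+).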
  unsigned Offset0 = OrderedCountIndex << 2;
  unsigned Offset1 = WaveRelease | (WaveDone << 1) | (Instruction << 4);

  if (STI.getGeneration() >= AMDGPUSubtarget::GFX10)
    Offset1 |= (CountDw - 1) << 6;

  if (STI.getGeneration() < AMDGPUSubtarget::GFX11)
    Offset1 |= ShaderType << 2;

  unsigned Offset = Offset0 | (Offset1 << 8);

  Register M0Val = MI.getOperand(2).getReg();
  BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
    .addReg(M0Val);

  Register DstReg = MI.getOperand(0).getReg();
  Register ValReg = MI.getOperand(3).getReg();
  MachineInstrBuilder DS =
    BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::DS_ORDERED_COUNT), DstReg)
      .addReg(ValReg)
      .addImm(Offset)
      .cloneMemRefs(MI);

  if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI))
    return false;

  bool Ret = constrainSelectedInstRegOperands(*DS, TII, TRI, RBI);
  MI.eraseFromParent();
  return Ret;
}

static unsigned gwsIntrinToOpcode(unsigned IntrID) {
  switch (IntrID) {
  case Intrinsic::amdgcn_ds_gws_init:
    return AMDGPU::DS_GWS_INIT;
  case Intrinsic::amdgcn_ds_gws_barrier:
    return AMDGPU::DS_GWS_BARRIER;
  case Intrinsic::amdgcn_ds_gws_sema_v:
    return AMDGPU::DS_GWS_SEMA_V;
  case Intrinsic::amdgcn_ds_gws_sema_br:
    return AMDGPU::DS_GWS_SEMA_BR;
  case Intrinsic::amdgcn_ds_gws_sema_p:
    return AMDGPU::DS_GWS_SEMA_P;
  case Intrinsic::amdgcn_ds_gws_sema_release_all:
    return AMDGPU::DS_GWS_SEMA_RELEASE_ALL;
  default:
    llvm_unreachable("not a gws intrinsic");
  }
}

bool AMDGPUInstructionSelector::selectDSGWSIntrinsic(MachineInstr &MI,
                                                     Intrinsic::ID IID) const {
  if (IID == Intrinsic::amdgcn_ds_gws_sema_release_all &&
      !STI.hasGWSSemaReleaseAll())
    return false;

  // intrinsic ID, vsrc, offset
  const bool HasVSrc = MI.getNumOperands() == 3;
  assert(HasVSrc || MI.getNumOperands() == 2);

  Register BaseOffset = MI.getOperand(HasVSrc ? 2 : 1).getReg();
  const RegisterBank *OffsetRB = RBI.getRegBank(BaseOffset, *MRI, TRI);
  if (OffsetRB->getID() != AMDGPU::SGPRRegBankID)
    return false;

  MachineInstr *OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
  assert(OffsetDef);

  unsigned ImmOffset;

  MachineBasicBlock *MBB = MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();

  MachineInstr *Readfirstlane = nullptr;

  // If we legalized the VGPR input, strip out the readfirstlane to analyze the
  // incoming offset, in case there's an add of a constant. We'll have to put it
  // back later.
  if (OffsetDef->getOpcode() == AMDGPU::V_READFIRSTLANE_B32) {
    Readfirstlane = OffsetDef;
    BaseOffset = OffsetDef->getOperand(1).getReg();
    OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
  }

  if (OffsetDef->getOpcode() == AMDGPU::G_CONSTANT) {
    // If we have a constant offset, try to use the 0 in m0 as the base.
    // TODO: Look into changing the default m0 initialization value. If the
    // default -1 only set the low 16-bits, we could leave it as-is and add 1 to
    // the immediate offset.

    ImmOffset = OffsetDef->getOperand(1).getCImm()->getZExtValue();
    BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
      .addImm(0);
  } else {
    std::tie(BaseOffset, ImmOffset) =
        AMDGPU::getBaseWithConstantOffset(*MRI, BaseOffset);

    if (Readfirstlane) {
      // We have the constant offset now, so put the readfirstlane back on the
      // variable component.
      if (!RBI.constrainGenericRegister(BaseOffset, AMDGPU::VGPR_32RegClass, *MRI))
        return false;

      Readfirstlane->getOperand(1).setReg(BaseOffset);
      BaseOffset = Readfirstlane->getOperand(0).getReg();
    } else {
      if (!RBI.constrainGenericRegister(BaseOffset,
                                        AMDGPU::SReg_32RegClass, *MRI))
        return false;
    }

    Register M0Base = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
    BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_LSHL_B32), M0Base)
      .addReg(BaseOffset)
      .addImm(16);

    BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
      .addReg(M0Base);
  }

  // The resource id offset is computed as (<isa opaque base> + M0[21:16] +
  // offset field) % 64. Some versions of the programming guide omit the m0
  // part, or claim it's from offset 0.
  auto MIB = BuildMI(*MBB, &MI, DL, TII.get(gwsIntrinToOpcode(IID)));

  if (HasVSrc) {
    Register VSrc = MI.getOperand(1).getReg();
    MIB.addReg(VSrc);

    if (!RBI.constrainGenericRegister(VSrc, AMDGPU::VGPR_32RegClass, *MRI))
      return false;
  }

  MIB.addImm(ImmOffset)
     .cloneMemRefs(MI);

  TII.enforceOperandRCAlignment(*MIB, AMDGPU::OpName::data0);

  MI.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectDSAppendConsume(MachineInstr &MI,
                                                      bool IsAppend) const {
  Register PtrBase = MI.getOperand(2).getReg();
  LLT PtrTy = MRI->getType(PtrBase);
  bool IsGDS = PtrTy.getAddressSpace() == AMDGPUAS::REGION_ADDRESS;

  unsigned Offset;
  std::tie(PtrBase, Offset) = selectDS1Addr1OffsetImpl(MI.getOperand(2));

  // TODO: Should this try to look through readfirstlane like GWS?
  if (!isDSOffsetLegal(PtrBase, Offset)) {
    PtrBase = MI.getOperand(2).getReg();
    Offset = 0;
  }

  MachineBasicBlock *MBB = MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();
  const unsigned Opc = IsAppend ? AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME;

  BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
    .addReg(PtrBase);
  if (!RBI.constrainGenericRegister(PtrBase, AMDGPU::SReg_32RegClass, *MRI))
    return false;

  auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), MI.getOperand(0).getReg())
                 .addImm(Offset)
                 .addImm(IsGDS ? -1 : 0)
                 .cloneMemRefs(MI);
  MI.eraseFromParent();
  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

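// A workgroup that fits in a single wave executes in lockstep, so s_barrier
// is unnecessary; only a scheduling barrier needs to be kept.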
bool AMDGPUInstructionSelector::selectSBarrier(MachineInstr &MI) const {
  if (TM.getOptLevel() > CodeGenOpt::None) {
    unsigned WGSize = STI.getFlatWorkGroupSizes(MF->getFunction()).second;
    if (WGSize <= STI.getWavefrontSize()) {
      MachineBasicBlock *MBB = MI.getParent();
      const DebugLoc &DL = MI.getDebugLoc();
      BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::WAVE_BARRIER));
      MI.eraseFromParent();
      return true;
    }
  }
  return selectImpl(MI, *CoverageInfo);
}

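// Decode the texfailctrl immediate: bit 0 enables TFE (texture fail) and
// bit 1 enables LWE (LOD warning); any other set bit makes the value invalid.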
static bool parseTexFail(uint64_t TexFailCtrl, bool &TFE, bool &LWE,
                         bool &IsTexFail) {
  if (TexFailCtrl)
    IsTexFail = true;

  TFE = (TexFailCtrl & 0x1) ? true : false;
  TexFailCtrl &= ~(uint64_t)0x1;
  LWE = (TexFailCtrl & 0x2) ? true : false;
  TexFailCtrl &= ~(uint64_t)0x2;

  return TexFailCtrl == 0;
}

bool AMDGPUInstructionSelector::selectImageIntrinsic(
  MachineInstr &MI, const AMDGPU::ImageDimIntrinsicInfo *Intr) const {
  MachineBasicBlock *MBB = MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();

  const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
      AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);

  const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim);
  unsigned IntrOpcode = Intr->BaseOpcode;
  const bool IsGFX10Plus = AMDGPU::isGFX10Plus(STI);
  const bool IsGFX11Plus = AMDGPU::isGFX11Plus(STI);

  const unsigned ArgOffset = MI.getNumExplicitDefs() + 1;

  Register VDataIn, VDataOut;
  LLT VDataTy;
  int NumVDataDwords = -1;
  bool IsD16 = MI.getOpcode() == AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16 ||
               MI.getOpcode() == AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16;

  bool Unorm;
  if (!BaseOpcode->Sampler)
    Unorm = true;
  else
    Unorm = MI.getOperand(ArgOffset + Intr->UnormIndex).getImm() != 0;

  bool TFE;
  bool LWE;
  bool IsTexFail = false;
  if (!parseTexFail(MI.getOperand(ArgOffset + Intr->TexFailCtrlIndex).getImm(),
                    TFE, LWE, IsTexFail))
    return false;

  const int Flags = MI.getOperand(ArgOffset + Intr->NumArgs).getImm();
  const bool IsA16 = (Flags & 1) != 0;
  const bool IsG16 = (Flags & 2) != 0;

  // A16 implies 16 bit gradients if subtarget doesn't support G16
  if (IsA16 && !STI.hasG16() && !IsG16)
    return false;

  unsigned DMask = 0;
  unsigned DMaskLanes = 0;

  if (BaseOpcode->Atomic) {
    VDataOut = MI.getOperand(0).getReg();
    VDataIn = MI.getOperand(2).getReg();
    LLT Ty = MRI->getType(VDataIn);

    // Be careful to allow atomic swap on 16-bit element vectors.
    const bool Is64Bit = BaseOpcode->AtomicX2 ?
      Ty.getSizeInBits() == 128 :
      Ty.getSizeInBits() == 64;

    if (BaseOpcode->AtomicX2) {
      assert(MI.getOperand(3).getReg() == AMDGPU::NoRegister);

      DMask = Is64Bit ? 0xf : 0x3;
      NumVDataDwords = Is64Bit ? 4 : 2;
    } else {
      DMask = Is64Bit ? 0x3 : 0x1;
      NumVDataDwords = Is64Bit ? 2 : 1;
    }
  } else {
    DMask = MI.getOperand(ArgOffset + Intr->DMaskIndex).getImm();
    DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask);

    if (BaseOpcode->Store) {
      VDataIn = MI.getOperand(1).getReg();
      VDataTy = MRI->getType(VDataIn);
      NumVDataDwords = (VDataTy.getSizeInBits() + 31) / 32;
    } else {
      VDataOut = MI.getOperand(0).getReg();
      VDataTy = MRI->getType(VDataOut);
      NumVDataDwords = DMaskLanes;

1608  if (IsD16 && !STI.hasUnpackedD16VMem())
1609  NumVDataDwords = (DMaskLanes + 1) / 2;
1610  }
1611  }
1612 
1613  // Set G16 opcode
1614  if (IsG16 && !IsA16) {
1615  const AMDGPU::MIMGG16MappingInfo *G16MappingInfo =
1616  AMDGPU::getMIMGG16MappingInfo(Intr->BaseOpcode);
1617  assert(G16MappingInfo);
1618  IntrOpcode = G16MappingInfo->G16; // set opcode to variant with _g16
1619  }
1620 
1621  // TODO: Check this in verifier.
1622  assert((!IsTexFail || DMaskLanes >= 1) && "should have legalized this");
1623 
1624  unsigned CPol = MI.getOperand(ArgOffset + Intr->CachePolicyIndex).getImm();
1625  if (BaseOpcode->Atomic)
1626  CPol |= AMDGPU::CPol::GLC; // TODO no-return optimization
1627  if (CPol & ~AMDGPU::CPol::ALL)
1628  return false;
1629 
1630  int NumVAddrRegs = 0;
1631  int NumVAddrDwords = 0;
1632  for (unsigned I = Intr->VAddrStart; I < Intr->VAddrEnd; I++) {
1633  // Skip the $noregs and 0s inserted during legalization.
1634  MachineOperand &AddrOp = MI.getOperand(ArgOffset + I);
1635  if (!AddrOp.isReg())
1636  continue; // XXX - Break?
1637 
1638  Register Addr = AddrOp.getReg();
1639  if (!Addr)
1640  break;
1641 
1642  ++NumVAddrRegs;
1643  NumVAddrDwords += (MRI->getType(Addr).getSizeInBits() + 31) / 32;
1644  }
1645 
1646  // The legalizer preprocessed the intrinsic arguments. If we aren't using
1647  // NSA, these should have been packed into a single value in the first
1648  // address register
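  // For example (illustrative): two 32-bit coordinates either occupy two
  // single-dword address registers (NSA, so NumVAddrDwords == NumVAddrRegs)
  // or have been packed by the legalizer into one 64-bit register (non-NSA,
  // NumVAddrRegs == 1).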
1649  const bool UseNSA = NumVAddrRegs != 1 && NumVAddrDwords == NumVAddrRegs;
1650  if (UseNSA && !STI.hasFeature(AMDGPU::FeatureNSAEncoding)) {
1651  LLVM_DEBUG(dbgs() << "Trying to use NSA on non-NSA target\n");
1652  return false;
1653  }
1654 
1655  if (IsTexFail)
1656  ++NumVDataDwords;
1657 
1658  int Opcode = -1;
1659  if (IsGFX11Plus) {
1660  Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
1661  UseNSA ? AMDGPU::MIMGEncGfx11NSA
1662  : AMDGPU::MIMGEncGfx11Default,
1663  NumVDataDwords, NumVAddrDwords);
1664  } else if (IsGFX10Plus) {
1665  Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
1666  UseNSA ? AMDGPU::MIMGEncGfx10NSA
1667  : AMDGPU::MIMGEncGfx10Default,
1668  NumVDataDwords, NumVAddrDwords);
1669  } else {
1670  if (Subtarget->hasGFX90AInsts()) {
1671  Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx90a,
1672  NumVDataDwords, NumVAddrDwords);
1673  if (Opcode == -1) {
1674  LLVM_DEBUG(
1675  dbgs()
1676  << "requested image instruction is not supported on this GPU\n");
1677  return false;
1678  }
1679  }
1680  if (Opcode == -1 &&
1681  STI.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
1682  Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8,
1683  NumVDataDwords, NumVAddrDwords);
1684  if (Opcode == -1)
1685  Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6,
1686  NumVDataDwords, NumVAddrDwords);
1687  }
1688  assert(Opcode != -1);
1689 
1690  auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opcode))
1691  .cloneMemRefs(MI);
1692 
1693  if (VDataOut) {
1694  if (BaseOpcode->AtomicX2) {
1695  const bool Is64 = MRI->getType(VDataOut).getSizeInBits() == 64;
1696 
1698  Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass);
1699  unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
1700 
1701  MIB.addDef(TmpReg);
1702  if (!MRI->use_empty(VDataOut)) {
1703  BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), VDataOut)
1704  .addReg(TmpReg, RegState::Kill, SubReg);
1705  }
1706 
1707  } else {
1708  MIB.addDef(VDataOut); // vdata output
1709  }
1710  }
1711 
1712  if (VDataIn)
1713  MIB.addReg(VDataIn); // vdata input
1714 
1715  for (int I = 0; I != NumVAddrRegs; ++I) {
1716  MachineOperand &SrcOp = MI.getOperand(ArgOffset + Intr->VAddrStart + I);
1717  if (SrcOp.isReg()) {
1718  assert(SrcOp.getReg() != 0);
1719  MIB.addReg(SrcOp.getReg());
1720  }
1721  }
1722 
1723  MIB.addReg(MI.getOperand(ArgOffset + Intr->RsrcIndex).getReg());
1724  if (BaseOpcode->Sampler)
1725  MIB.addReg(MI.getOperand(ArgOffset + Intr->SampIndex).getReg());
1726 
1727  MIB.addImm(DMask); // dmask
1728 
1729  if (IsGFX10Plus)
1730  MIB.addImm(DimInfo->Encoding);
1731  MIB.addImm(Unorm);
1732 
1733  MIB.addImm(CPol);
1734  MIB.addImm(IsA16 && // a16 or r128
1735  STI.hasFeature(AMDGPU::FeatureR128A16) ? -1 : 0);
1736  if (IsGFX10Plus)
1737  MIB.addImm(IsA16 ? -1 : 0);
1738 
1739  if (!Subtarget->hasGFX90AInsts()) {
1740  MIB.addImm(TFE); // tfe
1741  } else if (TFE) {
1742  LLVM_DEBUG(dbgs() << "TFE is not supported on this GPU\n");
1743  return false;
1744  }
1745 
1746  MIB.addImm(LWE); // lwe
1747  if (!IsGFX10Plus)
1748  MIB.addImm(DimInfo->DA ? -1 : 0);
1749  if (BaseOpcode->HasD16)
1750  MIB.addImm(IsD16 ? -1 : 0);
1751 
1752  if (IsTexFail) {
1753  // An image load instruction with TFE/LWE only conditionally writes to its
1754  // result registers. Initialize them to zero so that we always get well
1755  // defined result values.
1756  assert(VDataOut && !VDataIn);
1757  Register Tied = MRI->cloneVirtualRegister(VDataOut);
1758  Register Zero = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1759  BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::V_MOV_B32_e32), Zero)
1760  .addImm(0);
1761  auto Parts = TRI.getRegSplitParts(MRI->getRegClass(Tied), 4);
1762  if (STI.usePRTStrictNull()) {
1763  // With enable-prt-strict-null enabled, initialize all result registers to
1764  // zero.
1765  auto RegSeq =
1766  BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), Tied);
1767  for (auto Sub : Parts)
1768  RegSeq.addReg(Zero).addImm(Sub);
1769  } else {
1770  // With enable-prt-strict-null disabled, only initialize the extra TFE/LWE
1771  // result register.
1772  Register Undef = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1773  BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::IMPLICIT_DEF), Undef);
1774  auto RegSeq =
1775  BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), Tied);
1776  for (auto Sub : Parts.drop_back(1))
1777  RegSeq.addReg(Undef).addImm(Sub);
1778  RegSeq.addReg(Zero).addImm(Parts.back());
1779  }
1780  MIB.addReg(Tied, RegState::Implicit);
1781  MIB->tieOperands(0, MIB->getNumOperands() - 1);
1782  }
1783 
1784  MI.eraseFromParent();
1785  constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1786  TII.enforceOperandRCAlignment(*MIB, AMDGPU::OpName::vaddr);
1787  return true;
1788 }
1789 
1790 bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
1791  MachineInstr &I) const {
1792  unsigned IntrinsicID = I.getIntrinsicID();
1793  switch (IntrinsicID) {
1794  case Intrinsic::amdgcn_end_cf:
1795  return selectEndCfIntrinsic(I);
1796  case Intrinsic::amdgcn_ds_ordered_add:
1797  case Intrinsic::amdgcn_ds_ordered_swap:
1798  return selectDSOrderedIntrinsic(I, IntrinsicID);
1799  case Intrinsic::amdgcn_ds_gws_init:
1800  case Intrinsic::amdgcn_ds_gws_barrier:
1801  case Intrinsic::amdgcn_ds_gws_sema_v:
1802  case Intrinsic::amdgcn_ds_gws_sema_br:
1803  case Intrinsic::amdgcn_ds_gws_sema_p:
1804  case Intrinsic::amdgcn_ds_gws_sema_release_all:
1805  return selectDSGWSIntrinsic(I, IntrinsicID);
1806  case Intrinsic::amdgcn_ds_append:
1807  return selectDSAppendConsume(I, true);
1808  case Intrinsic::amdgcn_ds_consume:
1809  return selectDSAppendConsume(I, false);
1810  case Intrinsic::amdgcn_s_barrier:
1811  return selectSBarrier(I);
1812  case Intrinsic::amdgcn_global_atomic_fadd:
1813  return selectGlobalAtomicFadd(I, I.getOperand(2), I.getOperand(3));
1814  case Intrinsic::amdgcn_raw_buffer_load_lds:
1815  case Intrinsic::amdgcn_struct_buffer_load_lds:
1816  return selectBufferLoadLds(I);
1817  case Intrinsic::amdgcn_global_load_lds:
1818  return selectGlobalLoadLds(I);
1819  default: {
1820  return selectImpl(I, *CoverageInfo);
1821  }
1822  }
1823 }
1824 
1825 bool AMDGPUInstructionSelector::selectG_SELECT(MachineInstr &I) const {
1826  if (selectImpl(I, *CoverageInfo))
1827  return true;
1828 
1829  MachineBasicBlock *BB = I.getParent();
1830  const DebugLoc &DL = I.getDebugLoc();
1831 
1832  Register DstReg = I.getOperand(0).getReg();
1833  unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
1834  assert(Size <= 32 || Size == 64);
1835  const MachineOperand &CCOp = I.getOperand(1);
1836  Register CCReg = CCOp.getReg();
1837  if (!isVCC(CCReg, *MRI)) {
1838  unsigned SelectOpcode = Size == 64 ? AMDGPU::S_CSELECT_B64 :
1839  AMDGPU::S_CSELECT_B32;
1840  MachineInstr *CopySCC = BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
1841  .addReg(CCReg);
1842 
1843  // The generic constrainSelectedInstRegOperands doesn't work for the scc register
1844  // bank, because it does not cover the register class that we use to represent
1845  // SCC. So we need to manually set the register class here.
1846  if (!MRI->getRegClassOrNull(CCReg))
1847  MRI->setRegClass(CCReg, TRI.getConstrainedRegClassForOperand(CCOp, *MRI));
1848  MachineInstr *Select = BuildMI(*BB, &I, DL, TII.get(SelectOpcode), DstReg)
1849  .add(I.getOperand(2))
1850  .add(I.getOperand(3));
1851 
1852  bool Ret = false;
1853  Ret |= constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
1854  Ret |= constrainSelectedInstRegOperands(*CopySCC, TII, TRI, RBI);
1855  I.eraseFromParent();
1856  return Ret;
1857  }
1858 
1859  // Wide VGPR select should have been split in RegBankSelect.
1860  if (Size > 32)
1861  return false;
1862 
1863  MachineInstr *Select =
1864  BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1865  .addImm(0)
1866  .add(I.getOperand(3))
1867  .addImm(0)
1868  .add(I.getOperand(2))
1869  .add(I.getOperand(1));
1870 
1871  bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
1872  I.eraseFromParent();
1873  return Ret;
1874 }
1875 
1876 static int sizeToSubRegIndex(unsigned Size) {
1877  switch (Size) {
1878  case 32:
1879  return AMDGPU::sub0;
1880  case 64:
1881  return AMDGPU::sub0_sub1;
1882  case 96:
1883  return AMDGPU::sub0_sub1_sub2;
1884  case 128:
1885  return AMDGPU::sub0_sub1_sub2_sub3;
1886  case 256:
1887  return AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7;
1888  default:
1889  if (Size < 32)
1890  return AMDGPU::sub0;
1891  if (Size > 256)
1892  return -1;
1893  return sizeToSubRegIndex(PowerOf2Ceil(Size));
1894  }
1895 }
1896 
1897 bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const {
1898  Register DstReg = I.getOperand(0).getReg();
1899  Register SrcReg = I.getOperand(1).getReg();
1900  const LLT DstTy = MRI->getType(DstReg);
1901  const LLT SrcTy = MRI->getType(SrcReg);
1902  const LLT S1 = LLT::scalar(1);
1903 
1904  const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
1905  const RegisterBank *DstRB;
1906  if (DstTy == S1) {
1907  // This is a special case. We don't treat s1 for legalization artifacts as
1908  // vcc booleans.
1909  DstRB = SrcRB;
1910  } else {
1911  DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1912  if (SrcRB != DstRB)
1913  return false;
1914  }
1915 
1916  const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
1917 
1918  unsigned DstSize = DstTy.getSizeInBits();
1919  unsigned SrcSize = SrcTy.getSizeInBits();
1920 
1921  const TargetRegisterClass *SrcRC =
1922  TRI.getRegClassForSizeOnBank(SrcSize, *SrcRB);
1923  const TargetRegisterClass *DstRC =
1924  TRI.getRegClassForSizeOnBank(DstSize, *DstRB);
1925  if (!SrcRC || !DstRC)
1926  return false;
1927 
1928  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
1929  !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
1930  LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
1931  return false;
1932  }
1933 
1934  if (DstTy == LLT::fixed_vector(2, 16) && SrcTy == LLT::fixed_vector(2, 32)) {
1935  MachineBasicBlock *MBB = I.getParent();
1936  const DebugLoc &DL = I.getDebugLoc();
1937 
1938  Register LoReg = MRI->createVirtualRegister(DstRC);
1939  Register HiReg = MRI->createVirtualRegister(DstRC);
1940  BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), LoReg)
1941  .addReg(SrcReg, 0, AMDGPU::sub0);
1942  BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), HiReg)
1943  .addReg(SrcReg, 0, AMDGPU::sub1);
1944 
1945  if (IsVALU && STI.hasSDWA()) {
1946  // Write the low 16-bits of the high element into the high 16-bits of the
1947  // low element.
1948  MachineInstr *MovSDWA =
1949  BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
1950  .addImm(0) // $src0_modifiers
1951  .addReg(HiReg) // $src0
1952  .addImm(0) // $clamp
1953  .addImm(AMDGPU::SDWA::WORD_1) // $dst_sel
1954  .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
1955  .addImm(AMDGPU::SDWA::WORD_0) // $src0_sel
1956  .addReg(LoReg, RegState::Implicit);
1957  MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
1958  } else {
1959  Register TmpReg0 = MRI->createVirtualRegister(DstRC);
1960  Register TmpReg1 = MRI->createVirtualRegister(DstRC);
1961  Register ImmReg = MRI->createVirtualRegister(DstRC);
1962  if (IsVALU) {
1963  BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), TmpReg0)
1964  .addImm(16)
1965  .addReg(HiReg);
1966  } else {
1967  BuildMI(*MBB, I, DL, TII.get(AMDGPU::S_LSHL_B32), TmpReg0)
1968  .addReg(HiReg)
1969  .addImm(16);
1970  }
1971 
1972  unsigned MovOpc = IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
1973  unsigned AndOpc = IsVALU ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
1974  unsigned OrOpc = IsVALU ? AMDGPU::V_OR_B32_e64 : AMDGPU::S_OR_B32;
1975 
1976  BuildMI(*MBB, I, DL, TII.get(MovOpc), ImmReg)
1977  .addImm(0xffff);
1978  BuildMI(*MBB, I, DL, TII.get(AndOpc), TmpReg1)
1979  .addReg(LoReg)
1980  .addReg(ImmReg);
1981  BuildMI(*MBB, I, DL, TII.get(OrOpc), DstReg)
1982  .addReg(TmpReg0)
1983  .addReg(TmpReg1);
1984  }
1985 
1986  I.eraseFromParent();
1987  return true;
1988  }
1989 
1990  if (!DstTy.isScalar())
1991  return false;
1992 
1993  if (SrcSize > 32) {
1994  int SubRegIdx = sizeToSubRegIndex(DstSize);
1995  if (SubRegIdx == -1)
1996  return false;
1997 
1998  // Deal with weird cases where the class only partially supports the subreg
1999  // index.
2000  const TargetRegisterClass *SrcWithSubRC
2001  = TRI.getSubClassWithSubReg(SrcRC, SubRegIdx);
2002  if (!SrcWithSubRC)
2003  return false;
2004 
2005  if (SrcWithSubRC != SrcRC) {
2006  if (!RBI.constrainGenericRegister(SrcReg, *SrcWithSubRC, *MRI))
2007  return false;
2008  }
2009 
2010  I.getOperand(1).setSubReg(SubRegIdx);
2011  }
2012 
2013  I.setDesc(TII.get(TargetOpcode::COPY));
2014  return true;
2015 }
2016 
2017 /// \returns true if a bitmask for \p Size bits will be an inline immediate.
2018 static bool shouldUseAndMask(unsigned Size, unsigned &Mask) {
2019  Mask = maskTrailingOnes<unsigned>(Size);
2020  int SignedMask = static_cast<int>(Mask);
2021  return SignedMask >= -16 && SignedMask <= 64;
2022 }
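// Worked examples (illustrative): Size == 4 gives Mask == 0xf (15), which is
// an inline immediate; Size == 16 gives 0xffff (65535), which is not, so a
// BFE is preferred; Size == 32 gives 0xffffffff, i.e. -1 as a signed value,
// which is inline again.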
2023 
2024 // Like RegisterBankInfo::getRegBank, but don't assume vcc for s1.
2025 const RegisterBank *AMDGPUInstructionSelector::getArtifactRegBank(
2026  Register Reg, const MachineRegisterInfo &MRI,
2027  const TargetRegisterInfo &TRI) const {
2028  const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
2029  if (auto *RB = RegClassOrBank.dyn_cast<const RegisterBank *>())
2030  return RB;
2031 
2032  // Ignore the type, since we don't use vcc in artifacts.
2033  if (auto *RC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>())
2034  return &RBI.getRegBankFromRegClass(*RC, LLT());
2035  return nullptr;
2036 }
2037 
2038 bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const {
2039  bool InReg = I.getOpcode() == AMDGPU::G_SEXT_INREG;
2040  bool Signed = I.getOpcode() == AMDGPU::G_SEXT || InReg;
2041  const DebugLoc &DL = I.getDebugLoc();
2042  MachineBasicBlock &MBB = *I.getParent();
2043  const Register DstReg = I.getOperand(0).getReg();
2044  const Register SrcReg = I.getOperand(1).getReg();
2045 
2046  const LLT DstTy = MRI->getType(DstReg);
2047  const LLT SrcTy = MRI->getType(SrcReg);
2048  const unsigned SrcSize = I.getOpcode() == AMDGPU::G_SEXT_INREG ?
2049  I.getOperand(2).getImm() : SrcTy.getSizeInBits();
2050  const unsigned DstSize = DstTy.getSizeInBits();
2051  if (!DstTy.isScalar())
2052  return false;
2053 
2054  // Artifact casts should never use vcc.
2055  const RegisterBank *SrcBank = getArtifactRegBank(SrcReg, *MRI, TRI);
2056 
2057  // FIXME: This should probably be illegal and split earlier.
2058  if (I.getOpcode() == AMDGPU::G_ANYEXT) {
2059  if (DstSize <= 32)
2060  return selectCOPY(I);
2061 
2062  const TargetRegisterClass *SrcRC =
2063  TRI.getRegClassForTypeOnBank(SrcTy, *SrcBank);
2064  const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
2065  const TargetRegisterClass *DstRC =
2066  TRI.getRegClassForSizeOnBank(DstSize, *DstBank);
2067 
2068  Register UndefReg = MRI->createVirtualRegister(SrcRC);
2069  BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
2070  BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2071  .addReg(SrcReg)
2072  .addImm(AMDGPU::sub0)
2073  .addReg(UndefReg)
2074  .addImm(AMDGPU::sub1);
2075  I.eraseFromParent();
2076 
2077  return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) &&
2078  RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI);
2079  }
2080 
2081  if (SrcBank->getID() == AMDGPU::VGPRRegBankID && DstSize <= 32) {
2082  // 64-bit should have been split up in RegBankSelect
2083 
2084  // Try to use an and with a mask if it will save code size.
2085  unsigned Mask;
2086  if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
2087  MachineInstr *ExtI =
2088  BuildMI(MBB, I, DL, TII.get(AMDGPU::V_AND_B32_e32), DstReg)
2089  .addImm(Mask)
2090  .addReg(SrcReg);
2091  I.eraseFromParent();
2092  return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
2093  }
2094 
2095  const unsigned BFE = Signed ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64;
2096  MachineInstr *ExtI =
2097  BuildMI(MBB, I, DL, TII.get(BFE), DstReg)
2098  .addReg(SrcReg)
2099  .addImm(0) // Offset
2100  .addImm(SrcSize); // Width
2101  I.eraseFromParent();
2102  return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
2103  }
2104 
2105  if (SrcBank->getID() == AMDGPU::SGPRRegBankID && DstSize <= 64) {
2106  const TargetRegisterClass &SrcRC = InReg && DstSize > 32 ?
2107  AMDGPU::SReg_64RegClass : AMDGPU::SReg_32RegClass;
2108  if (!RBI.constrainGenericRegister(SrcReg, SrcRC, *MRI))
2109  return false;
2110 
2111  if (Signed && DstSize == 32 && (SrcSize == 8 || SrcSize == 16)) {
2112  const unsigned SextOpc = SrcSize == 8 ?
2113  AMDGPU::S_SEXT_I32_I8 : AMDGPU::S_SEXT_I32_I16;
2114  BuildMI(MBB, I, DL, TII.get(SextOpc), DstReg)
2115  .addReg(SrcReg);
2116  I.eraseFromParent();
2117  return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
2118  }
2119 
2120  const unsigned BFE64 = Signed ? AMDGPU::S_BFE_I64 : AMDGPU::S_BFE_U64;
2121  const unsigned BFE32 = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;
2122 
2123  // Scalar BFE is encoded as S1[5:0] = offset, S1[22:16]= width.
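  // For example (illustrative): extending from the low 16 bits uses
  // SrcSize << 16 == 0x100000, i.e. offset 0 in bits [5:0] and width 16 in
  // bits [22:16], as the single immediate passed to the S_BFE instructions
  // below.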
2124  if (DstSize > 32 && (SrcSize <= 32 || InReg)) {
2125  // We need a 64-bit register source, but the high bits don't matter.
2126  Register ExtReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
2127  Register UndefReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2128  unsigned SubReg = InReg ? AMDGPU::sub0 : 0;
2129 
2130  BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
2131  BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), ExtReg)
2132  .addReg(SrcReg, 0, SubReg)
2133  .addImm(AMDGPU::sub0)
2134  .addReg(UndefReg)
2135  .addImm(AMDGPU::sub1);
2136 
2137  BuildMI(MBB, I, DL, TII.get(BFE64), DstReg)
2138  .addReg(ExtReg)
2139  .addImm(SrcSize << 16);
2140 
2141  I.eraseFromParent();
2142  return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, *MRI);
2143  }
2144 
2145  unsigned Mask;
2146  if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
2147  BuildMI(MBB, I, DL, TII.get(AMDGPU::S_AND_B32), DstReg)
2148  .addReg(SrcReg)
2149  .addImm(Mask);
2150  } else {
2151  BuildMI(MBB, I, DL, TII.get(BFE32), DstReg)
2152  .addReg(SrcReg)
2153  .addImm(SrcSize << 16);
2154  }
2155 
2156  I.eraseFromParent();
2157  return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
2158  }
2159 
2160  return false;
2161 }
2162 
2163 bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
2164  MachineBasicBlock *BB = I.getParent();
2165  MachineOperand &ImmOp = I.getOperand(1);
2166  Register DstReg = I.getOperand(0).getReg();
2167  unsigned Size = MRI->getType(DstReg).getSizeInBits();
2168 
2169  // The AMDGPU backend only supports Imm operands and not CImm or FPImm.
2170  if (ImmOp.isFPImm()) {
2171  const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt();
2172  ImmOp.ChangeToImmediate(Imm.getZExtValue());
2173  } else if (ImmOp.isCImm()) {
2174  ImmOp.ChangeToImmediate(ImmOp.getCImm()->getSExtValue());
2175  } else {
2176  llvm_unreachable("Not supported by g_constants");
2177  }
2178 
2179  const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2180  const bool IsSgpr = DstRB->getID() == AMDGPU::SGPRRegBankID;
2181 
2182  unsigned Opcode;
2183  if (DstRB->getID() == AMDGPU::VCCRegBankID) {
2184  Opcode = STI.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
2185  } else {
2186  Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
2187 
2188  // We should never produce s1 values on banks other than VCC. If the user of
2189  // this already constrained the register, we may incorrectly think it's VCC
2190  // if it wasn't originally.
2191  if (Size == 1)
2192  return false;
2193  }
2194 
2195  if (Size != 64) {
2196  I.setDesc(TII.get(Opcode));
2197  I.addImplicitDefUseOperands(*MF);
2198  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2199  }
2200 
2201  const DebugLoc &DL = I.getDebugLoc();
2202 
2203  APInt Imm(Size, I.getOperand(1).getImm());
2204 
2205  MachineInstr *ResInst;
2206  if (IsSgpr && TII.isInlineConstant(Imm)) {
2207  ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
2208  .addImm(I.getOperand(1).getImm());
2209  } else {
2210  const TargetRegisterClass *RC = IsSgpr ?
2211  &AMDGPU::SReg_32RegClass : &AMDGPU::VGPR_32RegClass;
2212  Register LoReg = MRI->createVirtualRegister(RC);
2213  Register HiReg = MRI->createVirtualRegister(RC);
2214 
2215  BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg)
2216  .addImm(Imm.trunc(32).getZExtValue());
2217 
2218  BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg)
2219  .addImm(Imm.ashr(32).getZExtValue());
2220 
2221  ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2222  .addReg(LoReg)
2223  .addImm(AMDGPU::sub0)
2224  .addReg(HiReg)
2225  .addImm(AMDGPU::sub1);
2226  }
2227 
2228  // We can't call constrainSelectedInstRegOperands here, because it doesn't
2229  // work for target independent opcodes
2230  I.eraseFromParent();
2231  const TargetRegisterClass *DstRC =
2232  TRI.getConstrainedRegClassForOperand(ResInst->getOperand(0), *MRI);
2233  if (!DstRC)
2234  return true;
2235  return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI);
2236 }
2237 
2238 bool AMDGPUInstructionSelector::selectG_FNEG(MachineInstr &MI) const {
2239  // Only manually handle the f64 SGPR case.
2240  //
2241  // FIXME: This is a workaround for 2.5 different tablegen problems. Because
2242  // the bit ops theoretically have a second result due to the implicit def of
2243  // SCC, the GlobalISelEmitter is overly conservative and rejects it. Fixing
2244  // that is easy by disabling the check. The result works, but uses a
2245  // nonsensical sreg32orlds_and_sreg_1 regclass.
2246  //
2247  // The DAG emitter is more problematic, and incorrectly adds both S_XOR_B32 to
2248  // the variadic REG_SEQUENCE operands.
2249 
2250  Register Dst = MI.getOperand(0).getReg();
2251  const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2252  if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2253  MRI->getType(Dst) != LLT::scalar(64))
2254  return false;
2255 
2256  Register Src = MI.getOperand(1).getReg();
2257  MachineInstr *Fabs = getOpcodeDef(TargetOpcode::G_FABS, Src, *MRI);
2258  if (Fabs)
2259  Src = Fabs->getOperand(1).getReg();
2260 
2261  if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2262  !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2263  return false;
2264 
2265  MachineBasicBlock *BB = MI.getParent();
2266  const DebugLoc &DL = MI.getDebugLoc();
2267  Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2268  Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2269  Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2270  Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2271 
2272  BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2273  .addReg(Src, 0, AMDGPU::sub0);
2274  BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2275  .addReg(Src, 0, AMDGPU::sub1);
2276  BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2277  .addImm(0x80000000);
2278 
2279  // Set or toggle sign bit.
2280  unsigned Opc = Fabs ? AMDGPU::S_OR_B32 : AMDGPU::S_XOR_B32;
2281  BuildMI(*BB, &MI, DL, TII.get(Opc), OpReg)
2282  .addReg(HiReg)
2283  .addReg(ConstReg);
2284  BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2285  .addReg(LoReg)
2286  .addImm(AMDGPU::sub0)
2287  .addReg(OpReg)
2288  .addImm(AMDGPU::sub1);
2289  MI.eraseFromParent();
2290  return true;
2291 }
2292 
2293 // FIXME: This is a workaround for the same tablegen problems as G_FNEG
2294 bool AMDGPUInstructionSelector::selectG_FABS(MachineInstr &MI) const {
2295  Register Dst = MI.getOperand(0).getReg();
2296  const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2297  if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2298  MRI->getType(Dst) != LLT::scalar(64))
2299  return false;
2300 
2301  Register Src = MI.getOperand(1).getReg();
2302  MachineBasicBlock *BB = MI.getParent();
2303  const DebugLoc &DL = MI.getDebugLoc();
2304  Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2305  Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2306  Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2307  Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2308 
2309  if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2310  !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2311  return false;
2312 
2313  BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2314  .addReg(Src, 0, AMDGPU::sub0);
2315  BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2316  .addReg(Src, 0, AMDGPU::sub1);
2317  BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2318  .addImm(0x7fffffff);
2319 
2320  // Clear sign bit.
2321  // TODO: Should this use S_BITSET0_*?
2322  BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_AND_B32), OpReg)
2323  .addReg(HiReg)
2324  .addReg(ConstReg);
2325  BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2326  .addReg(LoReg)
2327  .addImm(AMDGPU::sub0)
2328  .addReg(OpReg)
2329  .addImm(AMDGPU::sub1);
2330 
2331  MI.eraseFromParent();
2332  return true;
2333 }
2334 
2335 static bool isConstant(const MachineInstr &MI) {
2336  return MI.getOpcode() == TargetOpcode::G_CONSTANT;
2337 }
2338 
2339 void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
2340  const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {
2341 
2342  const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg());
2343 
2344  assert(PtrMI);
2345 
2346  if (PtrMI->getOpcode() != TargetOpcode::G_PTR_ADD)
2347  return;
2348 
2349  GEPInfo GEPInfo(*PtrMI);
2350 
2351  for (unsigned i = 1; i != 3; ++i) {
2352  const MachineOperand &GEPOp = PtrMI->getOperand(i);
2353  const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg());
2354  assert(OpDef);
2355  if (i == 2 && isConstant(*OpDef)) {
2356  // TODO: Could handle constant base + variable offset, but a combine
2357  // probably should have commuted it.
2358  assert(GEPInfo.Imm == 0);
2359  GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue();
2360  continue;
2361  }
2362  const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI);
2363  if (OpBank->getID() == AMDGPU::SGPRRegBankID)
2364  GEPInfo.SgprParts.push_back(GEPOp.getReg());
2365  else
2366  GEPInfo.VgprParts.push_back(GEPOp.getReg());
2367  }
2368 
2369  AddrInfo.push_back(GEPInfo);
2370  getAddrModeInfo(*PtrMI, MRI, AddrInfo);
2371 }
2372 
2373 bool AMDGPUInstructionSelector::isSGPR(Register Reg) const {
2374  return RBI.getRegBank(Reg, *MRI, TRI)->getID() == AMDGPU::SGPRRegBankID;
2375 }
2376 
2377 bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const {
2378  if (!MI.hasOneMemOperand())
2379  return false;
2380 
2381  const MachineMemOperand *MMO = *MI.memoperands_begin();
2382  const Value *Ptr = MMO->getValue();
2383 
2384  // UndefValue means this is a load of a kernel input. These are uniform.
2385  // Sometimes LDS instructions have constant pointers.
2386  // If Ptr is null, then that means this mem operand contains a
2387  // PseudoSourceValue like GOT.
2388  if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
2389  isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
2390  return true;
2391 
2392  if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
2393  return true;
2394 
2395  const Instruction *I = dyn_cast<Instruction>(Ptr);
2396  return I && I->getMetadata("amdgpu.uniform");
2397 }
2398 
2399 bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {
2400  for (const GEPInfo &GEPInfo : AddrInfo) {
2401  if (!GEPInfo.VgprParts.empty())
2402  return true;
2403  }
2404  return false;
2405 }
2406 
2407 void AMDGPUInstructionSelector::initM0(MachineInstr &I) const {
2408  const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
2409  unsigned AS = PtrTy.getAddressSpace();
2410  if ((AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) &&
2411  STI.ldsRequiresM0Init()) {
2412  MachineBasicBlock *BB = I.getParent();
2413 
2414  // If DS instructions require M0 initialization, insert it before selecting.
2415  BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2416  .addImm(-1);
2417  }
2418 }
2419 
2420 bool AMDGPUInstructionSelector::selectG_LOAD_STORE_ATOMICRMW(
2421  MachineInstr &I) const {
2422  if (I.getOpcode() == TargetOpcode::G_ATOMICRMW_FADD) {
2423  const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
2424  unsigned AS = PtrTy.getAddressSpace();
2425  if (AS == AMDGPUAS::GLOBAL_ADDRESS)
2426  return selectGlobalAtomicFadd(I, I.getOperand(1), I.getOperand(2));
2427  }
2428 
2429  initM0(I);
2430  return selectImpl(I, *CoverageInfo);
2431 }
2432 
2433 static bool isVCmpResult(Register Reg, MachineRegisterInfo &MRI) {
2434  if (Reg.isPhysical())
2435  return false;
2436 
2437  MachineInstr &MI = *MRI.getVRegDef(Reg);
2438  const unsigned Opcode = MI.getOpcode();
2439 
2440  if (Opcode == AMDGPU::COPY)
2441  return isVCmpResult(MI.getOperand(1).getReg(), MRI);
2442 
2443  if (Opcode == AMDGPU::G_AND || Opcode == AMDGPU::G_OR ||
2444  Opcode == AMDGPU::G_XOR)
2445  return isVCmpResult(MI.getOperand(1).getReg(), MRI) &&
2446  isVCmpResult(MI.getOperand(2).getReg(), MRI);
2447 
2448  if (Opcode == TargetOpcode::G_INTRINSIC)
2449  return MI.getIntrinsicID() == Intrinsic::amdgcn_class;
2450 
2451  return Opcode == AMDGPU::G_ICMP || Opcode == AMDGPU::G_FCMP;
2452 }
2453 
2454 bool AMDGPUInstructionSelector::selectG_BRCOND(MachineInstr &I) const {
2455  MachineBasicBlock *BB = I.getParent();
2456  MachineOperand &CondOp = I.getOperand(0);
2457  Register CondReg = CondOp.getReg();
2458  const DebugLoc &DL = I.getDebugLoc();
2459 
2460  unsigned BrOpcode;
2461  Register CondPhysReg;
2462  const TargetRegisterClass *ConstrainRC;
2463 
2464  // In SelectionDAG, we inspect the IR block for uniformity metadata to decide
2465  // whether the branch is uniform when selecting the instruction. In
2466  // GlobalISel, we should push that decision into RegBankSelect. Assume for now
2467  // RegBankSelect knows what it's doing if the branch condition is scc, even
2468  // though it currently does not.
2469  if (!isVCC(CondReg, *MRI)) {
2470  if (MRI->getType(CondReg) != LLT::scalar(32))
2471  return false;
2472 
2473  CondPhysReg = AMDGPU::SCC;
2474  BrOpcode = AMDGPU::S_CBRANCH_SCC1;
2475  ConstrainRC = &AMDGPU::SReg_32RegClass;
2476  } else {
2477  // FIXME: Should scc->vcc copies and with exec?
2478 
2479  // Unless the value of CondReg is the result of a V_CMP* instruction, we
2480  // need to insert an and with exec.
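  // Illustrative note: the AND with EXEC clears any stale bits for inactive
  // lanes in a condition that came from, e.g., a plain COPY; results of
  // V_CMP* (and of bitwise combinations of them) are assumed to already be
  // masked to the active lanes, so the AND is skipped for them.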
2481  if (!isVCmpResult(CondReg, *MRI)) {
2482  const bool Is64 = STI.isWave64();
2483  const unsigned Opcode = Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
2484  const Register Exec = Is64 ? AMDGPU::EXEC : AMDGPU::EXEC_LO;
2485 
2486  Register TmpReg = MRI->createVirtualRegister(TRI.getBoolRC());
2487  BuildMI(*BB, &I, DL, TII.get(Opcode), TmpReg)
2488  .addReg(CondReg)
2489  .addReg(Exec);
2490  CondReg = TmpReg;
2491  }
2492 
2493  CondPhysReg = TRI.getVCC();
2494  BrOpcode = AMDGPU::S_CBRANCH_VCCNZ;
2495  ConstrainRC = TRI.getBoolRC();
2496  }
2497 
2498  if (!MRI->getRegClassOrNull(CondReg))
2499  MRI->setRegClass(CondReg, ConstrainRC);
2500 
2501  BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CondPhysReg)
2502  .addReg(CondReg);
2503  BuildMI(*BB, &I, DL, TII.get(BrOpcode))
2504  .addMBB(I.getOperand(1).getMBB());
2505 
2506  I.eraseFromParent();
2507  return true;
2508 }
2509 
2510 bool AMDGPUInstructionSelector::selectG_GLOBAL_VALUE(
2511  MachineInstr &I) const {
2512  Register DstReg = I.getOperand(0).getReg();
2513  const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2514  const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2515  I.setDesc(TII.get(IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32));
2516  if (IsVGPR)
2517  I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
2518 
2519  return RBI.constrainGenericRegister(
2520  DstReg, IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass, *MRI);
2521 }
2522 
2523 bool AMDGPUInstructionSelector::selectG_PTRMASK(MachineInstr &I) const {
2524  Register DstReg = I.getOperand(0).getReg();
2525  Register SrcReg = I.getOperand(1).getReg();
2526  Register MaskReg = I.getOperand(2).getReg();
2527  LLT Ty = MRI->getType(DstReg);
2528  LLT MaskTy = MRI->getType(MaskReg);
2529  MachineBasicBlock *BB = I.getParent();
2530  const DebugLoc &DL = I.getDebugLoc();
2531 
2532  const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2533  const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2534  const RegisterBank *MaskRB = RBI.getRegBank(MaskReg, *MRI, TRI);
2535  const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2536  if (DstRB != SrcRB) // Should only happen for hand written MIR.
2537  return false;
2538 
2539  // Try to avoid emitting a bit operation when we only need to touch half of
2540  // the 64-bit pointer.
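  // Illustrative case: masking with 0xFFFFFFFFFFFFF000 (page alignment) has
  // all ones in the high 32 bits, so CanCopyHi32 is true below, the high half
  // becomes a plain copy, and only the low half needs an S_AND_B32/V_AND_B32.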
2541  APInt MaskOnes = KnownBits->getKnownOnes(MaskReg).zext(64);
2542  const APInt MaskHi32 = APInt::getHighBitsSet(64, 32);
2543  const APInt MaskLo32 = APInt::getLowBitsSet(64, 32);
2544 
2545  const bool CanCopyLow32 = (MaskOnes & MaskLo32) == MaskLo32;
2546  const bool CanCopyHi32 = (MaskOnes & MaskHi32) == MaskHi32;
2547 
2548  if (!IsVGPR && Ty.getSizeInBits() == 64 &&
2549  !CanCopyLow32 && !CanCopyHi32) {
2550  auto MIB = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_AND_B64), DstReg)
2551  .addReg(SrcReg)
2552  .addReg(MaskReg);
2553  I.eraseFromParent();
2554  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
2555  }
2556 
2557  unsigned NewOpc = IsVGPR ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
2558  const TargetRegisterClass &RegRC
2559  = IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
2560 
2561  const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(Ty, *DstRB);
2562  const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(Ty, *SrcRB);
2563  const TargetRegisterClass *MaskRC =
2564  TRI.getRegClassForTypeOnBank(MaskTy, *MaskRB);
2565 
2566  if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2567  !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2568  !RBI.constrainGenericRegister(MaskReg, *MaskRC, *MRI))
2569  return false;
2570 
2571  if (Ty.getSizeInBits() == 32) {
2572  assert(MaskTy.getSizeInBits() == 32 &&
2573  "ptrmask should have been narrowed during legalize");
2574 
2575  BuildMI(*BB, &I, DL, TII.get(NewOpc), DstReg)
2576  .addReg(SrcReg)
2577  .addReg(MaskReg);
2578  I.eraseFromParent();
2579  return true;
2580  }
2581 
2582  Register HiReg = MRI->createVirtualRegister(&RegRC);
2583  Register LoReg = MRI->createVirtualRegister(&RegRC);
2584 
2585  // Extract the subregisters from the source pointer.
2586  BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), LoReg)
2587  .addReg(SrcReg, 0, AMDGPU::sub0);
2588  BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), HiReg)
2589  .addReg(SrcReg, 0, AMDGPU::sub1);
2590 
2591  Register MaskedLo, MaskedHi;
2592 
2593  if (CanCopyLow32) {
2594  // If all the bits in the low half are 1, we only need a copy for it.
2595  MaskedLo = LoReg;
2596  } else {
2597  // Extract the mask subregister and apply the and.
2598  Register MaskLo = MRI->createVirtualRegister(&RegRC);
2599  MaskedLo = MRI->createVirtualRegister(&RegRC);
2600 
2601  BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskLo)
2602  .addReg(MaskReg, 0, AMDGPU::sub0);
2603  BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedLo)
2604  .addReg(LoReg)
2605  .addReg(MaskLo);
2606  }
2607 
2608  if (CanCopyHi32) {
2609  // If all the bits in the high half are 1, we only need a copy for it.
2610  MaskedHi = HiReg;
2611  } else {
2612  Register MaskHi = MRI->createVirtualRegister(&RegRC);
2613  MaskedHi = MRI->createVirtualRegister(&RegRC);
2614 
2615  BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskHi)
2616  .addReg(MaskReg, 0, AMDGPU::sub1);
2617  BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedHi)
2618  .addReg(HiReg)
2619  .addReg(MaskHi);
2620  }
2621 
2622  BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2623  .addReg(MaskedLo)
2624  .addImm(AMDGPU::sub0)
2625  .addReg(MaskedHi)
2626  .addImm(AMDGPU::sub1);
2627  I.eraseFromParent();
2628  return true;
2629 }
2630 
2631 /// Return the register to use for the index value, and the subregister to use
2632 /// for the indirectly accessed register.
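/// Illustrative example: indexing dword elements (EltSize == 4) of a 128-bit
/// super-register with IdxReg defined as (%base + 2) yields {%base, sub2};
/// the variable part goes into M0 (or the GPR index mode) and the constant
/// part selects the subregister.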
2633 static std::pair<Register, unsigned>
2634 computeIndirectRegIndex(MachineRegisterInfo &MRI,
2635  const SIRegisterInfo &TRI,
2636  const TargetRegisterClass *SuperRC,
2637  Register IdxReg,
2638  unsigned EltSize) {
2639  Register IdxBaseReg;
2640  int Offset;
2641 
2642  std::tie(IdxBaseReg, Offset) = AMDGPU::getBaseWithConstantOffset(MRI, IdxReg);
2643  if (IdxBaseReg == AMDGPU::NoRegister) {
2644  // This will happen if the index is a known constant. This should ordinarily
2645  // be legalized out, but handle it as a register just in case.
2646  assert(Offset == 0);
2647  IdxBaseReg = IdxReg;
2648  }
2649 
2650  ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SuperRC, EltSize);
2651 
2652  // Skip out of bounds offsets, or else we would end up using an undefined
2653  // register.
2654  if (static_cast<unsigned>(Offset) >= SubRegs.size())
2655  return std::make_pair(IdxReg, SubRegs[0]);
2656  return std::make_pair(IdxBaseReg, SubRegs[Offset]);
2657 }
2658 
2659 bool AMDGPUInstructionSelector::selectG_EXTRACT_VECTOR_ELT(
2660  MachineInstr &MI) const {
2661  Register DstReg = MI.getOperand(0).getReg();
2662  Register SrcReg = MI.getOperand(1).getReg();
2663  Register IdxReg = MI.getOperand(2).getReg();
2664 
2665  LLT DstTy = MRI->getType(DstReg);
2666  LLT SrcTy = MRI->getType(SrcReg);
2667 
2668  const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2669  const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2670  const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2671 
2672  // The index must be scalar. If it wasn't, RegBankSelect should have moved
2673  // this into a waterfall loop.
2674  if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
2675  return false;
2676 
2677  const TargetRegisterClass *SrcRC =
2678  TRI.getRegClassForTypeOnBank(SrcTy, *SrcRB);
2679  const TargetRegisterClass *DstRC =
2680  TRI.getRegClassForTypeOnBank(DstTy, *DstRB);
2681  if (!SrcRC || !DstRC)
2682  return false;
2683  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2684  !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2685  !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
2686  return false;
2687 
2688  MachineBasicBlock *BB = MI.getParent();
2689  const DebugLoc &DL = MI.getDebugLoc();
2690  const bool Is64 = DstTy.getSizeInBits() == 64;
2691 
2692  unsigned SubReg;
2693  std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, SrcRC, IdxReg,
2694  DstTy.getSizeInBits() / 8);
2695 
2696  if (SrcRB->getID() == AMDGPU::SGPRRegBankID) {
2697  if (DstTy.getSizeInBits() != 32 && !Is64)
2698  return false;
2699 
2700  BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2701  .addReg(IdxReg);
2702 
2703  unsigned Opc = Is64 ? AMDGPU::S_MOVRELS_B64 : AMDGPU::S_MOVRELS_B32;
2704  BuildMI(*BB, &MI, DL, TII.get(Opc), DstReg)
2705  .addReg(SrcReg, 0, SubReg)
2706  .addReg(SrcReg, RegState::Implicit);
2707  MI.eraseFromParent();
2708  return true;
2709  }
2710 
2711  if (SrcRB->getID() != AMDGPU::VGPRRegBankID || DstTy.getSizeInBits() != 32)
2712  return false;
2713 
2714  if (!STI.useVGPRIndexMode()) {
2715  BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2716  .addReg(IdxReg);
2717  BuildMI(*BB, &MI, DL, TII.get(AMDGPU::V_MOVRELS_B32_e32), DstReg)
2718  .addReg(SrcReg, 0, SubReg)
2719  .addReg(SrcReg, RegState::Implicit);
2720  MI.eraseFromParent();
2721  return true;
2722  }
2723 
2724  const MCInstrDesc &GPRIDXDesc =
2725  TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*SrcRC), true);
2726  BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg)
2727  .addReg(SrcReg)
2728  .addReg(IdxReg)
2729  .addImm(SubReg);
2730 
2731  MI.eraseFromParent();
2732  return true;
2733 }
2734 
2735 // TODO: Fold insert_vector_elt (extract_vector_elt) into movrelsd
2736 bool AMDGPUInstructionSelector::selectG_INSERT_VECTOR_ELT(
2737  MachineInstr &MI) const {
2738  Register DstReg = MI.getOperand(0).getReg();
2739  Register VecReg = MI.getOperand(1).getReg();
2740  Register ValReg = MI.getOperand(2).getReg();
2741  Register IdxReg = MI.getOperand(3).getReg();
2742 
2743  LLT VecTy = MRI->getType(DstReg);
2744  LLT ValTy = MRI->getType(ValReg);
2745  unsigned VecSize = VecTy.getSizeInBits();
2746  unsigned ValSize = ValTy.getSizeInBits();
2747 
2748  const RegisterBank *VecRB = RBI.getRegBank(VecReg, *MRI, TRI);
2749  const RegisterBank *ValRB = RBI.getRegBank(ValReg, *MRI, TRI);
2750  const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2751 
2752  assert(VecTy.getElementType() == ValTy);
2753 
2754  // The index must be scalar. If it wasn't, RegBankSelect should have moved
2755  // this into a waterfall loop.
2756  if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
2757  return false;
2758 
2759  const TargetRegisterClass *VecRC =
2760  TRI.getRegClassForTypeOnBank(VecTy, *VecRB);
2761  const TargetRegisterClass *ValRC =
2762  TRI.getRegClassForTypeOnBank(ValTy, *ValRB);
2763 
2764  if (!RBI.constrainGenericRegister(VecReg, *VecRC, *MRI) ||
2765  !RBI.constrainGenericRegister(DstReg, *VecRC, *MRI) ||
2766  !RBI.constrainGenericRegister(ValReg, *ValRC, *MRI) ||
2767  !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
2768  return false;
2769 
2770  if (VecRB->getID() == AMDGPU::VGPRRegBankID && ValSize != 32)
2771  return false;
2772 
2773  unsigned SubReg;
2774  std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, VecRC, IdxReg,
2775  ValSize / 8);
2776 
2777  const bool IndexMode = VecRB->getID() == AMDGPU::VGPRRegBankID &&
2778  STI.useVGPRIndexMode();
2779 
2780  MachineBasicBlock *BB = MI.getParent();
2781  const DebugLoc &DL = MI.getDebugLoc();
2782 
2783  if (!IndexMode) {
2784  BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2785  .addReg(IdxReg);
2786 
2787  const MCInstrDesc &RegWriteOp = TII.getIndirectRegWriteMovRelPseudo(
2788  VecSize, ValSize, VecRB->getID() == AMDGPU::SGPRRegBankID);
2789  BuildMI(*BB, MI, DL, RegWriteOp, DstReg)
2790  .addReg(VecReg)
2791  .addReg(ValReg)
2792  .addImm(SubReg);
2793  MI.eraseFromParent();
2794  return true;
2795  }
2796 
2797  const MCInstrDesc &GPRIDXDesc =
2798  TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), false);
2799  BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg)
2800  .addReg(VecReg)
2801  .addReg(ValReg)
2802  .addReg(IdxReg)
2803  .addImm(SubReg);
2804 
2805  MI.eraseFromParent();
2806  return true;
2807 }
2808 
2809 static bool isZeroOrUndef(int X) {
2810  return X == 0 || X == -1;
2811 }
2812 
2813 static bool isOneOrUndef(int X) {
2814  return X == 1 || X == -1;
2815 }
2816 
2817 static bool isZeroOrOneOrUndef(int X) {
2818  return X == 0 || X == 1 || X == -1;
2819 }
2820 
2821 // Normalize a VOP3P shuffle mask to refer to the low/high half of a single
2822 // 32-bit register.
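// For example (illustrative): the mask <2, 3> reads only Src1, so it is
// rewritten to <0, 1> and Src1 is returned; a mask with both lanes below 2,
// such as <0, 1> or <1, -1>, reads only Src0 and is returned unchanged.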
2823 static Register normalizeVOP3PMask(int NewMask[2], Register Src0, Register Src1,
2824  ArrayRef<int> Mask) {
2825  NewMask[0] = Mask[0];
2826  NewMask[1] = Mask[1];
2827  if (Mask[0] < 2 && Mask[1] < 2)
2828  return Src0;
2829 
2830  assert(NewMask[0] == 2 || NewMask[0] == 3 || NewMask[0] == -1);
2831  assert(NewMask[1] == 2 || NewMask[1] == 3 || NewMask[1] == -1);
2832 
2833  // Shift the mask inputs to be 0/1.
2834  NewMask[0] = NewMask[0] == -1 ? -1 : NewMask[0] - 2;
2835  NewMask[1] = NewMask[1] == -1 ? -1 : NewMask[1] - 2;
2836  return Src1;
2837 }
2838 
2839 // This is only legal with VOP3P instructions as an aid to op_sel matching.
2840 bool AMDGPUInstructionSelector::selectG_SHUFFLE_VECTOR(
2841  MachineInstr &MI) const {
2842  Register DstReg = MI.getOperand(0).getReg();
2843  Register Src0Reg = MI.getOperand(1).getReg();
2844  Register Src1Reg = MI.getOperand(2).getReg();
2845  ArrayRef<int> ShufMask = MI.getOperand(3).getShuffleMask();
2846 
2847  const LLT V2S16 = LLT::fixed_vector(2, 16);
2848  if (MRI->getType(DstReg) != V2S16 || MRI->getType(Src0Reg) != V2S16)
2849  return false;
2850 
2851  if (!AMDGPU::isLegalVOP3PShuffleMask(ShufMask))
2852  return false;
2853 
2854  assert(ShufMask.size() == 2);
2855 
2856  MachineBasicBlock *MBB = MI.getParent();
2857  const DebugLoc &DL = MI.getDebugLoc();
2858 
2859  const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2860  const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
2861  const TargetRegisterClass &RC = IsVALU ?
2862  AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
2863 
2864  // Handle the degenerate case which should have folded out.
2865  if (ShufMask[0] == -1 && ShufMask[1] == -1) {
2866  BuildMI(*MBB, MI, DL, TII.get(AMDGPU::IMPLICIT_DEF), DstReg);
2867 
2868  MI.eraseFromParent();
2869  return RBI.constrainGenericRegister(DstReg, RC, *MRI);
2870  }
2871 
2872  // A legal VOP3P mask only reads one of the sources.
2873  int Mask[2];
2874  Register SrcVec = normalizeVOP3PMask(Mask, Src0Reg, Src1Reg, ShufMask);
2875 
2876  if (!RBI.constrainGenericRegister(DstReg, RC, *MRI) ||
2877  !RBI.constrainGenericRegister(SrcVec, RC, *MRI))
2878  return false;
2879 
2880  // TODO: This also should have been folded out
2881  if (isZeroOrUndef(Mask[0]) && isOneOrUndef(Mask[1])) {
2882  BuildMI(*MBB, MI, DL, TII.get(AMDGPU::COPY), DstReg)
2883  .addReg(SrcVec);
2884 
2885  MI.eraseFromParent();
2886  return true;
2887  }
2888 
2889  if (Mask[0] == 1 && Mask[1] == -1) {
2890  if (IsVALU) {
2891  BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), DstReg)
2892  .addImm(16)
2893  .addReg(SrcVec);
2894  } else {
2895  BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), DstReg)
2896  .addReg(SrcVec)
2897  .addImm(16);
2898  }
2899  } else if (Mask[0] == -1 && Mask[1] == 0) {
2900  if (IsVALU) {
2901  BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), DstReg)
2902  .addImm(16)
2903  .addReg(SrcVec);
2904  } else {
2905  BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHL_B32), DstReg)
2906  .addReg(SrcVec)
2907  .addImm(16);
2908  }
2909  } else if (Mask[0] == 0 && Mask[1] == 0) {
2910  if (IsVALU) {
2911  if (STI.hasSDWA()) {
2912  // Write low half of the register into the high half.
2913  MachineInstr *MovSDWA =
2914  BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
2915  .addImm(0) // $src0_modifiers
2916  .addReg(SrcVec) // $src0
2917  .addImm(0) // $clamp
2918  .addImm(AMDGPU::SDWA::WORD_1) // $dst_sel
2919  .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
2920  .addImm(AMDGPU::SDWA::WORD_0) // $src0_sel
2921  .addReg(SrcVec, RegState::Implicit);
2922  MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
2923  } else {
2924  Register TmpReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
2925  BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_AND_B32_e32), TmpReg)
2926  .addImm(0xFFFF)
2927  .addReg(SrcVec);
2928  BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHL_OR_B32_e64), DstReg)
2929  .addReg(TmpReg)
2930  .addImm(16)
2931  .addReg(TmpReg);
2932  }
2933  } else {
2934  BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg)
2935  .addReg(SrcVec)
2936  .addReg(SrcVec);
2937  }
2938  } else if (Mask[0] == 1 && Mask[1] == 1) {
2939  if (IsVALU) {
2940  if (STI.hasSDWA()) {
2941  // Write high half of the register into the low half.
2942  MachineInstr *MovSDWA =
2943  BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
2944  .addImm(0) // $src0_modifiers
2945  .addReg(SrcVec) // $src0
2946  .addImm(0) // $clamp
2947  .addImm(AMDGPU::SDWA::WORD_0) // $dst_sel
2948  .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
2949  .addImm(AMDGPU::SDWA::WORD_1) // $src0_sel
2950  .addReg(SrcVec, RegState::Implicit);
2951  MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
2952  } else {
2953  Register TmpReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
2954  BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), TmpReg)
2955  .addImm(16)
2956  .addReg(SrcVec);
2957  BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHL_OR_B32_e64), DstReg)
2958  .addReg(TmpReg)
2959  .addImm(16)
2960  .addReg(TmpReg);
2961  }
2962  } else {
2963  BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_HH_B32_B16), DstReg)
2964  .addReg(SrcVec)
2965  .addReg(SrcVec);
2966  }
2967  } else if (Mask[0] == 1 && Mask[1] == 0) {
2968  if (IsVALU) {
2969  BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_ALIGNBIT_B32_e64), DstReg)
2970  .addReg(SrcVec)
2971  .addReg(SrcVec)
2972  .addImm(16);
2973  } else {
2974  if (STI.hasSPackHL()) {
2975  BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_HL_B32_B16), DstReg)
2976  .addReg(SrcVec)
2977  .addReg(SrcVec);
2978  } else {
2979  Register TmpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2980  BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), TmpReg)
2981  .addReg(SrcVec)
2982  .addImm(16);
2983  BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg)
2984  .addReg(TmpReg)
2985  .addReg(SrcVec);
2986  }
2987  }
2988  } else
2989  llvm_unreachable("all shuffle masks should be handled");
2990 
2991  MI.eraseFromParent();
2992  return true;
2993 }
2994 
2995 bool AMDGPUInstructionSelector::selectAMDGPU_BUFFER_ATOMIC_FADD(
2996  MachineInstr &MI) const {
2997  const Register DefReg = MI.getOperand(0).getReg();
2998  LLT DefTy = MRI->getType(DefReg);
2999  if (AMDGPU::hasAtomicFaddRtnForTy(STI, DefTy))
3000  return selectImpl(MI, *CoverageInfo);
3001 
3002  MachineBasicBlock *MBB = MI.getParent();
3003  const DebugLoc &DL = MI.getDebugLoc();
3004 
3005  if (!MRI->use_nodbg_empty(DefReg)) {
3006  Function &F = MBB->getParent()->getFunction();
3007  DiagnosticInfoUnsupported
3008  NoFpRet(F, "return versions of fp atomics not supported",
3009  MI.getDebugLoc(), DS_Error);
3010  F.getContext().diagnose(NoFpRet);
3011  return false;
3012  }
3013 
3014  // FIXME: This is only needed because tablegen requires the number of dst
3015  // operands in the match and replace patterns to be the same. Otherwise the
3016  // patterns can be exported from the SDag path.
3017  MachineOperand &VDataIn = MI.getOperand(1);
3018  MachineOperand &VIndex = MI.getOperand(3);
3019  MachineOperand &VOffset = MI.getOperand(4);
3020  MachineOperand &SOffset = MI.getOperand(5);
3021  int16_t Offset = MI.getOperand(6).getImm();
3022 
3023  bool HasVOffset = !isOperandImmEqual(VOffset, 0, *MRI);
3024  bool HasVIndex = !isOperandImmEqual(VIndex, 0, *MRI);
3025 
3026  unsigned Opcode;
3027  if (HasVOffset) {
3028  Opcode = HasVIndex ? AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN
3029  : AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFEN;
3030  } else {
3031  Opcode = HasVIndex ? AMDGPU::BUFFER_ATOMIC_ADD_F32_IDXEN
3032  : AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFSET;
3033  }
3034 
3035  if (MRI->getType(VDataIn.getReg()).isVector()) {
3036  switch (Opcode) {
3037  case AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN:
3038  Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN;
3039  break;
3040  case AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFEN:
3041  Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_OFFEN;
3042  break;
3043  case AMDGPU::BUFFER_ATOMIC_ADD_F32_IDXEN:
3044  Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_IDXEN;
3045  break;
3046  case AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFSET:
3047  Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_OFFSET;
3048  break;
3049  }
3050  }
3051 
3052  auto I = BuildMI(*MBB, MI, DL, TII.get(Opcode));
3053  I.add(VDataIn);
3054 
3055  if (Opcode == AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN ||
3056  Opcode == AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN) {
3057  Register IdxReg = MRI->createVirtualRegister(TRI.getVGPR64Class());
3058  BuildMI(*MBB, &*I, DL, TII.get(AMDGPU::REG_SEQUENCE), IdxReg)
3059  .addReg(VIndex.getReg())
3060  .addImm(AMDGPU::sub0)
3061  .addReg(VOffset.getReg())
3062  .addImm(AMDGPU::sub1);
3063 
3064  I.addReg(IdxReg);
3065  } else if (HasVIndex) {
3066  I.add(VIndex);
3067  } else if (HasVOffset) {
3068  I.add(VOffset);
3069  }
3070 
3071  I.add(MI.getOperand(2)); // rsrc
3072  I.add(SOffset);
3073  I.addImm(Offset);
3074  I.addImm(MI.getOperand(7).getImm()); // cpol
3075  I.cloneMemRefs(MI);
3076 
3077  MI.eraseFromParent();
3078 
3079  return true;
3080 }
3081 
3082 bool AMDGPUInstructionSelector::selectGlobalAtomicFadd(
3083  MachineInstr &MI, MachineOperand &AddrOp, MachineOperand &DataOp) const {
3084 
3085  if (STI.hasGFX90AInsts()) {
3086  // gfx90a adds return versions of the global atomic fadd instructions so no
3087  // special handling is required.
3088  return selectImpl(MI, *CoverageInfo);
3089  }
3090 
3091  MachineBasicBlock *MBB = MI.getParent();
3092  const DebugLoc &DL = MI.getDebugLoc();
3093 
3094  if (!MRI->use_nodbg_empty(MI.getOperand(0).getReg())) {
3095  Function &F = MBB->getParent()->getFunction();
3096  DiagnosticInfoUnsupported
3097  NoFpRet(F, "return versions of fp atomics not supported",
3098  MI.getDebugLoc(), DS_Error);
3099  F.getContext().diagnose(NoFpRet);
3100  return false;
3101  }
3102 
3103  // FIXME: This is only needed because tablegen requires the number of dst
3104  // operands in the match and replace patterns to be the same. Otherwise the
3105  // patterns can be exported from the SDag path.
3106  auto Addr = selectFlatOffsetImpl(AddrOp, SIInstrFlags::FlatGlobal);
3107 
3108  Register Data = DataOp.getReg();
3109  const unsigned Opc = MRI->getType(Data).isVector() ?
3110  AMDGPU::GLOBAL_ATOMIC_PK_ADD_F16 : AMDGPU::GLOBAL_ATOMIC_ADD_F32;
3111  auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc))
3112  .addReg(Addr.first)
3113  .addReg(Data)
3114  .addImm(Addr.second)
3115  .addImm(0) // cpol
3116  .cloneMemRefs(MI);
3117 
3118  MI.eraseFromParent();
3119  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
3120 }
3121 
3122 bool AMDGPUInstructionSelector::selectBufferLoadLds(MachineInstr &MI) const {
3123  unsigned Opc;
3124  unsigned Size = MI.getOperand(3).getImm();
3125 
3126  // The struct intrinsic variants add one additional operand over raw.
3127  const bool HasVIndex = MI.getNumOperands() == 9;
3128  Register VIndex;
3129  int OpOffset = 0;
3130  if (HasVIndex) {
3131  VIndex = MI.getOperand(4).getReg();
3132  OpOffset = 1;
3133  }
3134 
3135  Register VOffset = MI.getOperand(4 + OpOffset).getReg();
3136  Optional<ValueAndVReg> MaybeVOffset =
3137  getIConstantVRegValWithLookThrough(VOffset, *MRI);
3138  const bool HasVOffset = !MaybeVOffset || MaybeVOffset->Value.getZExtValue();
3139 
3140  switch (Size) {
3141  default:
3142  return false;
3143  case 1:
3144  Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_UBYTE_LDS_BOTHEN
3145  : AMDGPU::BUFFER_LOAD_UBYTE_LDS_IDXEN
3146  : HasVOffset ? AMDGPU::BUFFER_LOAD_UBYTE_LDS_OFFEN
3147  : AMDGPU::BUFFER_LOAD_UBYTE_LDS_OFFSET;
3148  break;
3149  case 2:
3150  Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_USHORT_LDS_BOTHEN
3151  : AMDGPU::BUFFER_LOAD_USHORT_LDS_IDXEN
3152  : HasVOffset ? AMDGPU::BUFFER_LOAD_USHORT_LDS_OFFEN
3153  : AMDGPU::BUFFER_LOAD_USHORT_LDS_OFFSET;
3154  break;
3155  case 4:
3156  Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_DWORD_LDS_BOTHEN
3157  : AMDGPU::BUFFER_LOAD_DWORD_LDS_IDXEN
3158  : HasVOffset ? AMDGPU::BUFFER_LOAD_DWORD_LDS_OFFEN
3159  : AMDGPU::BUFFER_LOAD_DWORD_LDS_OFFSET;
3160  break;
3161  }
3162 
3163  MachineBasicBlock *MBB = MI.getParent();
3164  const DebugLoc &DL = MI.getDebugLoc();
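  // The LDS destination address for the store half of the operation is
  // carried in M0.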
3165  BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
3166  .add(MI.getOperand(2));
3167 
3168  auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc));
3169 
3170  if (HasVIndex && HasVOffset) {
3171  Register IdxReg = MRI->createVirtualRegister(TRI.getVGPR64Class());
3172  BuildMI(*MBB, &*MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), IdxReg)
3173  .addReg(VIndex)
3174  .addImm(AMDGPU::sub0)
3175  .addReg(VOffset)
3176  .addImm(AMDGPU::sub1);
3177 
3178  MIB.addReg(IdxReg);
3179  } else if (HasVIndex) {
3180  MIB.addReg(VIndex);
3181  } else if (HasVOffset) {
3182  MIB.addReg(VOffset);
3183  }
3184 
3185  MIB.add(MI.getOperand(1)); // rsrc
3186  MIB.add(MI.getOperand(5 + OpOffset)); // soffset
3187  MIB.add(MI.getOperand(6 + OpOffset)); // imm offset
3188  unsigned Aux = MI.getOperand(7 + OpOffset).getImm();
3189  MIB.addImm(Aux & AMDGPU::CPol::ALL); // cpol
3190  MIB.addImm((Aux >> 3) & 1); // swz
3191 
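  // The instruction both reads the buffer and writes LDS, so it carries two
  // memory operands: the original load MMO plus a synthesized store MMO
  // targeting the local address space.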
3192  MachineMemOperand *LoadMMO = *MI.memoperands_begin();
3193  MachinePointerInfo LoadPtrI = LoadMMO->getPointerInfo();
3194  LoadPtrI.Offset = MI.getOperand(6 + OpOffset).getImm();
3195  MachinePointerInfo StorePtrI = LoadPtrI;
3196  StorePtrI.V = nullptr;
3197  StorePtrI.AddrSpace = AMDGPUAS::LOCAL_ADDRESS;
3198 
3199  auto F = LoadMMO->getFlags() &
3200  ~(MachineMemOperand::MOStore | MachineMemOperand::MOLoad);
3201  LoadMMO = MF->getMachineMemOperand(LoadPtrI, F | MachineMemOperand::MOLoad,
3202  Size, LoadMMO->getBaseAlign());
3203 
3204  MachineMemOperand *StoreMMO =
3205  MF->getMachineMemOperand(StorePtrI, F | MachineMemOperand::MOStore,
3206  sizeof(int32_t), LoadMMO->getBaseAlign());
3207 
3208  MIB.setMemRefs({LoadMMO, StoreMMO});
3209 
3210  MI.eraseFromParent();
3211  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
3212 }
3213 
3214 /// Match a zero extend from a 32-bit value to 64-bits.
3215 static Register matchZeroExtendFromS32(MachineRegisterInfo &MRI, Register Reg) {
3216  Register ZExtSrc;
3217  if (mi_match(Reg, MRI, m_GZExt(m_Reg(ZExtSrc))))
3218  return MRI.getType(ZExtSrc) == LLT::scalar(32) ? ZExtSrc : Register();
3219 
3220  // Match legalized form %zext = G_MERGE_VALUES (s32 %x), (s32 0)
3221  const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
3222  if (Def->getOpcode() != AMDGPU::G_MERGE_VALUES)
3223  return Register();
3224 
3225  if (mi_match(Def->getOperand(2).getReg(), MRI, m_ZeroInt())) {
3226  return Def->getOperand(1).getReg();
3227  }
3228 
3229  return Register();
3230 }
3231 
3232 bool AMDGPUInstructionSelector::selectGlobalLoadLds(MachineInstr &MI) const{
3233  unsigned Opc;
3234  unsigned Size = MI.getOperand(3).getImm();
3235 
3236  switch (Size) {
3237  default:
3238  return false;
3239  case 1:
3240  Opc = AMDGPU::GLOBAL_LOAD_LDS_UBYTE;
3241  break;
3242  case 2:
3243  Opc = AMDGPU::GLOBAL_LOAD_LDS_USHORT;
3244  break;
3245  case 4:
3246  Opc = AMDGPU::GLOBAL_LOAD_LDS_DWORD;
3247  break;
3248  }
3249 
3250  MachineBasicBlock *MBB = MI.getParent();
3251  const DebugLoc &DL = MI.getDebugLoc();
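  // As in the buffer case, the LDS destination address is passed in M0.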
3252  BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
3253  .add(MI.getOperand(2));
3254 
3255  Register Addr = MI.getOperand(1).getReg();
3256  Register VOffset;
3257  // Try to split SAddr and VOffset. Global and LDS pointers share the same
3258  // immediate offset, so we cannot use a regular SelectGlobalSAddr().
3259  if (!isSGPR(Addr)) {
3260  auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
3261  if (isSGPR(AddrDef->Reg)) {
3262  Addr = AddrDef->Reg;
3263  } else if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
3264  Register SAddr =
3265  getSrcRegIgnoringCopies(AddrDef->MI->getOperand(1).getReg(), *MRI);
3266  if (SAddr && isSGPR(SAddr)) {
3267  Register PtrBaseOffset = AddrDef->MI->getOperand(2).getReg();
3268  if (Register Off = matchZeroExtendFromS32(*MRI, PtrBaseOffset)) {
3269  Addr = SAddr;
3270  VOffset = Off;
3271  }
3272  }
3273  }
3274  }
3275 
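  // If the address turned out to be uniform, use the SADDR form; materialize
  // a zero voffset if the decomposition above did not produce one.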
3276  if (isSGPR(Addr)) {
3277  Opc = AMDGPU::getGlobalSaddrOp(Opc);
3278  if (!VOffset) {
3279  VOffset = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3280  BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_MOV_B32_e32), VOffset)
3281  .addImm(0);
3282  }
3283  }
3284 
3285  auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc))
3286  .addReg(Addr);
3287 
3288  if (isSGPR(Addr))
3289  MIB.addReg(VOffset);
3290 
3291  MIB.add(MI.getOperand(4)) // offset
3292  .add(MI.getOperand(5)); // cpol
3293 
3294  MachineMemOperand *LoadMMO = *MI.memoperands_begin();
3295  MachinePointerInfo LoadPtrI = LoadMMO->getPointerInfo();
3296  LoadPtrI.Offset = MI.getOperand(4).getImm();
3297  MachinePointerInfo StorePtrI = LoadPtrI;
3298  StorePtrI.V = nullptr;
3299  StorePtrI.AddrSpace = AMDGPUAS::LOCAL_ADDRESS;
3300  auto F = LoadMMO->getFlags() &
3301  ~(MachineMemOperand::MOStore | MachineMemOperand::MOLoad);
3302  LoadMMO = MF->getMachineMemOperand(LoadPtrI, F | MachineMemOperand::MOLoad,
3303  Size, LoadMMO->getBaseAlign());
3304  MachineMemOperand *StoreMMO =
3305  MF->getMachineMemOperand(StorePtrI, F | MachineMemOperand::MOStore,
3306  sizeof(int32_t), Align(4));
3307 
3308  MIB.setMemRefs({LoadMMO, StoreMMO});
3309 
3310  MI.eraseFromParent();
3311  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
3312 }
3313 
3314 bool AMDGPUInstructionSelector::selectBVHIntrinsic(MachineInstr &MI) const{
3315  MI.setDesc(TII.get(MI.getOperand(1).getImm()));
3316  MI.removeOperand(1);
3317  MI.addImplicitDefUseOperands(*MI.getParent()->getParent());
3318  return true;
3319 }
3320 
3321 bool AMDGPUInstructionSelector::selectSMFMACIntrin(MachineInstr &MI) const {
3322  unsigned Opc;
3323  switch (MI.getIntrinsicID()) {
3324  case Intrinsic::amdgcn_smfmac_f32_16x16x32_f16:
3325  Opc = AMDGPU::V_SMFMAC_F32_16X16X32_F16_e64;
3326  break;
3327  case Intrinsic::amdgcn_smfmac_f32_32x32x16_f16:
3328  Opc = AMDGPU::V_SMFMAC_F32_32X32X16_F16_e64;
3329  break;
3330  case Intrinsic::amdgcn_smfmac_f32_16x16x32_bf16:
3331  Opc = AMDGPU::V_SMFMAC_F32_16X16X32_BF16_e64;
3332  break;
3333  case Intrinsic::amdgcn_smfmac_f32_32x32x16_bf16:
3334  Opc = AMDGPU::V_SMFMAC_F32_32X32X16_BF16_e64;
3335  break;
3336  case Intrinsic::amdgcn_smfmac_i32_16x16x64_i8:
3337  Opc = AMDGPU::V_SMFMAC_I32_16X16X64_I8_e64;
3338  break;
3339  case Intrinsic::amdgcn_smfmac_i32_32x32x32_i8:
3340  Opc = AMDGPU::V_SMFMAC_I32_32X32X32_I8_e64;
3341  break;
3342  default:
3343  llvm_unreachable("unhandled smfmac intrinsic");
3344  }
3345 
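  // The tied VDst_In operand has to follow the explicit sources, so detach it
  // here and re-append it after the intrinsic ID has been dropped.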
3346  auto VDst_In = MI.getOperand(4);
3347 
3348  MI.setDesc(TII.get(Opc));
3349  MI.removeOperand(4); // VDst_In
3350  MI.removeOperand(1); // Intrinsic ID
3351  MI.addOperand(VDst_In); // Readd VDst_In to the end
3352  MI.addImplicitDefUseOperands(*MI.getParent()->getParent());
3353  return true;
3354 }
3355 
3356 bool AMDGPUInstructionSelector::selectWaveAddress(MachineInstr &MI) const {
3357  Register DstReg = MI.getOperand(0).getReg();
3358  Register SrcReg = MI.getOperand(1).getReg();
3359  const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
3360  const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
3361  MachineBasicBlock *MBB = MI.getParent();
3362  const DebugLoc &DL = MI.getDebugLoc();
3363 
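  // Form the wave-level address by shifting the source right by
  // log2(wavefront size), using a VALU or SALU shift to match the
  // destination bank.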
3364  if (IsVALU) {
3365  BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), DstReg)
3366  .addImm(Subtarget->getWavefrontSizeLog2())
3367  .addReg(SrcReg);
3368  } else {
3369  BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), DstReg)
3370  .addReg(SrcReg)
3371  .addImm(Subtarget->getWavefrontSizeLog2());
3372  }
3373 
3374  const TargetRegisterClass &RC =
3375  IsVALU ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
3376  if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
3377  return false;
3378 
3379  MI.eraseFromParent();
3380  return true;
3381 }
3382 
3383 bool AMDGPUInstructionSelector::select(MachineInstr &I) {
3384  if (I.isPHI())
3385  return selectPHI(I);
3386 
3387  if (!I.isPreISelOpcode()) {
3388  if (I.isCopy())
3389  return selectCOPY(I);
3390  return true;
3391  }
3392 
3393  switch (I.getOpcode()) {
3394  case TargetOpcode::G_AND:
3395  case TargetOpcode::G_OR:
3396  case TargetOpcode::G_XOR:
3397  if (selectImpl(I, *CoverageInfo))
3398  return true;
3399  return selectG_AND_OR_XOR(I);
3400  case TargetOpcode::G_ADD:
3401  case TargetOpcode::G_SUB:
3402  if (selectImpl(I, *CoverageInfo))
3403  return true;
3404  return selectG_ADD_SUB(I);
3405  case TargetOpcode::G_UADDO:
3406  case TargetOpcode::G_USUBO:
3407  case TargetOpcode::G_UADDE:
3408  case TargetOpcode::G_USUBE:
3409  return selectG_UADDO_USUBO_UADDE_USUBE(I);
3410  case AMDGPU::G_AMDGPU_MAD_U64_U32:
3411  case AMDGPU::G_AMDGPU_MAD_I64_I32:
3412  return selectG_AMDGPU_MAD_64_32(I);
3413  case TargetOpcode::G_INTTOPTR:
3414  case TargetOpcode::G_BITCAST:
3415  case TargetOpcode::G_PTRTOINT:
3416  return selectCOPY(I);
3417  case TargetOpcode::G_CONSTANT:
3418  case TargetOpcode::G_FCONSTANT:
3419  return selectG_CONSTANT(I);
3420  case TargetOpcode::G_FNEG:
3421  if (selectImpl(I, *CoverageInfo))
3422  return true;
3423  return selectG_FNEG(I);
3424  case TargetOpcode::G_FABS:
3425  if (selectImpl(I, *CoverageInfo))
3426  return true;
3427  return selectG_FABS(I);
3428  case TargetOpcode::G_EXTRACT:
3429  return selectG_EXTRACT(I);
3430  case TargetOpcode::G_MERGE_VALUES:
3431  case TargetOpcode::G_BUILD_VECTOR:
3432  case TargetOpcode::G_CONCAT_VECTORS:
3433  return selectG_MERGE_VALUES(I);
3434  case TargetOpcode::G_UNMERGE_VALUES:
3435  return selectG_UNMERGE_VALUES(I);
3436  case TargetOpcode::G_BUILD_VECTOR_TRUNC:
3437  return selectG_BUILD_VECTOR_TRUNC(I);
3438  case TargetOpcode::G_PTR_ADD:
3439  return selectG_PTR_ADD(I);
3440  case TargetOpcode::G_IMPLICIT_DEF:
3441  return selectG_IMPLICIT_DEF(I);
3442  case TargetOpcode::G_FREEZE:
3443  return selectCOPY(I);
3444  case TargetOpcode::G_INSERT:
3445  return selectG_INSERT(I);
3446  case TargetOpcode::G_INTRINSIC:
3447  return selectG_INTRINSIC(I);
3448  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
3449  return selectG_INTRINSIC_W_SIDE_EFFECTS(I);
3450  case TargetOpcode::G_ICMP:
3451  if (selectG_ICMP(I))
3452  return true;
3453  return selectImpl(I, *CoverageInfo);
3454  case TargetOpcode::G_LOAD:
3455  case TargetOpcode::G_STORE:
3456  case TargetOpcode::G_ATOMIC_CMPXCHG:
3457  case TargetOpcode::G_ATOMICRMW_XCHG:
3458  case TargetOpcode::G_ATOMICRMW_ADD:
3459  case TargetOpcode::G_ATOMICRMW_SUB:
3460  case TargetOpcode::G_ATOMICRMW_AND:
3461  case TargetOpcode::G_ATOMICRMW_OR:
3462  case TargetOpcode::G_ATOMICRMW_XOR:
3463  case TargetOpcode::G_ATOMICRMW_MIN:
3464  case TargetOpcode::G_ATOMICRMW_MAX:
3465  case TargetOpcode::G_ATOMICRMW_UMIN:
3466  case TargetOpcode::G_ATOMICRMW_UMAX:
3467  case TargetOpcode::G_ATOMICRMW_FADD:
3468  case AMDGPU::G_AMDGPU_ATOMIC_INC:
3469  case AMDGPU::G_AMDGPU_ATOMIC_DEC:
3470  case AMDGPU::G_AMDGPU_ATOMIC_FMIN:
3471  case AMDGPU::G_AMDGPU_ATOMIC_FMAX:
3472  return selectG_LOAD_STORE_ATOMICRMW(I);
3473  case TargetOpcode::G_SELECT:
3474  return selectG_SELECT(I);
3475  case TargetOpcode::G_TRUNC:
3476  return selectG_TRUNC(I);
3477  case TargetOpcode::G_SEXT:
3478  case TargetOpcode::G_ZEXT:
3479  case TargetOpcode::G_ANYEXT:
3480  case TargetOpcode::G_SEXT_INREG:
3481  if (selectImpl(I, *CoverageInfo))
3482  return true;
3483  return selectG_SZA_EXT(I);
3484  case TargetOpcode::G_BRCOND:
3485  return selectG_BRCOND(I);
3486  case TargetOpcode::G_GLOBAL_VALUE:
3487  return selectG_GLOBAL_VALUE(I);
3488  case TargetOpcode::G_PTRMASK:
3489  return selectG_PTRMASK(I);
3490  case TargetOpcode::G_EXTRACT_VECTOR_ELT:
3491  return selectG_EXTRACT_VECTOR_ELT(I);
3492  case TargetOpcode::G_INSERT_VECTOR_ELT:
3493  return selectG_INSERT_VECTOR_ELT(I);
3494  case TargetOpcode::G_SHUFFLE_VECTOR:
3495  return selectG_SHUFFLE_VECTOR(I);
3496  case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD:
3497  case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16:
3498  case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE:
3499  case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16: {
3500  const AMDGPU::ImageDimIntrinsicInfo *Intr
3501  = AMDGPU::getImageDimIntrinsicInfo(I.getIntrinsicID());
3502  assert(Intr && "not an image intrinsic with image pseudo");
3503  return selectImageIntrinsic(I, Intr);
3504  }
3505  case AMDGPU::G_AMDGPU_INTRIN_BVH_INTERSECT_RAY:
3506  return selectBVHIntrinsic(I);
3507  case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FADD:
3508  return selectAMDGPU_BUFFER_ATOMIC_FADD(I);
3509  case AMDGPU::G_SBFX:
3510  case AMDGPU::G_UBFX:
3511  return selectG_SBFX_UBFX(I);
3512  case AMDGPU::G_SI_CALL:
3513  I.setDesc(TII.get(AMDGPU::SI_CALL));
3514  return true;
3515  case AMDGPU::G_AMDGPU_WAVE_ADDRESS:
3516  return selectWaveAddress(I);
3517  default:
3518  return selectImpl(I, *CoverageInfo);
3519  }
3520  return false;
3521 }
3522 
3523 InstructionSelector::ComplexRendererFns
3524 AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const {
3525  return {{
3526  [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
3527  }};
3528 
3529 }
3530 
3531 std::pair<Register, unsigned> AMDGPUInstructionSelector::selectVOP3ModsImpl(
3532  MachineOperand &Root, bool AllowAbs, bool OpSel, bool ForceVGPR) const {
3533  Register Src = Root.getReg();
3534  Register OrigSrc = Src;
3535  unsigned Mods = 0;
3536  MachineInstr *MI = getDefIgnoringCopies(Src, *MRI);
3537 
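  // Peel an outer fneg, then an inner fabs (when allowed), folding each into
  // the NEG/ABS source modifier bits instead of selecting real instructions.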
3538  if (MI && MI->getOpcode() == AMDGPU::G_FNEG) {
3539  Src = MI->getOperand(1).getReg();
3540  Mods |= SISrcMods::NEG;
3541  MI = getDefIgnoringCopies(Src, *MRI);
3542  }
3543 
3544  if (AllowAbs && MI && MI->getOpcode() == AMDGPU::G_FABS) {
3545  Src = MI->getOperand(1).getReg();
3546  Mods |= SISrcMods::ABS;
3547  }
3548 
3549  if (OpSel)
3550  Mods |= SISrcMods::OP_SEL_0;
3551 
3552  if ((Mods != 0 || ForceVGPR) &&
3553  RBI.getRegBank(Src, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID) {
3554  MachineInstr *UseMI = Root.getParent();
3555 
3556  // If we looked through copies to find source modifiers on an SGPR operand,
3557  // we now have an SGPR register source. To avoid potentially violating the
3558  // constant bus restriction, we need to insert a copy to a VGPR.
3559  Register VGPRSrc = MRI->cloneVirtualRegister(OrigSrc);
3560  BuildMI(*UseMI->getParent(), UseMI, UseMI->getDebugLoc(),
3561  TII.get(AMDGPU::COPY), VGPRSrc)
3562  .addReg(Src);
3563  Src = VGPRSrc;
3564  }
3565 
3566  return std::make_pair(Src, Mods);
3567 }
3568 
3569 ///
3570 /// This will select either an SGPR or VGPR operand and will save us from
3571 /// having to write an extra tablegen pattern.
3572 InstructionSelector::ComplexRendererFns
3573 AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const {
3574  return {{
3575  [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
3576  }};
3577 }
3578 
3579 InstructionSelector::ComplexRendererFns
3580 AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const {
3581  Register Src;
3582  unsigned Mods;
3583  std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3584 
3585  return {{
3586  [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3587  [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3588  [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
3589  [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // omod
3590  }};
3591 }
3592 
3593 InstructionSelector::ComplexRendererFns
3594 AMDGPUInstructionSelector::selectVOP3BMods0(MachineOperand &Root) const {
3595  Register Src;
3596  unsigned Mods;
3597  std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false);
3598 
3599  return {{
3600  [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3601  [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3602  [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
3603  [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // omod
3604  }};
3605 }
3606 
3607 InstructionSelector::ComplexRendererFns
3608 AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const {
3609  return {{
3610  [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
3611  [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
3612  [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // omod
3613  }};
3614 }
3615 
3616 InstructionSelector::ComplexRendererFns
3617 AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const {
3618  Register Src;
3619  unsigned Mods;
3620  std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3621 
3622  return {{
3623  [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3624  [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3625  }};
3626 }
3627 
3628 InstructionSelector::ComplexRendererFns
3629 AMDGPUInstructionSelector::selectVOP3BMods(MachineOperand &Root) const {
3630  Register Src;
3631  unsigned Mods;
3632  std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false);
3633 
3634  return {{
3635  [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3636  [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3637  }};
3638 }
3639 
3640 InstructionSelector::ComplexRendererFns
3641 AMDGPUInstructionSelector::selectVOP3NoMods(MachineOperand &Root) const {
3642  Register Reg = Root.getReg();
3643  const MachineInstr *Def = getDefIgnoringCopies(Reg, *MRI);
3644  if (Def && (Def->getOpcode() == AMDGPU::G_FNEG ||
3645  Def->getOpcode() == AMDGPU::G_FABS))
3646  return {};
3647  return {{
3648  [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
3649  }};
3650 }
3651 
3652 std::pair<Register, unsigned>
3653 AMDGPUInstructionSelector::selectVOP3PModsImpl(
3654  Register Src, const MachineRegisterInfo &MRI, bool IsDOT) const {
3655  unsigned Mods = 0;
3656  MachineInstr *MI = MRI.getVRegDef(Src);
3657 
3658  if (MI && MI->getOpcode() == AMDGPU::G_FNEG &&
3659  // It's possible to see an f32 fneg here, but unlikely.
3660  // TODO: Treat f32 fneg as only high bit.
3661  MRI.getType(Src) == LLT::fixed_vector(2, 16)) {
3662  Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI);
3663  Src = MI->getOperand(1).getReg();
3664  MI = MRI.getVRegDef(Src);
3665  }
3666 
3667  // TODO: Match op_sel through g_build_vector_trunc and g_shuffle_vector.
3668  (void)IsDOT; // DOTs do not use OPSEL on gfx940+, check ST.hasDOTOpSelHazard()
3669 
3670  // Packed instructions do not have abs modifiers.
3671  Mods |= SISrcMods::OP_SEL_1;
3672 
3673  return std::make_pair(Src, Mods);
3674 }
3675 
3676 InstructionSelector::ComplexRendererFns
3677 AMDGPUInstructionSelector::selectVOP3PMods(MachineOperand &Root) const {
3678  MachineRegisterInfo &MRI
3679  = Root.getParent()->getParent()->getParent()->getRegInfo();
3680 
3681  Register Src;
3682  unsigned Mods;
3683  std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI);
3684 
3685  return {{
3686  [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3687  [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3688  }};
3689 }
3690 
3691 InstructionSelector::ComplexRendererFns
3692 AMDGPUInstructionSelector::selectVOP3PModsDOT(MachineOperand &Root) const {
3693  MachineRegisterInfo &MRI
3694  = Root.getParent()->getParent()->getParent()->getRegInfo();
3695 
3696  Register Src;
3697  unsigned Mods;
3698  std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI, true);
3699 
3700  return {{
3701  [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3702  [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3703  }};
3704 }
3705 
3706 InstructionSelector::ComplexRendererFns
3707 AMDGPUInstructionSelector::selectDotIUVOP3PMods(MachineOperand &Root) const {
3708  // A literal i1 value set in the intrinsic represents the SrcMods for the
3709  // next operand; it is stored in the Imm operand as an i1 sign extended to
3710  // int64_t. 1 (i.e. -1) promotes packed values to signed, 0 leaves them unsigned.
3711  assert((Root.isImm() && (Root.getImm() == -1 || Root.getImm() == 0)) &&
3712  "expected i1 value");
3713  unsigned Mods = SISrcMods::OP_SEL_1;
3714  if (Root.getImm() == -1)
3715  Mods ^= SISrcMods::NEG;
3716  return {{
3717  [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3718  }};
3719 }
3720 
3721 InstructionSelector::ComplexRendererFns
3722 AMDGPUInstructionSelector::selectVOP3Mods_nnan(MachineOperand &Root) const {
3723  Register Src;
3724  unsigned Mods;
3725  std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3726  if (!isKnownNeverNaN(Src, *MRI))
3727  return None;
3728 
3729  return {{
3730  [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3731  [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3732  }};
3733 }
3734 
3735 InstructionSelector::ComplexRendererFns
3736 AMDGPUInstructionSelector::selectVOP3OpSelMods(MachineOperand &Root) const {
3737  // FIXME: Handle op_sel
3738  return {{
3739  [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
3740  [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // src_mods
3741  }};
3742 }
3743 
3744 InstructionSelector::ComplexRendererFns
3745 AMDGPUInstructionSelector::selectVINTERPMods(MachineOperand &Root) const {
3746  Register Src;
3747  unsigned Mods;
3748  std::tie(Src, Mods) = selectVOP3ModsImpl(Root,
3749  /* AllowAbs */ false,
3750  /* OpSel */ false,
3751  /* ForceVGPR */ true);
3752 
3753  return {{
3754  [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3755  [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3756  }};
3757 }
3758 
3759 InstructionSelector::ComplexRendererFns
3760 AMDGPUInstructionSelector::selectVINTERPModsHi(MachineOperand &Root) const {
3761  Register Src;
3762  unsigned Mods;
3763  std::tie(Src, Mods) = selectVOP3ModsImpl(Root,
3764  /* AllowAbs */ false,
3765  /* OpSel */ true,
3766  /* ForceVGPR */ true);
3767 
3768  return {{
3769  [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3770  [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3771  }};
3772 }
3773 
3774 InstructionSelector::ComplexRendererFns
3775 AMDGPUInstructionSelector::selectSmrdImm(MachineOperand &Root) const {
3776  SmallVector<GEPInfo, 4> AddrInfo;
3777  getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
3778 
3779  if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3780  return None;
3781 
3782  const GEPInfo &GEPInfo = AddrInfo[0];
3783  Optional<int64_t> EncodedImm =
3784  AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm, false);
3785  if (!EncodedImm)
3786  return None;
3787 
3788  unsigned PtrReg = GEPInfo.SgprParts[0];
3789  return {{
3790  [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3791  [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
3792  }};
3793 }
3794 
3795 InstructionSelector::ComplexRendererFns
3796 AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const {
3797  SmallVector<GEPInfo, 4> AddrInfo;
3798  getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
3799 
3800  if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3801  return None;
3802 
3803  const GEPInfo &GEPInfo = AddrInfo[0];
3804  Register PtrReg = GEPInfo.SgprParts[0];
3805  Optional<int64_t> EncodedImm =
3806  AMDGPU::getSMRDEncodedLiteralOffset32(STI, GEPInfo.Imm);
3807  if (!EncodedImm)
3808  return None;
3809 
3810  return {{
3811  [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3812  [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
3813  }};
3814 }
3815 
3816 InstructionSelector::ComplexRendererFns
3817 AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const {
3818  MachineInstr *MI = Root.getParent();
3819  MachineBasicBlock *MBB = MI->getParent();
3820 
3821  SmallVector<GEPInfo, 4> AddrInfo;
3822  getAddrModeInfo(*MI, *MRI, AddrInfo);
3823 
3824  // FIXME: We should shrink the GEP if the offset is known to fit in 32 bits;
3825  // then we can select all ptr + 32-bit offsets, not just immediate offsets.
3826  if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3827  return None;
3828 
3829  const GEPInfo &GEPInfo = AddrInfo[0];
3830  // SGPR offset is unsigned.
3831  if (!GEPInfo.Imm || GEPInfo.Imm < 0 || !isUInt<32>(GEPInfo.Imm))
3832  return None;
3833 
3834  // If we make it this far we have a load with a 32-bit immediate offset.
3835  // It is OK to select this using an SGPR offset, because we have already
3836  // failed trying to select this load into one of the _IMM variants since
3837  // the _IMM patterns are considered before the _SGPR patterns.
3838  Register PtrReg = GEPInfo.SgprParts[0];
3839  Register OffsetReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
3840  BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), OffsetReg)
3841  .addImm(GEPInfo.Imm);
3842  return {{
3843  [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3844  [=](MachineInstrBuilder &MIB) { MIB.addReg(OffsetReg); }
3845  }};
3846 }
3847 
3848 std::pair<Register, int>
3849 AMDGPUInstructionSelector::selectFlatOffsetImpl(MachineOperand &Root,
3850  uint64_t FlatVariant) const {
3851  MachineInstr *MI = Root.getParent();
3852 
3853  auto Default = std::make_pair(Root.getReg(), 0);
3854 
3855  if (!STI.hasFlatInstOffsets())
3856  return Default;
3857 
3858  Register PtrBase;
3859  int64_t ConstOffset;
3860  std::tie(PtrBase, ConstOffset) =
3861  getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
3862  if (ConstOffset == 0)
3863  return Default;
3864 
3865  unsigned AddrSpace = (*MI->memoperands_begin())->getAddrSpace();
3866  if (!TII.isLegalFLATOffset(ConstOffset, AddrSpace, FlatVariant))
3867  return Default;
3868 
3869  return std::make_pair(PtrBase, ConstOffset);
3870 }
3871 
3872 InstructionSelector::ComplexRendererFns
3873 AMDGPUInstructionSelector::selectFlatOffset(MachineOperand &Root) const {
3874  auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FLAT);
3875 
3876  return {{
3877  [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3878  [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3879  }};
3880 }
3881 
3882 InstructionSelector::ComplexRendererFns
3883 AMDGPUInstructionSelector::selectGlobalOffset(MachineOperand &Root) const {
3884  auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FlatGlobal);
3885 
3886  return {{
3887  [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3888  [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3889  }};
3890 }
3891 
3892 InstructionSelector::ComplexRendererFns
3893 AMDGPUInstructionSelector::selectScratchOffset(MachineOperand &Root) const {
3894  auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FlatScratch);
3895 
3896  return {{
3897  [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3898  [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3899  }};
3900 }
3901 
3902 // Match (64-bit SGPR base) + (zext vgpr offset) + sext(imm offset)
3903 InstructionSelector::ComplexRendererFns
3904 AMDGPUInstructionSelector::selectGlobalSAddr(MachineOperand &Root) const {
3905  Register Addr = Root.getReg();
3906  Register PtrBase;
3907  int64_t ConstOffset;
3908  int64_t ImmOffset = 0;
3909 
3910  // Match the immediate offset first, which canonically is moved as low as
3911  // possible.
3912  std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
3913 
3914  if (ConstOffset != 0) {
3915  if (TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::GLOBAL_ADDRESS,
3916  SIInstrFlags::FlatGlobal)) {
3917  Addr = PtrBase;
3918  ImmOffset = ConstOffset;
3919  } else {
3920  auto PtrBaseDef = getDefSrcRegIgnoringCopies(PtrBase, *MRI);
3921  if (isSGPR(PtrBaseDef->Reg)) {
3922  if (ConstOffset > 0) {
3923  // Offset is too large.
3924  //
3925  // saddr + large_offset -> saddr +
3926  // (voffset = large_offset & ~MaxOffset) +
3927  // (large_offset & MaxOffset);
3928  int64_t SplitImmOffset, RemainderOffset;
3929  std::tie(SplitImmOffset, RemainderOffset) = TII.splitFlatOffset(
3930  ConstOffset, AMDGPUAS::GLOBAL_ADDRESS, SIInstrFlags::FlatGlobal);
3931 
3932  if (isUInt<32>(RemainderOffset)) {
3933  MachineInstr *MI = Root.getParent();
3934  MachineBasicBlock *MBB = MI->getParent();
3935  Register HighBits =
3936  MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3937 
3938  BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
3939  HighBits)
3940  .addImm(RemainderOffset);
3941 
3942  return {{
3943  [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrBase); }, // saddr
3944  [=](MachineInstrBuilder &MIB) {
3945  MIB.addReg(HighBits);
3946  }, // voffset
3947  [=](MachineInstrBuilder &MIB) { MIB.addImm(SplitImmOffset); },
3948  }};
3949  }
3950  }
3951 
3952  // We are adding a 64-bit SGPR and a constant. If the constant bus limit
3953  // is 1, we would need 1 or 2 extra moves for each half of the constant,
3954  // so it is better to do a scalar add and then issue a single VALU
3955  // instruction to materialize zero. Otherwise it takes fewer instructions
3956  // to perform VALU adds with immediates or inline literals.
3957  unsigned NumLiterals =
3958  !TII.isInlineConstant(APInt(32, ConstOffset & 0xffffffff)) +
3959  !TII.isInlineConstant(APInt(32, ConstOffset >> 32));
3960  if (STI.getConstantBusLimit(AMDGPU::V_ADD_U32_e64) > NumLiterals)
3961  return None;
3962  }
3963  }
3964  }
3965 
3966  // Match the variable offset.
3967  auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
3968  if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
3969  // Look through the SGPR->VGPR copy.
3970  Register SAddr =
3971  getSrcRegIgnoringCopies(AddrDef->MI->getOperand(1).getReg(), *MRI);
3972 
3973  if (SAddr && isSGPR(SAddr)) {
3974  Register PtrBaseOffset = AddrDef->MI->getOperand(2).getReg();
3975 
3976  // It's possible voffset is an SGPR here, but the copy to VGPR will be
3977  // inserted later.
3978  if (Register VOffset = matchZeroExtendFromS32(*MRI, PtrBaseOffset)) {
3979  return {{[=](MachineInstrBuilder &MIB) { // saddr
3980  MIB.addReg(SAddr);
3981  },
3982  [=](MachineInstrBuilder &MIB) { // voffset
3983  MIB.addReg(VOffset);
3984  },
3985  [=](MachineInstrBuilder &MIB) { // offset
3986  MIB.addImm(ImmOffset);
3987  }}};
3988  }
3989  }
3990  }
3991 
3992  // FIXME: We should probably have folded COPY (G_IMPLICIT_DEF) earlier, and
3993  // drop this.
3994  if (AddrDef->MI->getOpcode() == AMDGPU::G_IMPLICIT_DEF ||
3995  AddrDef->MI->getOpcode() == AMDGPU::G_CONSTANT || !isSGPR(AddrDef->Reg))
3996  return None;
3997 
3998  // It's cheaper to materialize a single 32-bit zero for vaddr than the two
3999  // moves required to copy a 64-bit SGPR to VGPR.
4000  MachineInstr *MI = Root.getParent();
4001  MachineBasicBlock *MBB = MI->getParent();
4002  Register VOffset = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
4003 
4004  BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32), VOffset)
4005  .addImm(0);
4006 
4007  return {{
4008  [=](MachineInstrBuilder &MIB) { MIB.addReg(AddrDef->Reg); }, // saddr
4009  [=](MachineInstrBuilder &MIB) { MIB.addReg(VOffset); }, // voffset
4010  [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
4011  }};
4012 }
4013 
4014 InstructionSelector::ComplexRendererFns
4015 AMDGPUInstructionSelector::selectScratchSAddr(MachineOperand &Root) const {
4016  Register Addr = Root.getReg();
4017  Register PtrBase;
4018  int64_t ConstOffset;
4019  int64_t ImmOffset = 0;
4020 
4021  // Match the immediate offset first, which canonically is moved as low as
4022  // possible.
4023  std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
4024 
4025  if (ConstOffset != 0 &&
4026  TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::PRIVATE_ADDRESS,
4027  SIInstrFlags::FlatScratch)) {
4028  Addr = PtrBase;
4029  ImmOffset = ConstOffset;
4030  }
4031 
4032  auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
4033  if (AddrDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX) {
4034  int FI = AddrDef->MI->getOperand(1).getIndex();
4035  return {{
4036  [=](MachineInstrBuilder &MIB) { MIB.addFrameIndex(FI); }, // saddr
4037  [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
4038  }};
4039  }
4040 
4041  Register SAddr = AddrDef->Reg;
4042 
4043  if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
4044  Register LHS = AddrDef->MI->getOperand(1).getReg();
4045  Register RHS = AddrDef->MI->getOperand(2).getReg();
4046  auto LHSDef = getDefSrcRegIgnoringCopies(LHS, *MRI);
4047  auto RHSDef = getDefSrcRegIgnoringCopies(RHS, *MRI);
4048 
4049  if (LHSDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX &&
4050  isSGPR(RHSDef->Reg)) {
4051  int FI = LHSDef->MI->getOperand(1).getIndex();
4052  MachineInstr &I = *Root.getParent();
4053  MachineBasicBlock *BB = I.getParent();
4054  const DebugLoc &DL = I.getDebugLoc();
4055  SAddr = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
4056 
4057  BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_I32), SAddr)
4058  .addFrameIndex(FI)
4059  .addReg(RHSDef->Reg);
4060  }
4061  }
4062 
4063  if (!isSGPR(SAddr))
4064  return None;
4065 
4066  return {{
4067  [=](MachineInstrBuilder &MIB) { MIB.addReg(SAddr); }, // saddr
4068  [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
4069  }};
4070 }
4071 
4072 // Check whether the flat scratch SVS swizzle bug affects this access.
4073 bool AMDGPUInstructionSelector::checkFlatScratchSVSSwizzleBug(
4074  Register VAddr, Register SAddr, uint64_t ImmOffset) const {
4075  if (!Subtarget->hasFlatScratchSVSSwizzleBug())
4076  return false;
4077 
4078  // The bug affects the swizzling of SVS accesses if there is any carry out
4079  // from the two low order bits (i.e. from bit 1 into bit 2) when adding
4080  // voffset to (soffset + inst_offset).
4081  auto VKnown = KnownBits->getKnownBits(VAddr);
4082  auto SKnown = KnownBits::computeForAddSub(
4083  true, false, KnownBits->getKnownBits(SAddr),
4084  KnownBits::makeConstant(APInt(32, ImmOffset)));
4085  uint64_t VMax = VKnown.getMaxValue().getZExtValue();
4086  uint64_t SMax = SKnown.getMaxValue().getZExtValue();
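  // A carry out of bit 1 is possible whenever the maxima of the low two bits
  // of the two addends can sum past 3.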
4087  return (VMax & 3) + (SMax & 3) >= 4;
4088 }
4089 
4090 InstructionSelector::ComplexRendererFns
4091 AMDGPUInstructionSelector::selectScratchSVAddr(MachineOperand &Root) const {
4092  Register Addr = Root.getReg();
4093  Register PtrBase;
4094  int64_t ConstOffset;
4095  int64_t ImmOffset = 0;
4096 
4097  // Match the immediate offset first, which canonically is moved as low as
4098  // possible.
4099  std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
4100 
4101  if (ConstOffset != 0 &&
4102  TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::PRIVATE_ADDRESS, true)) {
4103  Addr = PtrBase;
4104  ImmOffset = ConstOffset;
4105  }
4106 
4107  auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
4108  if (AddrDef->MI->getOpcode() != AMDGPU::G_PTR_ADD)
4109  return None;
4110 
4111  Register RHS = AddrDef->MI->getOperand(2).getReg();
4112  if (RBI.getRegBank(RHS, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID)
4113  return None;
4114 
4115  Register LHS = AddrDef->MI->getOperand(1).getReg();
4116  auto LHSDef = getDefSrcRegIgnoringCopies(LHS, *MRI);
4117 
4118  if (checkFlatScratchSVSSwizzleBug(RHS, LHS, ImmOffset))
4119  return None;
4120 
4121  if (LHSDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX) {
4122  int FI = LHSDef->MI->getOperand(1).getIndex();
4123  return {{
4124  [=](MachineInstrBuilder &MIB) { MIB.addReg(RHS); }, // vaddr
4125  [=](MachineInstrBuilder &MIB) { MIB.addFrameIndex(FI); }, // saddr
4126  [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
4127  }};
4128  }
4129 
4130  if (!isSGPR(LHS))
4131  return None;
4132 
4133  return {{
4134  [=](MachineInstrBuilder &MIB) { MIB.addReg(RHS); }, // vaddr
4135  [=](MachineInstrBuilder &MIB) { MIB.addReg(LHS); }, // saddr
4136  [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
4137  }};
4138 }
4139 
4140 InstructionSelector::ComplexRendererFns
4141 AMDGPUInstructionSelector::selectMUBUFScratchOffen(MachineOperand &Root) const {
4142  MachineInstr *MI = Root.getParent();
4143  MachineBasicBlock *MBB = MI->getParent();
4144  const MachineFunction *MF = MBB->getParent();
4145  const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
4146 
4147  int64_t Offset = 0;
4148  if (mi_match(Root.getReg(), *MRI, m_ICst(Offset)) &&
4149  !SIInstrInfo::isLegalMUBUFImmOffset(Offset)) {
4150  Register HighBits = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
4151 
4152  // TODO: Should this be inside the render function? The iterator seems to
4153  // move.
4154  BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
4155  HighBits)
4156  .addImm(Offset & ~4095);
4157 
4158  return {{[=](MachineInstrBuilder &MIB) { // rsrc
4159  MIB.addReg(Info->getScratchRSrcReg());
4160  },
4161  [=](MachineInstrBuilder &MIB) { // vaddr
4162  MIB.addReg(HighBits);
4163  },
4164  [=](MachineInstrBuilder &MIB) { // soffset
4165  // Use constant zero for soffset and rely on eliminateFrameIndex
4166  // to choose the appropriate frame register if need be.
4167  MIB.addImm(0);
4168  },
4169  [=](MachineInstrBuilder &MIB) { // offset
4170  MIB.addImm(Offset & 4095);
4171  }}};
4172  }
4173 
4174  assert(Offset == 0 || Offset == -1);
4175 
4176  // Try to fold a frame index directly into the MUBUF vaddr field, and any
4177  // offsets.
4178  Optional<int> FI;
4179  Register VAddr = Root.getReg();
4180  if (const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg())) {
4181  Register PtrBase;
4182  int64_t ConstOffset;
4183  std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(VAddr, *MRI);
4184  if (ConstOffset != 0) {
4185  if (SIInstrInfo::isLegalMUBUFImmOffset(ConstOffset) &&
4186  (!STI.privateMemoryResourceIsRangeChecked() ||
4187  KnownBits->signBitIsZero(PtrBase))) {
4188  const MachineInstr *PtrBaseDef = MRI->getVRegDef(PtrBase);
4189  if (PtrBaseDef->getOpcode() == AMDGPU::G_FRAME_INDEX)
4190  FI = PtrBaseDef->getOperand(1).getIndex();
4191  else
4192  VAddr = PtrBase;
4193  Offset = ConstOffset;
4194  }
4195  } else if (RootDef->getOpcode() == AMDGPU::G_FRAME_INDEX) {
4196  FI = RootDef->getOperand(1).getIndex();
4197  }
4198  }
4199 
4200  return {{[=](MachineInstrBuilder &MIB) { // rsrc
4201  MIB.addReg(Info->getScratchRSrcReg());
4202  },
4203  [=](MachineInstrBuilder &MIB) { // vaddr
4204  if (FI.hasValue())
4205  MIB.addFrameIndex(FI.getValue());
4206  else
4207  MIB.addReg(VAddr);
4208  },
4209  [=](MachineInstrBuilder &MIB) { // soffset
4210  // Use constant zero for soffset and rely on eliminateFrameIndex
4211  // to choose the appropriate frame register if need be.
4212  MIB.addImm(0);
4213  },
4214  [=](MachineInstrBuilder &MIB) { // offset
4215  MIB.addImm(Offset);
4216  }}};
4217 }
4218 
4219 bool AMDGPUInstructionSelector::isDSOffsetLegal(Register Base,
4220  int64_t Offset) const {
4221  if (!isUInt<16>(Offset))
4222  return false;
4223 
4224  if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
4225  return true;
4226 
4227  // On Southern Islands, instructions with a negative base value and an
4228  // offset don't seem to work.
4229  return KnownBits->signBitIsZero(Base);
4230 }
4231 
4232 bool AMDGPUInstructionSelector::isDSOffset2Legal(Register Base, int64_t Offset0,
4233  int64_t Offset1,
4234  unsigned Size) const {
4235  if (Offset0 % Size != 0 || Offset1 % Size != 0)
4236  return false;
4237  if (!isUInt<8>(Offset0 / Size) || !isUInt<8>(Offset1 / Size))
4238  return false;
4239 
4240  if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
4241  return true;
4242 
4243  // On Southern Islands, instructions with a negative base value and an
4244  // offset don't seem to work.
4245  return KnownBits->signBitIsZero(Base);
4246 }
4247 
4248 bool AMDGPUInstructionSelector::isUnneededShiftMask(const MachineInstr &MI,
4249  unsigned ShAmtBits) const {
4250  assert(MI.getOpcode() == TargetOpcode::G_AND);
4251 
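  // The AND is unneeded if it preserves at least the low ShAmtBits bits,
  // which are the only bits of the shift amount the hardware reads.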
4252  Optional<APInt> RHS = getIConstantVRegVal(MI.getOperand(2).getReg(), *MRI);
4253  if (!RHS)
4254  return false;
4255 
4256  if (RHS->countTrailingOnes() >= ShAmtBits)
4257  return true;
4258 
4259  const APInt &LHSKnownZeros =
4260  KnownBits->getKnownZeroes(MI.getOperand(1).getReg());
4261  return (LHSKnownZeros | *RHS).countTrailingOnes() >= ShAmtBits;
4262 }
4263 
4264 // Return the wave level SGPR base address if this is a wave address.
4265 static Register getWaveAddress(const MachineInstr *Def) {
4266  return Def->getOpcode() == AMDGPU::G_AMDGPU_WAVE_ADDRESS
4267  ? Def->getOperand(1).getReg()
4268  : Register();
4269 }
4270 
4271 InstructionSelector::ComplexRendererFns
4272 AMDGPUInstructionSelector::selectMUBUFScratchOffset(
4273  MachineOperand &Root) const {
4274  Register Reg = Root.getReg();
4275  const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
4276 
4277  const MachineInstr *Def = MRI->getVRegDef(Reg);
4278  if (Register WaveBase = getWaveAddress(Def)) {
4279  return {{
4280  [=](MachineInstrBuilder &MIB) { // rsrc
4281  MIB.addReg(Info->getScratchRSrcReg());
4282  },
4283  [=](MachineInstrBuilder &MIB) { // soffset
4284  MIB.addReg(WaveBase);
4285  },
4286  [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // offset
4287  }};
4288  }
4289 
4290  int64_t Offset = 0;
4291 
4292  // FIXME: Copy check is a hack
4293  Register BasePtr;
4294  if (mi_match(Reg, *MRI, m_GPtrAdd(m_Reg(BasePtr), m_Copy(m_ICst(Offset))))) {
4295  if (!SIInstrInfo::isLegalMUBUFImmOffset(Offset))
4296  return {};
4297  const MachineInstr *BasePtrDef = MRI->getVRegDef(BasePtr);
4298  Register WaveBase = getWaveAddress(BasePtrDef);
4299  if (!WaveBase)
4300  return {};
4301 
4302  return {{
4303  [=](MachineInstrBuilder &MIB) { // rsrc
4304  MIB.addReg(Info->getScratchRSrcReg());
4305  },
4306  [=](MachineInstrBuilder &MIB) { // soffset
4307  MIB.addReg(WaveBase);
4308  },
4309  [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset
4310  }};
4311  }
4312 
4313  if (!mi_match(Root.getReg(), *MRI, m_ICst(Offset)) ||
4314  !SIInstrInfo::isLegalMUBUFImmOffset(Offset))
4315  return {};
4316 
4317  return {{
4318  [=](MachineInstrBuilder &MIB) { // rsrc
4319  MIB.addReg(Info->getScratchRSrcReg());
4320  },
4321  [=](MachineInstrBuilder &MIB) { // soffset
4322  MIB.addImm(0);
4323  },
4324  [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset
4325  }};
4326 }
4327 
4328 std::pair<Register, unsigned>
4329 AMDGPUInstructionSelector::selectDS1Addr1OffsetImpl(MachineOperand &Root) const {
4330  const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
4331  if (!RootDef)
4332  return std::make_pair(Root.getReg(), 0);
4333 
4334  int64_t ConstAddr = 0;
4335 
4336  Register PtrBase;
4337  int64_t Offset;
4338  std::tie(PtrBase, Offset) =
4339  getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
4340 
4341  if (Offset) {
4342  if (isDSOffsetLegal(PtrBase, Offset)) {
4343  // (add n0, c0)
4344  return std::make_pair(PtrBase, Offset);
4345  }
4346  } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
4347  // TODO
4348 
4349 
4350  } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
4351  // TODO
4352 
4353  }
4354 
4355  return std::make_pair(Root.getReg(), 0);
4356 }
4357 
4358 InstructionSelector::ComplexRendererFns
4359 AMDGPUInstructionSelector::selectDS1Addr1Offset(MachineOperand &Root) const {
4360  Register Reg;
4361  unsigned Offset;
4362  std::tie(Reg, Offset) = selectDS1Addr1OffsetImpl(Root);
4363  return {{
4364  [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
4365  [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }
4366  }};
4367 }
4368 
4369 InstructionSelector::ComplexRendererFns
4370 AMDGPUInstructionSelector::selectDS64Bit4ByteAligned(MachineOperand &Root) const {
4371  return selectDSReadWrite2(Root, 4);
4372 }
4373 
4374 InstructionSelector::ComplexRendererFns
4375 AMDGPUInstructionSelector::selectDS128Bit8ByteAligned(MachineOperand &Root) const {
4376  return selectDSReadWrite2(Root, 8);
4377 }
4378 
4379 InstructionSelector::ComplexRendererFns
4380 AMDGPUInstructionSelector::selectDSReadWrite2(MachineOperand &Root,
4381  unsigned Size) const {
4382  Register Reg;
4383  unsigned Offset;
4384  std::tie(Reg, Offset) = selectDSReadWrite2Impl(Root, Size);
4385  return {{
4386  [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
4387  [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); },
4388  [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset+1); }
4389  }};
4390 }
4391 
4392 std::pair<Register, unsigned>
4393 AMDGPUInstructionSelector::selectDSReadWrite2Impl(MachineOperand &Root,
4394  unsigned Size) const {
4395  const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
4396  if (!RootDef)
4397  return std::make_pair(Root.getReg(), 0);
4398 
4399  int64_t ConstAddr = 0;
4400 
4401  Register PtrBase;
4402  int64_t Offset;
4403  std::tie(PtrBase, Offset) =
4404  getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
4405 
4406  if (Offset) {
4407  int64_t OffsetValue0 = Offset;
4408  int64_t OffsetValue1 = Offset + Size;
4409  if (isDSOffset2Legal(PtrBase, OffsetValue0, OffsetValue1, Size)) {
4410  // (add n0, c0)
4411  return std::make_pair(PtrBase, OffsetValue0 / Size);
4412  }
4413  } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
4414  // TODO
4415 
4416  } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
4417  // TODO
4418 
4419  }
4420 
4421  return std::make_pair(Root.getReg(), 0);
4422 }
4423 
4424 /// If \p Root is a G_PTR_ADD with a G_CONSTANT on the right hand side, return
4425 /// the base value with the constant offset. There may be intervening copies
4426 /// between \p Root and the identified constant. Returns \p Root, 0 if this does
4427 /// not match the pattern.
4428 std::pair<Register, int64_t>
4429 AMDGPUInstructionSelector::getPtrBaseWithConstantOffset(
4430  Register Root, const MachineRegisterInfo &MRI) const {
4431  MachineInstr *RootI = getDefIgnoringCopies(Root, MRI);
4432  if (RootI->getOpcode() != TargetOpcode::G_PTR_ADD)
4433  return {Root, 0};
4434 
4435  MachineOperand &RHS = RootI->getOperand(2);
4436  Optional<ValueAndVReg> MaybeOffset =
4437  getIConstantVRegValWithLookThrough(RHS.getReg(), MRI);
4438  if (!MaybeOffset)
4439  return {Root, 0};
4440  return {RootI->getOperand(1).getReg(), MaybeOffset->Value.getSExtValue()};
4441 }
4442 
4443 static void addZeroImm(MachineInstrBuilder &MIB) {
4444  MIB.addImm(0);
4445 }
4446 
4447 /// Return a resource descriptor for use with an arbitrary 64-bit pointer. If \p
4448 /// BasePtr is not valid, a null base pointer will be used.
4449 static Register buildRSRC(MachineIRBuilder &B, MachineRegisterInfo &MRI,
4450  uint32_t FormatLo, uint32_t FormatHi,
4451  Register BasePtr) {
4452  Register RSrc2 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
4453  Register RSrc3 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
4454  Register RSrcHi = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
4455  Register RSrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);
4456 
4457  B.buildInstr(AMDGPU::S_MOV_B32)
4458  .addDef(RSrc2)
4459  .addImm(FormatLo);
4460  B.buildInstr(AMDGPU::S_MOV_B32)
4461  .addDef(RSrc3)
4462  .addImm(FormatHi);
4463 
4464  // Build the 64-bit half holding the constants as its own subregister
4465  // before building the full 128-bit register. If we are building multiple
4466  // resource descriptors, this will allow CSEing of the 2-component register.
4467  B.buildInstr(AMDGPU::REG_SEQUENCE)
4468  .addDef(RSrcHi)
4469  .addReg(RSrc2)
4470  .addImm(AMDGPU::sub0)
4471  .addReg(RSrc3)
4472  .addImm(AMDGPU::sub1);
4473 
4474  Register RSrcLo = BasePtr;
4475  if (!BasePtr) {
4476  RSrcLo = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
4477  B.buildInstr(AMDGPU::S_MOV_B64)
4478  .addDef(RSrcLo)
4479  .addImm(0);
4480  }
4481 
4482  B.buildInstr(AMDGPU::REG_SEQUENCE)
4483  .addDef(RSrc)
4484  .addReg(RSrcLo)
4485  .addImm(AMDGPU::sub0_sub1)
4486  .addReg(RSrcHi)
4487  .addImm(AMDGPU::sub2_sub3);
4488 
4489  return RSrc;
4490 }
4491 
4492 static Register buildAddr64RSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
4493  const SIInstrInfo &TII, Register BasePtr) {
4494  uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
4495 
4496  // FIXME: Why are half the "default" bits ignored based on the addressing
4497  // mode?
4498  return buildRSRC(B, MRI, 0, Hi_32(DefaultFormat), BasePtr);
4499 }
4500 
4501 static Register buildOffsetSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
4502  const SIInstrInfo &TII, Register BasePtr) {
4503  uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
4504 
4505  // FIXME: Why are half the "default" bits ignored based on the addressing
4506  // mode?
4507  return buildRSRC(B, MRI, -1, Hi_32(DefaultFormat), BasePtr);
4508 }
4509 
4510 AMDGPUInstructionSelector::MUBUFAddressData
4511 AMDGPUInstructionSelector::parseMUBUFAddress(Register Src) const {
4512  MUBUFAddressData Data;
4513  Data.N0 = Src;
4514 
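  // Peel a constant offset first, then try to split the remaining base into
  // the two ptr_add components of an addr64 address.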
4515  Register PtrBase;
4516  int64_t Offset;
4517 
4518  std::tie(PtrBase, Offset) = getPtrBaseWithConstantOffset(Src, *MRI);
4519  if (isUInt<32>(Offset)) {
4520  Data.N0 = PtrBase;
4521  Data.Offset = Offset;
4522  }
4523 
4524  if (MachineInstr *InputAdd
4525  = getOpcodeDef(TargetOpcode::G_PTR_ADD, Data.N0, *MRI)) {
4526  Data.N2 = InputAdd->getOperand(1).getReg();
4527  Data.N3 = InputAdd->getOperand(2).getReg();
4528 
4529  // FIXME: Need to fix the extra SGPR->VGPR copies that get inserted
4530  // FIXME: We don't know that this was defined by operand 0
4531  //
4532  // TODO: Remove this when we have copy folding optimizations after
4533  // RegBankSelect.
4534  Data.N2 = getDefIgnoringCopies(Data.N2, *MRI)->getOperand(0).getReg();
4535  Data.N3 = getDefIgnoringCopies(Data.N3, *MRI)->getOperand(0).getReg();
4536  }
4537 
4538  return Data;
4539 }
4540 
4541 /// Return true if the addr64 MUBUF mode should be used for the given address.
4542 bool AMDGPUInstructionSelector::shouldUseAddr64(MUBUFAddressData Addr) const {
4543  // (ptr_add N2, N3) -> addr64, or
4544  // (ptr_add (ptr_add N2, N3), C1) -> addr64
4545  if (Addr.N2)
4546  return true;
4547 
4548  const RegisterBank *N0Bank = RBI.getRegBank(Addr.N0, *MRI, TRI);
4549  return N0Bank->getID() == AMDGPU::VGPRRegBankID;
4550 }
4551 
4552 /// Split an immediate offset \p ImmOffset depending on whether it fits in the
4553 /// immediate field. Modifies \p ImmOffset and sets \p SOffset to the variable
4554 /// component.
4555 void AMDGPUInstructionSelector::splitIllegalMUBUFOffset(
4556  MachineIRBuilder &B, Register &SOffset, int64_t &ImmOffset) const {
4557  if (SIInstrInfo::isLegalMUBUFImmOffset(ImmOffset))
4558  return;
4559 
4560  // Illegal offset, store it in soffset.
4561  SOffset = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
4562  B.buildInstr(AMDGPU::S_MOV_B32)
4563  .addDef(SOffset)
4564  .addImm(ImmOffset);
4565  ImmOffset = 0;
4566 }
4567 
4568 bool AMDGPUInstructionSelector::selectMUBUFAddr64Impl(
4569  MachineOperand &Root, Register &VAddr, Register &RSrcReg,
4570  Register &SOffset, int64_t &Offset) const {
4571  // FIXME: Predicates should stop this from reaching here.
4572  // addr64 bit was removed for volcanic islands.
4573  if (!STI.hasAddr64() || STI.useFlatForGlobal())
4574  return false;
4575 
4576  MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
4577  if (!shouldUseAddr64(AddrData))
4578  return false;
4579 
4580  Register N0 = AddrData.N0;
4581  Register N2 = AddrData.N2;
4582  Register N3 = AddrData.N3;
4583  Offset = AddrData.Offset;
4584 
4585  // Base pointer for the SRD.
4586  Register SRDPtr;
4587 
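  // Decide which component becomes the SRD base pointer and which becomes
  // the 64-bit vaddr, based on which registers are divergent.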
4588  if (N2) {
4589  if (RBI.getRegBank(N2, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4590  assert(N3);
4591  if (RBI.getRegBank(N3, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4592  // Both N2 and N3 are divergent. Use N0 (the result of the add) as the
4593  // addr64, and construct the default resource from a 0 address.
4594  VAddr = N0;
4595  } else {
4596  SRDPtr = N3;
4597  VAddr = N2;
4598  }
4599  } else {
4600  // N2 is not divergent.
4601  SRDPtr = N2;
4602  VAddr = N3;
4603  }
4604  } else if (RBI.getRegBank(N0, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4605  // Use the default null pointer in the resource
4606  VAddr = N0;
4607  } else {
4608  // N0 -> offset, or
4609  // (N0 + C1) -> offset
4610  SRDPtr = N0;
4611  }
4612 
4613  MachineIRBuilder B(*Root.getParent());
4614  RSrcReg = buildAddr64RSrc(B, *MRI, TII, SRDPtr);
4615  splitIllegalMUBUFOffset(B, SOffset, Offset);
4616  return true;
4617 }
4618 
4619 bool AMDGPUInstructionSelector::selectMUBUFOffsetImpl(
4620  MachineOperand &Root, Register &RSrcReg, Register &SOffset,
4621  int64_t &Offset) const {
4622 
4623  // FIXME: Pattern should not reach here.
4624  if (STI.useFlatForGlobal())
4625  return false;
4626 
4627  MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
4628  if (shouldUseAddr64(AddrData))
4629  return false;
4630 
4631  // N0 -> offset, or
4632  // (N0 + C1) -> offset
4633  Register SRDPtr = AddrData.N0;
4634  Offset = AddrData.Offset;
4635 
4636  // TODO: Look through extensions for 32-bit soffset.
4637  MachineIRBuilder B(*Root.getParent());
4638 
4639  RSrcReg = buildOffsetSrc(B, *MRI, TII, SRDPtr);
4640  splitIllegalMUBUFOffset(B, SOffset, Offset);
4641  return true;
4642 }
4643 
4644 InstructionSelector::ComplexRendererFns
4645 AMDGPUInstructionSelector::selectMUBUFAddr64(MachineOperand &Root) const {
4646  Register VAddr;
4647  Register RSrcReg;
4648  Register SOffset;
4649  int64_t Offset = 0;
4650 
4651  if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
4652  return {};
4653 
4654  // FIXME: Use defaulted operands for trailing 0s and remove from the complex
4655  // pattern.
4656  return {{
4657  [=](MachineInstrBuilder &MIB) { // rsrc
4658  MIB.addReg(RSrcReg);
4659  },
4660  [=](MachineInstrBuilder &MIB) { // vaddr
4661  MIB.addReg(VAddr);
4662  },
4663  [=](MachineInstrBuilder &MIB) { // soffset
4664  if (SOffset)
4665  MIB.addReg(SOffset);
4666  else
4667  MIB.addImm(0);
4668  },
4669  [=](MachineInstrBuilder &MIB) { // offset
4670  MIB.addImm(Offset);
4671  },
4672  addZeroImm, // cpol
4673  addZeroImm, // tfe
4674  addZeroImm // swz
4675  }};
4676 }
4677 
4678 InstructionSelector::ComplexRendererFns
4679 AMDGPUInstructionSelector::selectMUBUFOffset(MachineOperand &Root) const {
4680  Register RSrcReg;
4681  Register SOffset;
4682  int64_t Offset = 0;
4683 
4684  if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
4685  return {};
4686 
4687  return {{
4688  [=](MachineInstrBuilder &MIB) { // rsrc
4689  MIB.addReg(RSrcReg);
4690  },
4691  [=](MachineInstrBuilder &MIB) { // soffset
4692  if (SOffset)
4693  MIB.addReg(SOffset);
4694  else
4695  MIB.addImm(0);
4696  },
4697  [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
4698  addZeroImm, // cpol
4699  addZeroImm, // tfe
4700  addZeroImm, // swz
4701  }};
4702 }
4703 
4704 InstructionSelector::ComplexRendererFns
4705 AMDGPUInstructionSelector::selectMUBUFAddr64Atomic(MachineOperand &Root) const {
4706  Register VAddr;
4707  Register RSrcReg;
4708  Register SOffset;
4709  int64_t Offset = 0;
4710 
4711  if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
4712  return {};
4713 
4714  // FIXME: Use defaulted operands for trailing 0s and remove from the complex
4715  // pattern.
4716  return {{
4717  [=](MachineInstrBuilder &MIB) { // rsrc
4718  MIB.addReg(RSrcReg);
4719  },
4720  [=](MachineInstrBuilder &MIB) { // vaddr
4721  MIB.addReg(VAddr);
4722  },
4723  [=](MachineInstrBuilder &MIB) { // soffset
4724  if (SOffset)
4725  MIB.addReg(SOffset);
4726  else
4727  MIB.addImm(0);
4728  },
4729  [=](MachineInstrBuilder &MIB) { // offset
4730  MIB.addImm(Offset);
4731  },
4732  [=](MachineInstrBuilder &MIB) {
4733  MIB.addImm(AMDGPU::CPol::GLC); // cpol
4734  }
4735  }};
4736 }
4737 
4738 InstructionSelector::ComplexRendererFns
4739 AMDGPUInstructionSelector::selectMUBUFOffsetAtomic(MachineOperand &Root) const {
4740  Register RSrcReg;
4741  Register SOffset;
4742  int64_t Offset = 0;
4743 
4744  if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
4745  return {};
4746 
4747  return {{
4748  [=](MachineInstrBuilder &MIB) { // rsrc
4749  MIB.addReg(RSrcReg);
4750  },
4751  [=](MachineInstrBuilder &MIB) { // soffset
4752  if (SOffset)
4753  MIB.addReg(SOffset);
4754  else
4755  MIB.addImm(0);
4756  },
4757  [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
4758  [=](MachineInstrBuilder &MIB) { MIB.addImm(AMDGPU::CPol::GLC); } // cpol
4759  }};
4760 }
4761 
4762 /// Get an immediate that must fit in 32 bits, treated as zero extended.
4763 static Optional<uint64_t> getConstantZext32Val(Register Reg,
4764  const MachineRegisterInfo &MRI) {
4765  // getIConstantVRegVal sexts any values, so see if that matters.
4766  Optional<int64_t> OffsetVal = getIConstantVRegVal(Reg, MRI);
4767  if (!OffsetVal || !isInt<32>(*OffsetVal))
4768  return None;
4769  return Lo_32(*OffsetVal);
4770 }
4771 
4772 InstructionSelector::ComplexRendererFns
4773 AMDGPUInstructionSelector::selectSMRDBufferImm(MachineOperand &Root) const {
4774  Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
4775  if (!OffsetVal)
4776  return {};
4777 
4778  Optional<int64_t> EncodedImm =
4779  AMDGPU::getSMRDEncodedOffset(STI, *OffsetVal, true);
4780  if (!EncodedImm)
4781  return {};
4782 
4783  return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); } }};
4784 }
4785 
4786 InstructionSelector::ComplexRendererFns
4787 AMDGPUInstructionSelector::selectSMRDBufferImm32(MachineOperand &Root) const {
4788  assert(STI.getGeneration() == AMDGPUSubtarget::SEA_ISLANDS);
4789 
4790  Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
4791  if (!OffsetVal)
4792  return {};
4793 
4794  Optional<int64_t> EncodedImm
4795  = AMDGPU::getSMRDEncodedLiteralOffset32(STI, *OffsetVal);
4796  if (!EncodedImm)
4797  return {};
4798 
4799  return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); } }};
4800 }
4801 
4802 void AMDGPUInstructionSelector::renderTruncImm32(MachineInstrBuilder &MIB,
4803  const MachineInstr &MI,
4804  int OpIdx) const {
4805  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
4806  "Expected G_CONSTANT");
4807  MIB.addImm(MI.getOperand(1).getCImm()->getSExtValue());
4808 }
4809 
4810 void AMDGPUInstructionSelector::renderNegateImm(MachineInstrBuilder &MIB,
4811  const MachineInstr &MI,
4812  int OpIdx) const {
4813  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
4814  "Expected G_CONSTANT");
4815  MIB.addImm(-MI.getOperand(1).getCImm()->getSExtValue());
4816 }
4817 
4818 void AMDGPUInstructionSelector::renderBitcastImm(MachineInstrBuilder &MIB,
4819  const MachineInstr &MI,
4820  int OpIdx) const {
4821  assert(OpIdx == -1);
4822 
4823  const MachineOperand &Op = MI.getOperand(1);
4824  if (MI.getOpcode() == TargetOpcode::G_FCONSTANT)
4825  MIB.addImm(Op.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
4826  else {
4827  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT");
4828  MIB.addImm(Op.getCImm()->getSExtValue());
4829  }
4830 }
4831 
4832 void AMDGPUInstructionSelector::renderPopcntImm(MachineInstrBuilder &MIB,
4833  const MachineInstr &MI,
4834  int OpIdx) const {
4835  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
4836  "Expected G_CONSTANT");
4837  MIB.addImm(MI.getOperand(1).getCImm()->getValue().countPopulation());
4838 }
4839 
4840 /// This only really exists to satisfy DAG type checking machinery, so is a
4841 /// no-op here.
4842 void AMDGPUInstructionSelector::renderTruncTImm(MachineInstrBuilder &MIB,
4843  const MachineInstr &MI,
4844  int OpIdx) const {
4845  MIB.addImm(MI.getOperand(OpIdx).getImm());
4846 }
4847 
4848 void AMDGPUInstructionSelector::renderExtractCPol(MachineInstrBuilder &MIB,
4849  const MachineInstr &MI,
4850  int OpIdx) const {
4851  assert(OpIdx >= 0 && "expected to match an immediate operand");
4852  MIB.addImm(MI.getOperand(OpIdx).getImm() & AMDGPU::CPol::ALL);
4853 }
4854 
4855 void AMDGPUInstructionSelector::renderExtractSWZ(MachineInstrBuilder &MIB,
4856  const MachineInstr &MI,
4857  int OpIdx) const {
4858  assert(OpIdx >= 0 && "expected to match an immediate operand");
4859  MIB.addImm((MI.getOperand(OpIdx).getImm() >> 3) & 1);
4860 }
4861 
4862 void AMDGPUInstructionSelector::renderSetGLC(MachineInstrBuilder &MIB,
4863  const MachineInstr &MI,
4864  int OpIdx) const {
4865  assert(OpIdx >= 0 && "expected to match an immediate operand");
4866  MIB.addImm(MI.getOperand(OpIdx).getImm() | AMDGPU::CPol::GLC);
4867 }
4868 
4869 void AMDGPUInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB,
4870  const MachineInstr &MI,
4871  int OpIdx) const {
4872  MIB.addFrameIndex((MI.getOperand(1).getIndex()));
4873 }
4874 
4875 bool AMDGPUInstructionSelector::isInlineImmediate16(int64_t Imm) const {
4876  return AMDGPU::isInlinableLiteral16(Imm, STI.hasInv2PiInlineImm());
4877 }
4878 
4879 bool AMDGPUInstructionSelector::isInlineImmediate32(int64_t Imm) const {
4880  return AMDGPU::isInlinableLiteral32(Imm, STI.hasInv2PiInlineImm());
4881 }
4882 
4883 bool AMDGPUInstructionSelector::isInlineImmediate64(int64_t Imm) const {
4884  return AMDGPU::isInlinableLiteral64(Imm, STI.hasInv2PiInlineImm());
4885 }
4886 
4887 bool AMDGPUInstructionSelector::isInlineImmediate(const APFloat &Imm) const {
4888  return TII.