SIISelLowering.cpp
1 //===-- SIISelLowering.cpp - SI DAG Lowering Implementation ---------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// Custom DAG lowering for SI
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #if defined(_MSC_VER) || defined(__MINGW32__)
15 // Provide M_PI.
16 #define _USE_MATH_DEFINES
17 #endif
18 
19 #include "SIISelLowering.h"
20 #include "AMDGPU.h"
21 #include "AMDGPUSubtarget.h"
22 #include "AMDGPUTargetMachine.h"
23 #include "SIDefines.h"
24 #include "SIInstrInfo.h"
25 #include "SIMachineFunctionInfo.h"
26 #include "SIRegisterInfo.h"
28 #include "Utils/AMDGPUBaseInfo.h"
29 #include "llvm/ADT/APFloat.h"
30 #include "llvm/ADT/APInt.h"
31 #include "llvm/ADT/ArrayRef.h"
32 #include "llvm/ADT/BitVector.h"
33 #include "llvm/ADT/SmallVector.h"
34 #include "llvm/ADT/Statistic.h"
35 #include "llvm/ADT/StringRef.h"
36 #include "llvm/ADT/StringSwitch.h"
37 #include "llvm/ADT/Twine.h"
38 #include "llvm/CodeGen/Analysis.h"
56 #include "llvm/IR/Constants.h"
57 #include "llvm/IR/DataLayout.h"
58 #include "llvm/IR/DebugLoc.h"
59 #include "llvm/IR/DerivedTypes.h"
60 #include "llvm/IR/DiagnosticInfo.h"
61 #include "llvm/IR/Function.h"
62 #include "llvm/IR/GlobalValue.h"
63 #include "llvm/IR/InstrTypes.h"
64 #include "llvm/IR/Instruction.h"
65 #include "llvm/IR/Instructions.h"
66 #include "llvm/IR/IntrinsicInst.h"
67 #include "llvm/IR/Type.h"
68 #include "llvm/Support/Casting.h"
69 #include "llvm/Support/CodeGen.h"
71 #include "llvm/Support/Compiler.h"
73 #include "llvm/Support/KnownBits.h"
77 #include <cassert>
78 #include <cmath>
79 #include <cstdint>
80 #include <iterator>
81 #include <tuple>
82 #include <utility>
83 #include <vector>
84 
85 using namespace llvm;
86 
87 #define DEBUG_TYPE "si-lower"
88 
89 STATISTIC(NumTailCalls, "Number of tail calls");
90 
91 static cl::opt<bool> EnableVGPRIndexMode(
92   "amdgpu-vgpr-index-mode",
93   cl::desc("Use GPR indexing mode instead of movrel for vector indexing"),
94   cl::init(false));
95 
96 static cl::opt<unsigned> AssumeFrameIndexHighZeroBits(
97   "amdgpu-frame-index-zero-bits",
98   cl::desc("High bits of frame index assumed to be zero"),
99   cl::init(5),
100   cl::ReallyHidden);
101 
102 static cl::opt<bool> DisableLoopAlignment(
103   "amdgpu-disable-loop-alignment",
104   cl::desc("Do not align and prefetch loops"),
105   cl::init(false));
106 
107 static unsigned findFirstFreeSGPR(CCState &CCInfo) {
108  unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
109  for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) {
110  if (!CCInfo.isAllocated(AMDGPU::SGPR0 + Reg)) {
111  return AMDGPU::SGPR0 + Reg;
112  }
113  }
114  llvm_unreachable("Cannot allocate sgpr");
115 }
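// Illustrative example (not in the upstream source): if the calling convention
// has already allocated SGPR0..SGPR3, the loop above returns AMDGPU::SGPR4,
// the first SGPR that CCState has not yet handed out.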
116 
117 SITargetLowering::SITargetLowering(const TargetMachine &TM,
118                                    const GCNSubtarget &STI)
119  : AMDGPUTargetLowering(TM, STI),
120  Subtarget(&STI) {
121  addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass);
122  addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass);
123 
124  addRegisterClass(MVT::i32, &AMDGPU::SReg_32_XM0RegClass);
125  addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass);
126 
127  addRegisterClass(MVT::f64, &AMDGPU::VReg_64RegClass);
128  addRegisterClass(MVT::v2i32, &AMDGPU::SReg_64RegClass);
129  addRegisterClass(MVT::v2f32, &AMDGPU::VReg_64RegClass);
130 
131  addRegisterClass(MVT::v3i32, &AMDGPU::SGPR_96RegClass);
132  addRegisterClass(MVT::v3f32, &AMDGPU::VReg_96RegClass);
133 
134  addRegisterClass(MVT::v2i64, &AMDGPU::SReg_128RegClass);
135  addRegisterClass(MVT::v2f64, &AMDGPU::SReg_128RegClass);
136 
137  addRegisterClass(MVT::v4i32, &AMDGPU::SReg_128RegClass);
138  addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);
139 
140  addRegisterClass(MVT::v5i32, &AMDGPU::SGPR_160RegClass);
141  addRegisterClass(MVT::v5f32, &AMDGPU::VReg_160RegClass);
142 
143  addRegisterClass(MVT::v8i32, &AMDGPU::SReg_256RegClass);
144  addRegisterClass(MVT::v8f32, &AMDGPU::VReg_256RegClass);
145 
146  addRegisterClass(MVT::v16i32, &AMDGPU::SReg_512RegClass);
147  addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass);
148 
149  if (Subtarget->has16BitInsts()) {
150  addRegisterClass(MVT::i16, &AMDGPU::SReg_32_XM0RegClass);
151  addRegisterClass(MVT::f16, &AMDGPU::SReg_32_XM0RegClass);
152 
153   // Unless there are also VOP3P operations, none of these operations are really legal.
154  addRegisterClass(MVT::v2i16, &AMDGPU::SReg_32_XM0RegClass);
155  addRegisterClass(MVT::v2f16, &AMDGPU::SReg_32_XM0RegClass);
156  addRegisterClass(MVT::v4i16, &AMDGPU::SReg_64RegClass);
157  addRegisterClass(MVT::v4f16, &AMDGPU::SReg_64RegClass);
158  }
159 
161 
162  // We need to custom lower vector stores from local memory
171 
180 
191 
194 
199 
205 
210 
213 
221 
229 
236 
243 
250 
253 
256 
260 
261 #if 0
264 #endif
265 
266  // We only support LOAD/STORE and vector manipulation ops for vectors
267  // with > 4 elements.
270  for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
271  switch (Op) {
272  case ISD::LOAD:
273  case ISD::STORE:
274  case ISD::BUILD_VECTOR:
275  case ISD::BITCAST:
281  break;
282  case ISD::CONCAT_VECTORS:
284  break;
285  default:
287  break;
288  }
289  }
290  }
291 
293 
294  // TODO: For dynamic 64-bit vector inserts/extracts, should emit a pseudo that
295  // is expanded to avoid having two separate loops in case the index is a VGPR.
296 
297  // Most operations are naturally 32-bit vector operations. We only support
298  // load and store of i64 vectors, so promote v2i64 vector operations to v4i32.
299  for (MVT Vec64 : { MVT::v2i64, MVT::v2f64 }) {
302 
305 
308 
311  }
312 
317 
320 
321  // Avoid stack access for these.
322  // TODO: Generalize to more vector types.
327 
333 
337 
342 
343  // Deal with vec3 vector operations when widened to vec4.
348 
349  // Deal with vec5 vector operations when widened to vec8.
354 
355  // BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling,
356  // and output demarshalling
359 
360   // We can't return success/failure, only the old value,
361   // so let LLVM add the comparison.
364 
365  if (Subtarget->hasFlatAddressSpace()) {
368  }
369 
372 
373   // This is s_memtime on SI and s_memrealtime on VI.
377 
378  if (Subtarget->has16BitInsts()) {
382  }
383 
384  // v_mad_f32 does not support denormals according to some sources.
385  if (!Subtarget->hasFP32Denormals())
387 
388  if (!Subtarget->hasBFI()) {
389  // fcopysign can be done in a single instruction with BFI.
392  }
393 
394  if (!Subtarget->hasBCNT(32))
396 
397  if (!Subtarget->hasBCNT(64))
399 
400  if (Subtarget->hasFFBH())
402 
403  if (Subtarget->hasFFBL())
405 
406  // We only really have 32-bit BFE instructions (and 16-bit on VI).
407  //
408  // On SI+ there are 64-bit BFEs, but they are scalar only and there isn't any
409  // effort to match them now. We want this to be false for i64 cases when the
410  // extraction isn't restricted to the upper or lower half. Ideally we would
411  // have some pass reduce 64-bit extracts to 32-bit if possible. Extracts that
412  // span the midpoint are probably relatively rare, so don't worry about them
413  // for now.
414  if (Subtarget->hasBFE())
415  setHasExtractBitsInsn(true);
416 
421 
422 
423  // These are really only legal for ieee_mode functions. We should be avoiding
424  // them for functions that don't have ieee_mode enabled, so just say they are
425  // legal.
430 
431 
432  if (Subtarget->getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS) {
436  } else {
441  }
442 
444 
449 
450  if (Subtarget->has16BitInsts()) {
452 
455 
458 
461 
464 
469 
472 
478 
480 
482 
484 
486 
491 
496 
497  // F16 - Constant Actions.
499 
500  // F16 - Load/Store Actions.
505 
506  // F16 - VOP1 Actions.
515 
516  // F16 - VOP2 Actions.
519 
521 
522  // F16 - VOP3 Actions.
524  if (!Subtarget->hasFP16Denormals() && STI.hasMadF16())
526 
527  for (MVT VT : {MVT::v2i16, MVT::v2f16, MVT::v4i16, MVT::v4f16}) {
528  for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
529  switch (Op) {
530  case ISD::LOAD:
531  case ISD::STORE:
532  case ISD::BUILD_VECTOR:
533  case ISD::BITCAST:
539  break;
540  case ISD::CONCAT_VECTORS:
542  break;
543  default:
545  break;
546  }
547  }
548  }
549 
550  // XXX - Do these do anything? Vector constants turn into build_vector.
553 
556 
561 
566 
573 
578 
583 
588 
592 
593  if (!Subtarget->hasVOP3PInsts()) {
596  }
597 
599  // This isn't really legal, but this avoids the legalizer unrolling it (and
600  // allows matching fneg (fabs x) patterns)
602 
607 
610 
613  }
614 
615  if (Subtarget->hasVOP3PInsts()) {
626 
630 
633 
635 
638 
645 
650 
653 
656 
660 
664  }
665 
668 
669  if (Subtarget->has16BitInsts()) {
674  } else {
675  // Legalization hack.
678 
681  }
682 
685  }
686 
714 
715  // All memory operations. Some folding on the pointer operand is done to help
716  // matching the constant offsets in the addressing modes.
735 
737 }
738 
739 const GCNSubtarget *SITargetLowering::getSubtarget() const {
740   return Subtarget;
741 }
742 
743 //===----------------------------------------------------------------------===//
744 // TargetLowering queries
745 //===----------------------------------------------------------------------===//
746 
747 // v_mad_mix* support a conversion from f16 to f32.
748 //
749 // There is only one special case, when denormals are enabled, that we don't
750 // currently handle, where this would be OK to use.
751 bool SITargetLowering::isFPExtFoldable(unsigned Opcode,
752                                        EVT DestVT, EVT SrcVT) const {
753  return ((Opcode == ISD::FMAD && Subtarget->hasMadMixInsts()) ||
754  (Opcode == ISD::FMA && Subtarget->hasFmaMixInsts())) &&
755  DestVT.getScalarType() == MVT::f32 && !Subtarget->hasFP32Denormals() &&
756  SrcVT.getScalarType() == MVT::f16;
757 }
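// Illustrative example (not in the upstream source): with f32 denormals
// disabled, this hook reports that the f16->f32 fpext in a pattern such as
// fma(fpext(a:f16), fpext(b:f16), c:f32) is foldable, which is what allows the
// mixed-precision v_mad_mix/v_fma_mix instructions mentioned above to be matched.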
758 
759 bool SITargetLowering::isShuffleMaskLegal(ArrayRef<int>, EVT) const {
760   // SI has some legal vector types, but no legal vector operations. Say no
761  // shuffles are legal in order to prefer scalarizing some vector operations.
762  return false;
763 }
764 
765 MVT SITargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
766                                                     CallingConv::ID CC,
767  EVT VT) const {
768  // TODO: Consider splitting all arguments into 32-bit pieces.
769  if (CC != CallingConv::AMDGPU_KERNEL && VT.isVector()) {
770  EVT ScalarVT = VT.getScalarType();
771  unsigned Size = ScalarVT.getSizeInBits();
772  if (Size == 32)
773  return ScalarVT.getSimpleVT();
774 
775  if (Size == 64)
776  return MVT::i32;
777 
778  if (Size == 16 && Subtarget->has16BitInsts())
779  return VT.isInteger() ? MVT::v2i16 : MVT::v2f16;
780  }
781 
782  return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
783 }
784 
785 unsigned SITargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
786                                                          CallingConv::ID CC,
787  EVT VT) const {
788  if (CC != CallingConv::AMDGPU_KERNEL && VT.isVector()) {
789  unsigned NumElts = VT.getVectorNumElements();
790  EVT ScalarVT = VT.getScalarType();
791  unsigned Size = ScalarVT.getSizeInBits();
792 
793  if (Size == 32)
794  return NumElts;
795 
796  if (Size == 64)
797  return 2 * NumElts;
798 
799  if (Size == 16 && Subtarget->has16BitInsts())
800  return (VT.getVectorNumElements() + 1) / 2;
801  }
802 
803  return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
804 }
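// Illustrative examples (not in the upstream source): under a non-kernel
// calling convention, a v3f64 argument reports 2 * 3 == 6 registers (of type
// i32, per getRegisterTypeForCallingConv above), while a v3f16 argument on a
// subtarget with 16-bit instructions reports (3 + 1) / 2 == 2 v2f16 registers.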
805 
806 unsigned SITargetLowering::getVectorTypeBreakdownForCallingConv(
807   LLVMContext &Context, CallingConv::ID CC,
808   EVT VT, EVT &IntermediateVT,
809  unsigned &NumIntermediates, MVT &RegisterVT) const {
810  if (CC != CallingConv::AMDGPU_KERNEL && VT.isVector()) {
811  unsigned NumElts = VT.getVectorNumElements();
812  EVT ScalarVT = VT.getScalarType();
813  unsigned Size = ScalarVT.getSizeInBits();
814  if (Size == 32) {
815  RegisterVT = ScalarVT.getSimpleVT();
816  IntermediateVT = RegisterVT;
817  NumIntermediates = NumElts;
818  return NumIntermediates;
819  }
820 
821  if (Size == 64) {
822  RegisterVT = MVT::i32;
823  IntermediateVT = RegisterVT;
824  NumIntermediates = 2 * NumElts;
825  return NumIntermediates;
826  }
827 
828  // FIXME: We should fix the ABI to be the same on targets without 16-bit
829   // support, but unless we can properly handle 3-vectors, it will still be
830  // inconsistent.
831  if (Size == 16 && Subtarget->has16BitInsts()) {
832  RegisterVT = VT.isInteger() ? MVT::v2i16 : MVT::v2f16;
833  IntermediateVT = RegisterVT;
834  NumIntermediates = (NumElts + 1) / 2;
835  return NumIntermediates;
836  }
837  }
838 
839   return TargetLowering::getVectorTypeBreakdownForCallingConv(
840     Context, CC, VT, IntermediateVT, NumIntermediates, RegisterVT);
841 }
842 
844   // Only limited forms of aggregate type are currently expected.
845  assert(Ty->isStructTy() && "Expected struct type");
846 
847 
848  Type *ElementType = nullptr;
849  unsigned NumElts;
850  if (Ty->getContainedType(0)->isVectorTy()) {
851  VectorType *VecComponent = cast<VectorType>(Ty->getContainedType(0));
852  ElementType = VecComponent->getElementType();
853  NumElts = VecComponent->getNumElements();
854  } else {
855  ElementType = Ty->getContainedType(0);
856  NumElts = 1;
857  }
858 
859  assert((Ty->getContainedType(1) && Ty->getContainedType(1)->isIntegerTy(32)) && "Expected int32 type");
860 
861  // Calculate the size of the memVT type from the aggregate
862  unsigned Pow2Elts = 0;
863  unsigned ElementSize;
864  switch (ElementType->getTypeID()) {
865  default:
866  llvm_unreachable("Unknown type!");
867  case Type::IntegerTyID:
868  ElementSize = cast<IntegerType>(ElementType)->getBitWidth();
869  break;
870  case Type::HalfTyID:
871  ElementSize = 16;
872  break;
873  case Type::FloatTyID:
874  ElementSize = 32;
875  break;
876  }
877  unsigned AdditionalElts = ElementSize == 16 ? 2 : 1;
878  Pow2Elts = 1 << Log2_32_Ceil(NumElts + AdditionalElts);
879 
880  return MVT::getVectorVT(MVT::getVT(ElementType, false),
881  Pow2Elts);
882 }
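// Illustrative example (not in the upstream source): for an intrinsic returning
// { <3 x float>, i32 }, NumElts is 3 and ElementSize is 32, so AdditionalElts
// is 1 and Pow2Elts is 1 << Log2_32_Ceil(4) == 4, giving a v4f32 memVT.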
883 
884 bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
885                                           const CallInst &CI,
886  MachineFunction &MF,
887  unsigned IntrID) const {
888  if (const AMDGPU::RsrcIntrinsic *RsrcIntr =
889  AMDGPU::lookupRsrcIntrinsic(IntrID)) {
891  (Intrinsic::ID)IntrID);
892  if (Attr.hasFnAttribute(Attribute::ReadNone))
893  return false;
894 
896 
897  if (RsrcIntr->IsImage) {
898  Info.ptrVal = MFI->getImagePSV(
900  CI.getArgOperand(RsrcIntr->RsrcArg));
901  Info.align = 0;
902  } else {
903  Info.ptrVal = MFI->getBufferPSV(
905  CI.getArgOperand(RsrcIntr->RsrcArg));
906  }
907 
909  if (Attr.hasFnAttribute(Attribute::ReadOnly)) {
911  Info.memVT = MVT::getVT(CI.getType(), true);
912  if (Info.memVT == MVT::Other) {
913  // Some intrinsics return an aggregate type - special case to work out
914  // the correct memVT
915  Info.memVT = memVTFromAggregate(CI.getType());
916  }
918  } else if (Attr.hasFnAttribute(Attribute::WriteOnly)) {
919  Info.opc = ISD::INTRINSIC_VOID;
920  Info.memVT = MVT::getVT(CI.getArgOperand(0)->getType());
922  } else {
923  // Atomic
925  Info.memVT = MVT::getVT(CI.getType());
929 
930  // XXX - Should this be volatile without known ordering?
932  }
933  return true;
934  }
935 
936  switch (IntrID) {
937  case Intrinsic::amdgcn_atomic_inc:
938  case Intrinsic::amdgcn_atomic_dec:
939  case Intrinsic::amdgcn_ds_ordered_add:
940  case Intrinsic::amdgcn_ds_ordered_swap:
941  case Intrinsic::amdgcn_ds_fadd:
942  case Intrinsic::amdgcn_ds_fmin:
943  case Intrinsic::amdgcn_ds_fmax: {
945  Info.memVT = MVT::getVT(CI.getType());
946  Info.ptrVal = CI.getOperand(0);
947  Info.align = 0;
949 
950  const ConstantInt *Vol = cast<ConstantInt>(CI.getOperand(4));
951  if (!Vol->isZero())
953 
954  return true;
955  }
956  case Intrinsic::amdgcn_ds_append:
957  case Intrinsic::amdgcn_ds_consume: {
959  Info.memVT = MVT::getVT(CI.getType());
960  Info.ptrVal = CI.getOperand(0);
961  Info.align = 0;
963 
964  const ConstantInt *Vol = cast<ConstantInt>(CI.getOperand(1));
965  if (!Vol->isZero())
967 
968  return true;
969  }
970  default:
971  return false;
972  }
973 }
974 
975 bool SITargetLowering::getAddrModeArguments(IntrinsicInst *II,
976                                             SmallVectorImpl<Value*> &Ops,
977                                             Type *&AccessTy) const {
978  switch (II->getIntrinsicID()) {
979  case Intrinsic::amdgcn_atomic_inc:
980  case Intrinsic::amdgcn_atomic_dec:
981  case Intrinsic::amdgcn_ds_ordered_add:
982  case Intrinsic::amdgcn_ds_ordered_swap:
983  case Intrinsic::amdgcn_ds_fadd:
984  case Intrinsic::amdgcn_ds_fmin:
985  case Intrinsic::amdgcn_ds_fmax: {
986  Value *Ptr = II->getArgOperand(0);
987  AccessTy = II->getType();
988  Ops.push_back(Ptr);
989  return true;
990  }
991  default:
992  return false;
993  }
994 }
995 
996 bool SITargetLowering::isLegalFlatAddressingMode(const AddrMode &AM) const {
997  if (!Subtarget->hasFlatInstOffsets()) {
998  // Flat instructions do not have offsets, and only have the register
999  // address.
1000  return AM.BaseOffs == 0 && AM.Scale == 0;
1001  }
1002 
1003  // GFX9 added a 13-bit signed offset. When using regular flat instructions,
1004  // the sign bit is ignored and is treated as a 12-bit unsigned offset.
1005 
1006   // GFX10 shrank the signed offset to 12 bits. When using regular flat
1007   // instructions, the sign bit is also ignored and the offset is treated as
1008   // an 11-bit unsigned offset.
1009 
1010  if (Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10)
1011  return isUInt<11>(AM.BaseOffs) && AM.Scale == 0;
1012 
1013  // Just r + i
1014  return isUInt<12>(AM.BaseOffs) && AM.Scale == 0;
1015 }
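// Illustrative example (not in the upstream source): with the checks above, a
// flat access at base + 4095 is accepted on a pre-GFX10 subtarget that has flat
// instruction offsets, but rejected on GFX10, where the usable unsigned offset
// range ends at 2047.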
1016 
1017 bool SITargetLowering::isLegalGlobalAddressingMode(const AddrMode &AM) const {
1018   if (Subtarget->hasFlatGlobalInsts())
1019  return isInt<13>(AM.BaseOffs) && AM.Scale == 0;
1020 
1021  if (!Subtarget->hasAddr64() || Subtarget->useFlatForGlobal()) {
1022     // Assume that we will use FLAT for all global memory accesses
1023  // on VI.
1024  // FIXME: This assumption is currently wrong. On VI we still use
1025  // MUBUF instructions for the r + i addressing mode. As currently
1026  // implemented, the MUBUF instructions only work on buffer < 4GB.
1027  // It may be possible to support > 4GB buffers with MUBUF instructions,
1028  // by setting the stride value in the resource descriptor which would
1029  // increase the size limit to (stride * 4GB). However, this is risky,
1030  // because it has never been validated.
1031  return isLegalFlatAddressingMode(AM);
1032  }
1033 
1034  return isLegalMUBUFAddressingMode(AM);
1035 }
1036 
1037 bool SITargetLowering::isLegalMUBUFAddressingMode(const AddrMode &AM) const {
1038  // MUBUF / MTBUF instructions have a 12-bit unsigned byte offset, and
1039  // additionally can do r + r + i with addr64. 32-bit has more addressing
1040  // mode options. Depending on the resource constant, it can also do
1041  // (i64 r0) + (i32 r1) * (i14 i).
1042  //
1043  // Private arrays end up using a scratch buffer most of the time, so also
1044  // assume those use MUBUF instructions. Scratch loads / stores are currently
1045  // implemented as mubuf instructions with offen bit set, so slightly
1046  // different than the normal addr64.
1047  if (!isUInt<12>(AM.BaseOffs))
1048  return false;
1049 
1050  // FIXME: Since we can split immediate into soffset and immediate offset,
1051  // would it make sense to allow any immediate?
1052 
1053  switch (AM.Scale) {
1054  case 0: // r + i or just i, depending on HasBaseReg.
1055  return true;
1056  case 1:
1057  return true; // We have r + r or r + i.
1058  case 2:
1059  if (AM.HasBaseReg) {
1060  // Reject 2 * r + r.
1061  return false;
1062  }
1063 
1064  // Allow 2 * r as r + r
1065  // Or 2 * r + i is allowed as r + r + i.
1066  return true;
1067  default: // Don't allow n * r
1068  return false;
1069  }
1070 }
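// Illustrative example (not in the upstream source): an addressing mode of
// {BaseOffs = 16, Scale = 2, HasBaseReg = false} is accepted above because
// 2 * r can be folded to r + r (+ i), while Scale == 2 with a base register is
// rejected since it would require 2 * r + r.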
1071 
1072 bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
1073                                              const AddrMode &AM, Type *Ty,
1074  unsigned AS, Instruction *I) const {
1075  // No global is ever allowed as a base.
1076  if (AM.BaseGV)
1077  return false;
1078 
1079  if (AS == AMDGPUAS::GLOBAL_ADDRESS)
1080  return isLegalGlobalAddressingMode(AM);
1081 
1082  if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
1085  // If the offset isn't a multiple of 4, it probably isn't going to be
1086  // correctly aligned.
1087  // FIXME: Can we get the real alignment here?
1088  if (AM.BaseOffs % 4 != 0)
1089  return isLegalMUBUFAddressingMode(AM);
1090 
1091  // There are no SMRD extloads, so if we have to do a small type access we
1092  // will use a MUBUF load.
1093  // FIXME?: We also need to do this if unaligned, but we don't know the
1094  // alignment here.
1095  if (Ty->isSized() && DL.getTypeStoreSize(Ty) < 4)
1096  return isLegalGlobalAddressingMode(AM);
1097 
1098  if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS) {
1099  // SMRD instructions have an 8-bit, dword offset on SI.
1100  if (!isUInt<8>(AM.BaseOffs / 4))
1101  return false;
1102  } else if (Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS) {
1103  // On CI+, this can also be a 32-bit literal constant offset. If it fits
1104  // in 8-bits, it can use a smaller encoding.
1105  if (!isUInt<32>(AM.BaseOffs / 4))
1106  return false;
1107  } else if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
1108  // On VI, these use the SMEM format and the offset is 20-bit in bytes.
1109  if (!isUInt<20>(AM.BaseOffs))
1110  return false;
1111  } else
1112  llvm_unreachable("unhandled generation");
1113 
1114  if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
1115  return true;
1116 
1117  if (AM.Scale == 1 && AM.HasBaseReg)
1118  return true;
1119 
1120  return false;
1121 
1122  } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
1123  return isLegalMUBUFAddressingMode(AM);
1124  } else if (AS == AMDGPUAS::LOCAL_ADDRESS ||
1125  AS == AMDGPUAS::REGION_ADDRESS) {
1126  // Basic, single offset DS instructions allow a 16-bit unsigned immediate
1127  // field.
1128  // XXX - If doing a 4-byte aligned 8-byte type access, we effectively have
1129  // an 8-bit dword offset but we don't know the alignment here.
1130  if (!isUInt<16>(AM.BaseOffs))
1131  return false;
1132 
1133  if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
1134  return true;
1135 
1136  if (AM.Scale == 1 && AM.HasBaseReg)
1137  return true;
1138 
1139  return false;
1140  } else if (AS == AMDGPUAS::FLAT_ADDRESS ||
1142  // For an unknown address space, this usually means that this is for some
1143  // reason being used for pure arithmetic, and not based on some addressing
1144  // computation. We don't have instructions that compute pointers with any
1145  // addressing modes, so treat them as having no offset like flat
1146  // instructions.
1147  return isLegalFlatAddressingMode(AM);
1148  } else {
1149  llvm_unreachable("unhandled address space");
1150  }
1151 }
1152 
1153 bool SITargetLowering::canMergeStoresTo(unsigned AS, EVT MemVT,
1154  const SelectionDAG &DAG) const {
1155  if (AS == AMDGPUAS::GLOBAL_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS) {
1156  return (MemVT.getSizeInBits() <= 4 * 32);
1157  } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
1158  unsigned MaxPrivateBits = 8 * getSubtarget()->getMaxPrivateElementSize();
1159  return (MemVT.getSizeInBits() <= MaxPrivateBits);
1160  } else if (AS == AMDGPUAS::LOCAL_ADDRESS) {
1161  return (MemVT.getSizeInBits() <= 2 * 32);
1162  }
1163  return true;
1164 }
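// Illustrative example (not in the upstream source): if getMaxPrivateElementSize()
// returns 4, private stores are only merged up to 32 bits here, while global or
// flat stores may be merged up to 4 * 32 == 128 bits and LDS stores up to 64 bits.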
1165 
1166 bool SITargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
1167                                                       unsigned AddrSpace,
1168  unsigned Align,
1169  bool *IsFast) const {
1170  if (IsFast)
1171  *IsFast = false;
1172 
1173  // TODO: I think v3i32 should allow unaligned accesses on CI with DS_READ_B96,
1174  // which isn't a simple VT.
1175  // Until MVT is extended to handle this, simply check for the size and
1176  // rely on the condition below: allow accesses if the size is a multiple of 4.
1177  if (VT == MVT::Other || (VT != MVT::Other && VT.getSizeInBits() > 1024 &&
1178  VT.getStoreSize() > 16)) {
1179  return false;
1180  }
1181 
1182  if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
1183  AddrSpace == AMDGPUAS::REGION_ADDRESS) {
1184  // ds_read/write_b64 require 8-byte alignment, but we can do a 4 byte
1185  // aligned, 8 byte access in a single operation using ds_read2/write2_b32
1186  // with adjacent offsets.
1187  bool AlignedBy4 = (Align % 4 == 0);
1188  if (IsFast)
1189  *IsFast = AlignedBy4;
1190 
1191  return AlignedBy4;
1192  }
1193 
1194  // FIXME: We have to be conservative here and assume that flat operations
1195  // will access scratch. If we had access to the IR function, then we
1196  // could determine if any private memory was used in the function.
1197  if (!Subtarget->hasUnalignedScratchAccess() &&
1198  (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS ||
1199  AddrSpace == AMDGPUAS::FLAT_ADDRESS)) {
1200  bool AlignedBy4 = Align >= 4;
1201  if (IsFast)
1202  *IsFast = AlignedBy4;
1203 
1204  return AlignedBy4;
1205  }
1206 
1207  if (Subtarget->hasUnalignedBufferAccess()) {
1208     // If we have a uniform constant load, it still requires using a slow
1209  // buffer instruction if unaligned.
1210  if (IsFast) {
1211  *IsFast = (AddrSpace == AMDGPUAS::CONSTANT_ADDRESS ||
1212  AddrSpace == AMDGPUAS::CONSTANT_ADDRESS_32BIT) ?
1213  (Align % 4 == 0) : true;
1214  }
1215 
1216  return true;
1217  }
1218 
1219   // Values smaller than a dword must be aligned.
1220  if (VT.bitsLT(MVT::i32))
1221  return false;
1222 
1223  // 8.1.6 - For Dword or larger reads or writes, the two LSBs of the
1224  // byte-address are ignored, thus forcing Dword alignment.
1225  // This applies to private, global, and constant memory.
1226  if (IsFast)
1227  *IsFast = true;
1228 
1229  return VT.bitsGT(MVT::i32) && Align % 4 == 0;
1230 }
1231 
1232 EVT SITargetLowering::getOptimalMemOpType(
1233   uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset,
1234  bool ZeroMemset, bool MemcpyStrSrc,
1235  const AttributeList &FuncAttributes) const {
1236  // FIXME: Should account for address space here.
1237 
1238  // The default fallback uses the private pointer size as a guess for a type to
1239  // use. Make sure we switch these to 64-bit accesses.
1240 
1241  if (Size >= 16 && DstAlign >= 4) // XXX: Should only do for global
1242  return MVT::v4i32;
1243 
1244  if (Size >= 8 && DstAlign >= 4)
1245  return MVT::v2i32;
1246 
1247  // Use the default.
1248  return MVT::Other;
1249 }
1250 
1251 static bool isFlatGlobalAddrSpace(unsigned AS) {
1252  return AS == AMDGPUAS::GLOBAL_ADDRESS ||
1253  AS == AMDGPUAS::FLAT_ADDRESS ||
1256 }
1257 
1259  unsigned DestAS) const {
1260  return isFlatGlobalAddrSpace(SrcAS) && isFlatGlobalAddrSpace(DestAS);
1261 }
1262 
1264  const MemSDNode *MemNode = cast<MemSDNode>(N);
1265  const Value *Ptr = MemNode->getMemOperand()->getValue();
1266  const Instruction *I = dyn_cast_or_null<Instruction>(Ptr);
1267  return I && I->getMetadata("amdgpu.noclobber");
1268 }
1269 
1271  unsigned DestAS) const {
1272  // Flat -> private/local is a simple truncate.
1273   // Flat -> global is a no-op.
1274  if (SrcAS == AMDGPUAS::FLAT_ADDRESS)
1275  return true;
1276 
1277  return isNoopAddrSpaceCast(SrcAS, DestAS);
1278 }
1279 
1281  const MemSDNode *MemNode = cast<MemSDNode>(N);
1282 
1283  return AMDGPUInstrInfo::isUniformMMO(MemNode->getMemOperand());
1284 }
1285 
1288  if (VT.getVectorNumElements() != 1 && VT.getScalarType().bitsLE(MVT::i16))
1289  return TypeSplitVector;
1290 
1292 }
1293 
1295  Type *Ty) const {
1296  // FIXME: Could be smarter if called for vector constants.
1297  return true;
1298 }
1299 
1301  if (Subtarget->has16BitInsts() && VT == MVT::i16) {
1302  switch (Op) {
1303  case ISD::LOAD:
1304  case ISD::STORE:
1305 
1306  // These operations are done with 32-bit instructions anyway.
1307  case ISD::AND:
1308  case ISD::OR:
1309  case ISD::XOR:
1310  case ISD::SELECT:
1311  // TODO: Extensions?
1312  return true;
1313  default:
1314  return false;
1315  }
1316  }
1317 
1318  // SimplifySetCC uses this function to determine whether or not it should
1319  // create setcc with i1 operands. We don't have instructions for i1 setcc.
1320  if (VT == MVT::i1 && Op == ISD::SETCC)
1321  return false;
1322 
1323  return TargetLowering::isTypeDesirableForOp(Op, VT);
1324 }
1325 
1326 SDValue SITargetLowering::lowerKernArgParameterPtr(SelectionDAG &DAG,
1327  const SDLoc &SL,
1328  SDValue Chain,
1329  uint64_t Offset) const {
1330  const DataLayout &DL = DAG.getDataLayout();
1331  MachineFunction &MF = DAG.getMachineFunction();
1333 
1334  const ArgDescriptor *InputPtrReg;
1335  const TargetRegisterClass *RC;
1336 
1337  std::tie(InputPtrReg, RC)
1339 
1342  SDValue BasePtr = DAG.getCopyFromReg(Chain, SL,
1343  MRI.getLiveInVirtReg(InputPtrReg->getRegister()), PtrVT);
1344 
1345  return DAG.getObjectPtrOffset(SL, BasePtr, Offset);
1346 }
1347 
1348 SDValue SITargetLowering::getImplicitArgPtr(SelectionDAG &DAG,
1349  const SDLoc &SL) const {
1350  uint64_t Offset = getImplicitParameterOffset(DAG.getMachineFunction(),
1351  FIRST_IMPLICIT);
1352  return lowerKernArgParameterPtr(DAG, SL, DAG.getEntryNode(), Offset);
1353 }
1354 
1355 SDValue SITargetLowering::convertArgType(SelectionDAG &DAG, EVT VT, EVT MemVT,
1356  const SDLoc &SL, SDValue Val,
1357  bool Signed,
1358  const ISD::InputArg *Arg) const {
1359  // First, if it is a widened vector, narrow it.
1360  if (VT.isVector() &&
1361  VT.getVectorNumElements() != MemVT.getVectorNumElements()) {
1362  EVT NarrowedVT =
1364  VT.getVectorNumElements());
1365  Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, SL, NarrowedVT, Val,
1366  DAG.getConstant(0, SL, MVT::i32));
1367  }
1368 
1369  // Then convert the vector elements or scalar value.
1370  if (Arg && (Arg->Flags.isSExt() || Arg->Flags.isZExt()) &&
1371  VT.bitsLT(MemVT)) {
1372  unsigned Opc = Arg->Flags.isZExt() ? ISD::AssertZext : ISD::AssertSext;
1373  Val = DAG.getNode(Opc, SL, MemVT, Val, DAG.getValueType(VT));
1374  }
1375 
1376  if (MemVT.isFloatingPoint())
1377  Val = getFPExtOrFPTrunc(DAG, Val, SL, VT);
1378  else if (Signed)
1379  Val = DAG.getSExtOrTrunc(Val, SL, VT);
1380  else
1381  Val = DAG.getZExtOrTrunc(Val, SL, VT);
1382 
1383  return Val;
1384 }
1385 
1386 SDValue SITargetLowering::lowerKernargMemParameter(
1387  SelectionDAG &DAG, EVT VT, EVT MemVT,
1388  const SDLoc &SL, SDValue Chain,
1389  uint64_t Offset, unsigned Align, bool Signed,
1390  const ISD::InputArg *Arg) const {
1391  Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
1393  MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
1394 
1395  // Try to avoid using an extload by loading earlier than the argument address,
1396  // and extracting the relevant bits. The load should hopefully be merged with
1397  // the previous argument.
1398  if (MemVT.getStoreSize() < 4 && Align < 4) {
1399  // TODO: Handle align < 4 and size >= 4 (can happen with packed structs).
1400  int64_t AlignDownOffset = alignDown(Offset, 4);
1401  int64_t OffsetDiff = Offset - AlignDownOffset;
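    // Illustrative example (not in the upstream source): an i16 argument at
    // Offset == 2 gives AlignDownOffset == 0 and OffsetDiff == 2, so the
    // aligned dword at offset 0 is loaded and shifted right by 16 bits below.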
1402 
1403  EVT IntVT = MemVT.changeTypeToInteger();
1404 
1405  // TODO: If we passed in the base kernel offset we could have a better
1406  // alignment than 4, but we don't really need it.
1407  SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, AlignDownOffset);
1408  SDValue Load = DAG.getLoad(MVT::i32, SL, Chain, Ptr, PtrInfo, 4,
1411 
1412  SDValue ShiftAmt = DAG.getConstant(OffsetDiff * 8, SL, MVT::i32);
1413  SDValue Extract = DAG.getNode(ISD::SRL, SL, MVT::i32, Load, ShiftAmt);
1414 
1415  SDValue ArgVal = DAG.getNode(ISD::TRUNCATE, SL, IntVT, Extract);
1416  ArgVal = DAG.getNode(ISD::BITCAST, SL, MemVT, ArgVal);
1417  ArgVal = convertArgType(DAG, VT, MemVT, SL, ArgVal, Signed, Arg);
1418 
1419 
1420  return DAG.getMergeValues({ ArgVal, Load.getValue(1) }, SL);
1421  }
1422 
1423  SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, Offset);
1424  SDValue Load = DAG.getLoad(MemVT, SL, Chain, Ptr, PtrInfo, Align,
1427 
1428  SDValue Val = convertArgType(DAG, VT, MemVT, SL, Load, Signed, Arg);
1429  return DAG.getMergeValues({ Val, Load.getValue(1) }, SL);
1430 }
1431 
1432 SDValue SITargetLowering::lowerStackParameter(SelectionDAG &DAG, CCValAssign &VA,
1433  const SDLoc &SL, SDValue Chain,
1434  const ISD::InputArg &Arg) const {
1435  MachineFunction &MF = DAG.getMachineFunction();
1436  MachineFrameInfo &MFI = MF.getFrameInfo();
1437 
1438  if (Arg.Flags.isByVal()) {
1439  unsigned Size = Arg.Flags.getByValSize();
1440  int FrameIdx = MFI.CreateFixedObject(Size, VA.getLocMemOffset(), false);
1441  return DAG.getFrameIndex(FrameIdx, MVT::i32);
1442  }
1443 
1444  unsigned ArgOffset = VA.getLocMemOffset();
1445  unsigned ArgSize = VA.getValVT().getStoreSize();
1446 
1447  int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, true);
1448 
1449  // Create load nodes to retrieve arguments from the stack.
1450  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1451  SDValue ArgValue;
1452 
1453   // For NON_EXTLOAD, generic code in getLoad asserts that ValVT == MemVT.
1455  MVT MemVT = VA.getValVT();
1456 
1457  switch (VA.getLocInfo()) {
1458  default:
1459  break;
1460  case CCValAssign::BCvt:
1461  MemVT = VA.getLocVT();
1462  break;
1463  case CCValAssign::SExt:
1464  ExtType = ISD::SEXTLOAD;
1465  break;
1466  case CCValAssign::ZExt:
1467  ExtType = ISD::ZEXTLOAD;
1468  break;
1469  case CCValAssign::AExt:
1470  ExtType = ISD::EXTLOAD;
1471  break;
1472  }
1473 
1474  ArgValue = DAG.getExtLoad(
1475  ExtType, SL, VA.getLocVT(), Chain, FIN,
1477  MemVT);
1478  return ArgValue;
1479 }
1480 
1481 SDValue SITargetLowering::getPreloadedValue(SelectionDAG &DAG,
1482  const SIMachineFunctionInfo &MFI,
1483  EVT VT,
1485  const ArgDescriptor *Reg;
1486  const TargetRegisterClass *RC;
1487 
1488  std::tie(Reg, RC) = MFI.getPreloadedValue(PVID);
1489  return CreateLiveInRegister(DAG, RC, Reg->getRegister(), VT);
1490 }
1491 
1492 static void processShaderInputArgs(SmallVectorImpl<ISD::InputArg> &Splits,
1493                                    CallingConv::ID CallConv,
1494                                    const SmallVectorImpl<ISD::InputArg> &Ins,
1495                                    BitVector &Skipped,
1496                                    FunctionType *FType,
1497                                    SIMachineFunctionInfo *Info) {
1498   for (unsigned I = 0, E = Ins.size(), PSInputNum = 0; I != E; ++I) {
1499  const ISD::InputArg *Arg = &Ins[I];
1500 
1501  assert((!Arg->VT.isVector() || Arg->VT.getScalarSizeInBits() == 16) &&
1502  "vector type argument should have been split");
1503 
1504  // First check if it's a PS input addr.
1505  if (CallConv == CallingConv::AMDGPU_PS &&
1506  !Arg->Flags.isInReg() && !Arg->Flags.isByVal() && PSInputNum <= 15) {
1507 
1508  bool SkipArg = !Arg->Used && !Info->isPSInputAllocated(PSInputNum);
1509 
1510  // Inconveniently only the first part of the split is marked as isSplit,
1511  // so skip to the end. We only want to increment PSInputNum once for the
1512  // entire split argument.
1513  if (Arg->Flags.isSplit()) {
1514  while (!Arg->Flags.isSplitEnd()) {
1515  assert(!Arg->VT.isVector() &&
1516  "unexpected vector split in ps argument type");
1517  if (!SkipArg)
1518  Splits.push_back(*Arg);
1519  Arg = &Ins[++I];
1520  }
1521  }
1522 
1523  if (SkipArg) {
1524  // We can safely skip PS inputs.
1525  Skipped.set(Arg->getOrigArgIndex());
1526  ++PSInputNum;
1527  continue;
1528  }
1529 
1530  Info->markPSInputAllocated(PSInputNum);
1531  if (Arg->Used)
1532  Info->markPSInputEnabled(PSInputNum);
1533 
1534  ++PSInputNum;
1535  }
1536 
1537  Splits.push_back(*Arg);
1538  }
1539 }
1540 
1541 // Allocate special inputs passed in VGPRs.
1542 static void allocateSpecialEntryInputVGPRs(CCState &CCInfo,
1543                                            MachineFunction &MF,
1544                                            const SIRegisterInfo &TRI,
1545                                            SIMachineFunctionInfo &Info) {
1546   if (Info.hasWorkItemIDX()) {
1547  unsigned Reg = AMDGPU::VGPR0;
1548  MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
1549 
1550  CCInfo.AllocateReg(Reg);
1552  }
1553 
1554  if (Info.hasWorkItemIDY()) {
1555  unsigned Reg = AMDGPU::VGPR1;
1556  MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
1557 
1558  CCInfo.AllocateReg(Reg);
1560  }
1561 
1562  if (Info.hasWorkItemIDZ()) {
1563  unsigned Reg = AMDGPU::VGPR2;
1564  MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
1565 
1566  CCInfo.AllocateReg(Reg);
1568  }
1569 }
1570 
1571 // Try to allocate a VGPR at the end of the argument list, or, if no argument
1572 // VGPRs are left, allocate a stack slot.
1573 static ArgDescriptor allocateVGPR32Input(CCState &CCInfo) {
1574   ArrayRef<MCPhysReg> ArgVGPRs
1575  = makeArrayRef(AMDGPU::VGPR_32RegClass.begin(), 32);
1576  unsigned RegIdx = CCInfo.getFirstUnallocated(ArgVGPRs);
1577  if (RegIdx == ArgVGPRs.size()) {
1578  // Spill to stack required.
1579  int64_t Offset = CCInfo.AllocateStack(4, 4);
1580 
1581  return ArgDescriptor::createStack(Offset);
1582  }
1583 
1584  unsigned Reg = ArgVGPRs[RegIdx];
1585  Reg = CCInfo.AllocateReg(Reg);
1586  assert(Reg != AMDGPU::NoRegister);
1587 
1588  MachineFunction &MF = CCInfo.getMachineFunction();
1589  MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
1590  return ArgDescriptor::createRegister(Reg);
1591 }
1592 
1593 static ArgDescriptor allocateSGPR32InputImpl(CCState &CCInfo,
1594                                              const TargetRegisterClass *RC,
1595  unsigned NumArgRegs) {
1596  ArrayRef<MCPhysReg> ArgSGPRs = makeArrayRef(RC->begin(), 32);
1597  unsigned RegIdx = CCInfo.getFirstUnallocated(ArgSGPRs);
1598  if (RegIdx == ArgSGPRs.size())
1599  report_fatal_error("ran out of SGPRs for arguments");
1600 
1601  unsigned Reg = ArgSGPRs[RegIdx];
1602  Reg = CCInfo.AllocateReg(Reg);
1603  assert(Reg != AMDGPU::NoRegister);
1604 
1605  MachineFunction &MF = CCInfo.getMachineFunction();
1606  MF.addLiveIn(Reg, RC);
1607  return ArgDescriptor::createRegister(Reg);
1608 }
1609 
1610 static ArgDescriptor allocateSGPR32Input(CCState &CCInfo) {
1611   return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_32RegClass, 32);
1612 }
1613 
1614 static ArgDescriptor allocateSGPR64Input(CCState &CCInfo) {
1615   return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_64RegClass, 16);
1616 }
1617 
1618 static void allocateSpecialInputVGPRs(CCState &CCInfo,
1619                                       MachineFunction &MF,
1620                                       const SIRegisterInfo &TRI,
1621                                       SIMachineFunctionInfo &Info) {
1622   if (Info.hasWorkItemIDX())
1623  Info.setWorkItemIDX(allocateVGPR32Input(CCInfo));
1624 
1625  if (Info.hasWorkItemIDY())
1626  Info.setWorkItemIDY(allocateVGPR32Input(CCInfo));
1627 
1628  if (Info.hasWorkItemIDZ())
1629  Info.setWorkItemIDZ(allocateVGPR32Input(CCInfo));
1630 }
1631 
1632 static void allocateSpecialInputSGPRs(CCState &CCInfo,
1633                                       MachineFunction &MF,
1634                                       const SIRegisterInfo &TRI,
1635                                       SIMachineFunctionInfo &Info) {
1636   auto &ArgInfo = Info.getArgInfo();
1637 
1638  // TODO: Unify handling with private memory pointers.
1639 
1640  if (Info.hasDispatchPtr())
1641  ArgInfo.DispatchPtr = allocateSGPR64Input(CCInfo);
1642 
1643  if (Info.hasQueuePtr())
1644  ArgInfo.QueuePtr = allocateSGPR64Input(CCInfo);
1645 
1646  if (Info.hasKernargSegmentPtr())
1647  ArgInfo.KernargSegmentPtr = allocateSGPR64Input(CCInfo);
1648 
1649  if (Info.hasDispatchID())
1650  ArgInfo.DispatchID = allocateSGPR64Input(CCInfo);
1651 
1652  // flat_scratch_init is not applicable for non-kernel functions.
1653 
1654  if (Info.hasWorkGroupIDX())
1655  ArgInfo.WorkGroupIDX = allocateSGPR32Input(CCInfo);
1656 
1657  if (Info.hasWorkGroupIDY())
1658  ArgInfo.WorkGroupIDY = allocateSGPR32Input(CCInfo);
1659 
1660  if (Info.hasWorkGroupIDZ())
1661  ArgInfo.WorkGroupIDZ = allocateSGPR32Input(CCInfo);
1662 
1663  if (Info.hasImplicitArgPtr())
1664  ArgInfo.ImplicitArgPtr = allocateSGPR64Input(CCInfo);
1665 }
1666 
1667 // Allocate special inputs passed in user SGPRs.
1668 static void allocateHSAUserSGPRs(CCState &CCInfo,
1669  MachineFunction &MF,
1670  const SIRegisterInfo &TRI,
1671                                  SIMachineFunctionInfo &Info) {
1672   if (Info.hasImplicitBufferPtr()) {
1673  unsigned ImplicitBufferPtrReg = Info.addImplicitBufferPtr(TRI);
1674  MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass);
1675  CCInfo.AllocateReg(ImplicitBufferPtrReg);
1676  }
1677 
1678  // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
1679  if (Info.hasPrivateSegmentBuffer()) {
1680  unsigned PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI);
1681  MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass);
1682  CCInfo.AllocateReg(PrivateSegmentBufferReg);
1683  }
1684 
1685  if (Info.hasDispatchPtr()) {
1686  unsigned DispatchPtrReg = Info.addDispatchPtr(TRI);
1687  MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass);
1688  CCInfo.AllocateReg(DispatchPtrReg);
1689  }
1690 
1691  if (Info.hasQueuePtr()) {
1692  unsigned QueuePtrReg = Info.addQueuePtr(TRI);
1693  MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass);
1694  CCInfo.AllocateReg(QueuePtrReg);
1695  }
1696 
1697  if (Info.hasKernargSegmentPtr()) {
1698  unsigned InputPtrReg = Info.addKernargSegmentPtr(TRI);
1699  MF.addLiveIn(InputPtrReg, &AMDGPU::SGPR_64RegClass);
1700  CCInfo.AllocateReg(InputPtrReg);
1701  }
1702 
1703  if (Info.hasDispatchID()) {
1704  unsigned DispatchIDReg = Info.addDispatchID(TRI);
1705  MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass);
1706  CCInfo.AllocateReg(DispatchIDReg);
1707  }
1708 
1709  if (Info.hasFlatScratchInit()) {
1710  unsigned FlatScratchInitReg = Info.addFlatScratchInit(TRI);
1711  MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
1712  CCInfo.AllocateReg(FlatScratchInitReg);
1713  }
1714 
1715  // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read
1716  // these from the dispatch pointer.
1717 }
1718 
1719 // Allocate special input registers that are initialized per-wave.
1720 static void allocateSystemSGPRs(CCState &CCInfo,
1721  MachineFunction &MF,
1722                                 SIMachineFunctionInfo &Info,
1723                                 CallingConv::ID CallConv,
1724  bool IsShader) {
1725  if (Info.hasWorkGroupIDX()) {
1726  unsigned Reg = Info.addWorkGroupIDX();
1727  MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1728  CCInfo.AllocateReg(Reg);
1729  }
1730 
1731  if (Info.hasWorkGroupIDY()) {
1732  unsigned Reg = Info.addWorkGroupIDY();
1733  MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1734  CCInfo.AllocateReg(Reg);
1735  }
1736 
1737  if (Info.hasWorkGroupIDZ()) {
1738  unsigned Reg = Info.addWorkGroupIDZ();
1739  MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1740  CCInfo.AllocateReg(Reg);
1741  }
1742 
1743  if (Info.hasWorkGroupInfo()) {
1744  unsigned Reg = Info.addWorkGroupInfo();
1745  MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1746  CCInfo.AllocateReg(Reg);
1747  }
1748 
1749  if (Info.hasPrivateSegmentWaveByteOffset()) {
1750  // Scratch wave offset passed in system SGPR.
1751  unsigned PrivateSegmentWaveByteOffsetReg;
1752 
1753  if (IsShader) {
1754  PrivateSegmentWaveByteOffsetReg =
1756 
1757  // This is true if the scratch wave byte offset doesn't have a fixed
1758  // location.
1759  if (PrivateSegmentWaveByteOffsetReg == AMDGPU::NoRegister) {
1760  PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo);
1761  Info.setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg);
1762  }
1763  } else
1764  PrivateSegmentWaveByteOffsetReg = Info.addPrivateSegmentWaveByteOffset();
1765 
1766  MF.addLiveIn(PrivateSegmentWaveByteOffsetReg, &AMDGPU::SGPR_32RegClass);
1767  CCInfo.AllocateReg(PrivateSegmentWaveByteOffsetReg);
1768  }
1769 }
1770 
1772  MachineFunction &MF,
1773  const SIRegisterInfo &TRI,
1775   // Now that we've figured out where the scratch register inputs are, see if
1776   // we should reserve the arguments and use them directly.
1777  MachineFrameInfo &MFI = MF.getFrameInfo();
1778  bool HasStackObjects = MFI.hasStackObjects();
1779 
1780  // Record that we know we have non-spill stack objects so we don't need to
1781  // check all stack objects later.
1782  if (HasStackObjects)
1783  Info.setHasNonSpillStackObjects(true);
1784 
1785  // Everything live out of a block is spilled with fast regalloc, so it's
1786  // almost certain that spilling will be required.
1787  if (TM.getOptLevel() == CodeGenOpt::None)
1788  HasStackObjects = true;
1789 
1790  // For now assume stack access is needed in any callee functions, so we need
1791  // the scratch registers to pass in.
1792  bool RequiresStackAccess = HasStackObjects || MFI.hasCalls();
1793 
1794  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1795  if (ST.isAmdHsaOrMesa(MF.getFunction())) {
1796  if (RequiresStackAccess) {
1797  // If we have stack objects, we unquestionably need the private buffer
1798  // resource. For the Code Object V2 ABI, this will be the first 4 user
1799  // SGPR inputs. We can reserve those and use them directly.
1800 
1801  unsigned PrivateSegmentBufferReg = Info.getPreloadedReg(
1803  Info.setScratchRSrcReg(PrivateSegmentBufferReg);
1804 
1805  if (MFI.hasCalls()) {
1806  // If we have calls, we need to keep the frame register in a register
1807  // that won't be clobbered by a call, so ensure it is copied somewhere.
1808 
1809  // This is not a problem for the scratch wave offset, because the same
1810  // registers are reserved in all functions.
1811 
1812  // FIXME: Nothing is really ensuring this is a call preserved register,
1813  // it's just selected from the end so it happens to be.
1814  unsigned ReservedOffsetReg
1816  Info.setScratchWaveOffsetReg(ReservedOffsetReg);
1817  } else {
1818  unsigned PrivateSegmentWaveByteOffsetReg = Info.getPreloadedReg(
1820  Info.setScratchWaveOffsetReg(PrivateSegmentWaveByteOffsetReg);
1821  }
1822  } else {
1823  unsigned ReservedBufferReg
1825  unsigned ReservedOffsetReg
1827 
1828  // We tentatively reserve the last registers (skipping the last two
1829  // which may contain VCC). After register allocation, we'll replace
1830  // these with the ones immediately after those which were really
1831   // allocated. In the prologue, copies will be inserted from the argument
1832  // to these reserved registers.
1833  Info.setScratchRSrcReg(ReservedBufferReg);
1834  Info.setScratchWaveOffsetReg(ReservedOffsetReg);
1835  }
1836  } else {
1837  unsigned ReservedBufferReg = TRI.reservedPrivateSegmentBufferReg(MF);
1838 
1839  // Without HSA, relocations are used for the scratch pointer and the
1840  // buffer resource setup is always inserted in the prologue. Scratch wave
1841  // offset is still in an input SGPR.
1842  Info.setScratchRSrcReg(ReservedBufferReg);
1843 
1844  if (HasStackObjects && !MFI.hasCalls()) {
1845  unsigned ScratchWaveOffsetReg = Info.getPreloadedReg(
1847  Info.setScratchWaveOffsetReg(ScratchWaveOffsetReg);
1848  } else {
1849  unsigned ReservedOffsetReg
1851  Info.setScratchWaveOffsetReg(ReservedOffsetReg);
1852  }
1853  }
1854 }
1855 
1858  return !Info->isEntryFunction();
1859 }
1860 
1862 
1863 }
1864 
1867  const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
1869 
1870  const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
1871  if (!IStart)
1872  return;
1873 
1874  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
1875  MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
1876  MachineBasicBlock::iterator MBBI = Entry->begin();
1877  for (const MCPhysReg *I = IStart; *I; ++I) {
1878  const TargetRegisterClass *RC = nullptr;
1879  if (AMDGPU::SReg_64RegClass.contains(*I))
1880  RC = &AMDGPU::SGPR_64RegClass;
1881  else if (AMDGPU::SReg_32RegClass.contains(*I))
1882  RC = &AMDGPU::SGPR_32RegClass;
1883  else
1884  llvm_unreachable("Unexpected register class in CSRsViaCopy!");
1885 
1886  unsigned NewVR = MRI->createVirtualRegister(RC);
1887  // Create copy from CSR to a virtual register.
1888  Entry->addLiveIn(*I);
1889  BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
1890  .addReg(*I);
1891 
1892  // Insert the copy-back instructions right before the terminator.
1893  for (auto *Exit : Exits)
1894  BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
1895  TII->get(TargetOpcode::COPY), *I)
1896  .addReg(NewVR);
1897  }
1898 }
1899 
1900 SDValue SITargetLowering::LowerFormalArguments(
1901   SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1902  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
1903  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1905 
1906  MachineFunction &MF = DAG.getMachineFunction();
1907  const Function &Fn = MF.getFunction();
1908  FunctionType *FType = MF.getFunction().getFunctionType();
1910 
1911  if (Subtarget->isAmdHsaOS() && AMDGPU::isShader(CallConv)) {
1912  DiagnosticInfoUnsupported NoGraphicsHSA(
1913  Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc());
1914  DAG.getContext()->diagnose(NoGraphicsHSA);
1915  return DAG.getEntryNode();
1916  }
1917 
1920  BitVector Skipped(Ins.size());
1921  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1922  *DAG.getContext());
1923 
1924  bool IsShader = AMDGPU::isShader(CallConv);
1925  bool IsKernel = AMDGPU::isKernel(CallConv);
1926  bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CallConv);
1927 
1928  if (!IsEntryFunc) {
1929  // 4 bytes are reserved at offset 0 for the emergency stack slot. Skip over
1930  // this when allocating argument fixed offsets.
1931  CCInfo.AllocateStack(4, 4);
1932  }
1933 
1934  if (IsShader) {
1935  processShaderInputArgs(Splits, CallConv, Ins, Skipped, FType, Info);
1936 
1937  // At least one interpolation mode must be enabled or else the GPU will
1938  // hang.
1939  //
1940  // Check PSInputAddr instead of PSInputEnable. The idea is that if the user
1941  // set PSInputAddr, the user wants to enable some bits after the compilation
1942  // based on run-time states. Since we can't know what the final PSInputEna
1943  // will look like, so we shouldn't do anything here and the user should take
1944   // will look like, we shouldn't do anything here; the user should take
1945  //
1946  // Otherwise, the following restrictions apply:
1947  // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
1948  // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
1949  // enabled too.
1950  if (CallConv == CallingConv::AMDGPU_PS) {
1951  if ((Info->getPSInputAddr() & 0x7F) == 0 ||
1952  ((Info->getPSInputAddr() & 0xF) == 0 &&
1953  Info->isPSInputAllocated(11))) {
1954  CCInfo.AllocateReg(AMDGPU::VGPR0);
1955  CCInfo.AllocateReg(AMDGPU::VGPR1);
1956  Info->markPSInputAllocated(0);
1957  Info->markPSInputEnabled(0);
1958  }
1959  if (Subtarget->isAmdPalOS()) {
1960  // For isAmdPalOS, the user does not enable some bits after compilation
1961  // based on run-time states; the register values being generated here are
1962  // the final ones set in hardware. Therefore we need to apply the
1963  // workaround to PSInputAddr and PSInputEnable together. (The case where
1964  // a bit is set in PSInputAddr but not PSInputEnable is where the
1965  // frontend set up an input arg for a particular interpolation mode, but
1966  // nothing uses that input arg. Really we should have an earlier pass
1967  // that removes such an arg.)
1968  unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable();
1969  if ((PsInputBits & 0x7F) == 0 ||
1970  ((PsInputBits & 0xF) == 0 &&
1971  (PsInputBits >> 11 & 1)))
1972  Info->markPSInputEnabled(
1974  }
1975  }
1976 
1977  assert(!Info->hasDispatchPtr() &&
1978  !Info->hasKernargSegmentPtr() && !Info->hasFlatScratchInit() &&
1979  !Info->hasWorkGroupIDX() && !Info->hasWorkGroupIDY() &&
1980  !Info->hasWorkGroupIDZ() && !Info->hasWorkGroupInfo() &&
1981  !Info->hasWorkItemIDX() && !Info->hasWorkItemIDY() &&
1982  !Info->hasWorkItemIDZ());
1983  } else if (IsKernel) {
1984  assert(Info->hasWorkGroupIDX() && Info->hasWorkItemIDX());
1985  } else {
1986  Splits.append(Ins.begin(), Ins.end());
1987  }
1988 
1989  if (IsEntryFunc) {
1990  allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info);
1991  allocateHSAUserSGPRs(CCInfo, MF, *TRI, *Info);
1992  }
1993 
1994  if (IsKernel) {
1995  analyzeFormalArgumentsCompute(CCInfo, Ins);
1996  } else {
1997  CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, isVarArg);
1998  CCInfo.AnalyzeFormalArguments(Splits, AssignFn);
1999  }
2000 
2001  SmallVector<SDValue, 16> Chains;
2002 
2003  // FIXME: This is the minimum kernel argument alignment. We should improve
2004  // this to the maximum alignment of the arguments.
2005  //
2006  // FIXME: Alignment of explicit arguments totally broken with non-0 explicit
2007  // kern arg offset.
2008  const unsigned KernelArgBaseAlign = 16;
2009 
2010  for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) {
2011  const ISD::InputArg &Arg = Ins[i];
2012  if (Arg.isOrigArg() && Skipped[Arg.getOrigArgIndex()]) {
2013  InVals.push_back(DAG.getUNDEF(Arg.VT));
2014  continue;
2015  }
2016 
2017  CCValAssign &VA = ArgLocs[ArgIdx++];
2018  MVT VT = VA.getLocVT();
2019 
2020  if (IsEntryFunc && VA.isMemLoc()) {
2021  VT = Ins[i].VT;
2022  EVT MemVT = VA.getLocVT();
2023 
2024  const uint64_t Offset = VA.getLocMemOffset();
2025  unsigned Align = MinAlign(KernelArgBaseAlign, Offset);
2026 
2027  SDValue Arg = lowerKernargMemParameter(
2028  DAG, VT, MemVT, DL, Chain, Offset, Align, Ins[i].Flags.isSExt(), &Ins[i]);
2029  Chains.push_back(Arg.getValue(1));
2030 
2031  auto *ParamTy =
2032  dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex()));
2033  if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
2034  ParamTy && (ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
2035  ParamTy->getAddressSpace() == AMDGPUAS::REGION_ADDRESS)) {
2036  // On SI local pointers are just offsets into LDS, so they are always
2037  // less than 16-bits. On CI and newer they could potentially be
2038  // real pointers, so we can't guarantee their size.
2039  Arg = DAG.getNode(ISD::AssertZext, DL, Arg.getValueType(), Arg,
2040  DAG.getValueType(MVT::i16));
2041  }
2042 
2043  InVals.push_back(Arg);
2044  continue;
2045  } else if (!IsEntryFunc && VA.isMemLoc()) {
2046  SDValue Val = lowerStackParameter(DAG, VA, DL, Chain, Arg);
2047  InVals.push_back(Val);
2048  if (!Arg.Flags.isByVal())
2049  Chains.push_back(Val.getValue(1));
2050  continue;
2051  }
2052 
2053  assert(VA.isRegLoc() && "Parameter must be in a register!");
2054 
2055  unsigned Reg = VA.getLocReg();
2056  const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
2057  EVT ValVT = VA.getValVT();
2058 
2059  Reg = MF.addLiveIn(Reg, RC);
2060  SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT);
2061 
2062  if (Arg.Flags.isSRet() && !getSubtarget()->enableHugePrivateBuffer()) {
2063  // The return object should be reasonably addressable.
2064 
2065   // FIXME: This helps when the return is a real sret. If it is an
2066  // automatically inserted sret (i.e. CanLowerReturn returns false), an
2067  // extra copy is inserted in SelectionDAGBuilder which obscures this.
2068  unsigned NumBits = 32 - AssumeFrameIndexHighZeroBits;
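      // Illustrative note (not in the upstream source): with the default value
      // of the amdgpu-frame-index-zero-bits option above (5), NumBits is 27, so
      // the sret pointer is asserted to fit in the low 27 bits.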
2069  Val = DAG.getNode(ISD::AssertZext, DL, VT, Val,
2070  DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), NumBits)));
2071  }
2072 
2073  // If this is an 8 or 16-bit value, it is really passed promoted
2074  // to 32 bits. Insert an assert[sz]ext to capture this, then
2075  // truncate to the right size.
2076  switch (VA.getLocInfo()) {
2077  case CCValAssign::Full:
2078  break;
2079  case CCValAssign::BCvt:
2080  Val = DAG.getNode(ISD::BITCAST, DL, ValVT, Val);
2081  break;
2082  case CCValAssign::SExt:
2083  Val = DAG.getNode(ISD::AssertSext, DL, VT, Val,
2084  DAG.getValueType(ValVT));
2085  Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
2086  break;
2087  case CCValAssign::ZExt:
2088  Val = DAG.getNode(ISD::AssertZext, DL, VT, Val,
2089  DAG.getValueType(ValVT));
2090  Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
2091  break;
2092  case CCValAssign::AExt:
2093  Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
2094  break;
2095  default:
2096  llvm_unreachable("Unknown loc info!");
2097  }
2098 
2099  InVals.push_back(Val);
2100  }
2101 
2102  if (!IsEntryFunc) {
2103  // Special inputs come after user arguments.
2104  allocateSpecialInputVGPRs(CCInfo, MF, *TRI, *Info);
2105  }
2106 
2107  // Start adding system SGPRs.
2108  if (IsEntryFunc) {
2109  allocateSystemSGPRs(CCInfo, MF, *Info, CallConv, IsShader);
2110  } else {
2111  CCInfo.AllocateReg(Info->getScratchRSrcReg());
2112  CCInfo.AllocateReg(Info->getScratchWaveOffsetReg());
2113  CCInfo.AllocateReg(Info->getFrameOffsetReg());
2114  allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info);
2115  }
2116 
2117  auto &ArgUsageInfo =
2119  ArgUsageInfo.setFuncArgInfo(Fn, Info->getArgInfo());
2120 
2121  unsigned StackArgSize = CCInfo.getNextStackOffset();
2122  Info->setBytesInStackArgArea(StackArgSize);
2123 
2124  return Chains.empty() ? Chain :
2125  DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
2126 }
2127 
2128 // TODO: If return values can't fit in registers, we should return as many as
2129 // possible in registers before passing on stack.
2130 bool SITargetLowering::CanLowerReturn(
2131   CallingConv::ID CallConv,
2132  MachineFunction &MF, bool IsVarArg,
2133  const SmallVectorImpl<ISD::OutputArg> &Outs,
2134  LLVMContext &Context) const {
2135  // Replacing returns with sret/stack usage doesn't make sense for shaders.
2136  // FIXME: Also sort of a workaround for custom vector splitting in LowerReturn
2137  // for shaders. Vector types should be explicitly handled by CC.
2138  if (AMDGPU::isEntryFunctionCC(CallConv))
2139  return true;
2140 
2142  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
2143  return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, IsVarArg));
2144 }
2145 
2146 SDValue
2147 SITargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
2148                               bool isVarArg,
2149  const SmallVectorImpl<ISD::OutputArg> &Outs,
2150  const SmallVectorImpl<SDValue> &OutVals,
2151  const SDLoc &DL, SelectionDAG &DAG) const {
2152  MachineFunction &MF = DAG.getMachineFunction();
2154 
2155  if (AMDGPU::isKernel(CallConv)) {
2156  return AMDGPUTargetLowering::LowerReturn(Chain, CallConv, isVarArg, Outs,
2157  OutVals, DL, DAG);
2158  }
2159 
2160  bool IsShader = AMDGPU::isShader(CallConv);
2161 
2162  Info->setIfReturnsVoid(Outs.empty());
2163  bool IsWaveEnd = Info->returnsVoid() && IsShader;
2164 
2165  // CCValAssign - represent the assignment of the return value to a location.
2168 
2169  // CCState - Info about the registers and stack slots.
2170  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
2171  *DAG.getContext());
2172 
2173  // Analyze outgoing return values.
2174  CCInfo.AnalyzeReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg));
2175 
2176  SDValue Flag;
2177  SmallVector<SDValue, 48> RetOps;
2178  RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2179 
2180  // Add return address for callable functions.
2181  if (!Info->isEntryFunction()) {
2183  SDValue ReturnAddrReg = CreateLiveInRegister(
2184  DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64);
2185 
2186  // FIXME: Should be able to use a vreg here, but need a way to prevent it
2187  // from being allocated to a CSR.
2188 
2189  SDValue PhysReturnAddrReg = DAG.getRegister(TRI->getReturnAddressReg(MF),
2190  MVT::i64);
2191 
2192  Chain = DAG.getCopyToReg(Chain, DL, PhysReturnAddrReg, ReturnAddrReg, Flag);
2193  Flag = Chain.getValue(1);
2194 
2195  RetOps.push_back(PhysReturnAddrReg);
2196  }
2197 
2198  // Copy the result values into the output registers.
2199  for (unsigned I = 0, RealRVLocIdx = 0, E = RVLocs.size(); I != E;
2200  ++I, ++RealRVLocIdx) {
2201  CCValAssign &VA = RVLocs[I];
2202  assert(VA.isRegLoc() && "Can only return in registers!");
2203  // TODO: Partially return in registers if return values don't fit.
2204  SDValue Arg = OutVals[RealRVLocIdx];
2205 
2206  // Copied from other backends.
2207  switch (VA.getLocInfo()) {
2208  case CCValAssign::Full:
2209  break;
2210  case CCValAssign::BCvt:
2211  Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
2212  break;
2213  case CCValAssign::SExt:
2214  Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
2215  break;
2216  case CCValAssign::ZExt:
2217  Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
2218  break;
2219  case CCValAssign::AExt:
2220  Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
2221  break;
2222  default:
2223  llvm_unreachable("Unknown loc info!");
2224  }
2225 
2226  Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag);
2227  Flag = Chain.getValue(1);
2228  RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2229  }
2230 
2231  // FIXME: Does sret work properly?
2232  if (!Info->isEntryFunction()) {
2233  const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
2234  const MCPhysReg *I =
2235  TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
2236  if (I) {
2237  for (; *I; ++I) {
2238  if (AMDGPU::SReg_64RegClass.contains(*I))
2239  RetOps.push_back(DAG.getRegister(*I, MVT::i64));
2240  else if (AMDGPU::SReg_32RegClass.contains(*I))
2241  RetOps.push_back(DAG.getRegister(*I, MVT::i32));
2242  else
2243  llvm_unreachable("Unexpected register class in CSRsViaCopy!");
2244  }
2245  }
2246  }
2247 
2248  // Update chain and glue.
2249  RetOps[0] = Chain;
2250  if (Flag.getNode())
2251  RetOps.push_back(Flag);
2252 
2253  unsigned Opc = AMDGPUISD::ENDPGM;
2254  if (!IsWaveEnd)
2255  Opc = IsShader ? AMDGPUISD::RETURN_TO_EPILOG : AMDGPUISD::RET_FLAG;
2256  return DAG.getNode(Opc, DL, MVT::Other, RetOps);
2257 }
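// Note (added commentary, not from the original source): for a callable
// function the RetOps assembled above end up roughly as
//   <updated chain>, <physical return-address reg>, <one reg per return
//   value location>, [glue]
// and feed a RET_FLAG (or RETURN_TO_EPILOG for shaders); a void shader
// return collapses to ENDPGM instead. The concrete registers are whatever
// CCAssignFnForReturn chose, so any specific register here is illustrative.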
2258 
2259 SDValue SITargetLowering::LowerCallResult(
2260  SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg,
2261  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
2262  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool IsThisReturn,
2263  SDValue ThisVal) const {
2264  CCAssignFn *RetCC = CCAssignFnForReturn(CallConv, IsVarArg);
2265 
2266  // Assign locations to each value returned by this call.
2267  SmallVector<CCValAssign, 16> RVLocs;
2268  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
2269  *DAG.getContext());
2270  CCInfo.AnalyzeCallResult(Ins, RetCC);
2271 
2272  // Copy all of the result registers out of their specified physreg.
2273  for (unsigned i = 0; i != RVLocs.size(); ++i) {
2274  CCValAssign VA = RVLocs[i];
2275  SDValue Val;
2276 
2277  if (VA.isRegLoc()) {
2278  Val = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), InFlag);
2279  Chain = Val.getValue(1);
2280  InFlag = Val.getValue(2);
2281  } else if (VA.isMemLoc()) {
2282  report_fatal_error("TODO: return values in memory");
2283  } else
2284  llvm_unreachable("unknown argument location type");
2285 
2286  switch (VA.getLocInfo()) {
2287  case CCValAssign::Full:
2288  break;
2289  case CCValAssign::BCvt:
2290  Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
2291  break;
2292  case CCValAssign::ZExt:
2293  Val = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Val,
2294  DAG.getValueType(VA.getValVT()));
2295  Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2296  break;
2297  case CCValAssign::SExt:
2298  Val = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Val,
2299  DAG.getValueType(VA.getValVT()));
2300  Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2301  break;
2302  case CCValAssign::AExt:
2303  Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2304  break;
2305  default:
2306  llvm_unreachable("Unknown loc info!");
2307  }
2308 
2309  InVals.push_back(Val);
2310  }
2311 
2312  return Chain;
2313 }
2314 
2315 // Add code to pass special inputs required depending on used features separate
2316 // from the explicit user arguments present in the IR.
2317 void SITargetLowering::passSpecialInputs(
2318  CallLoweringInfo &CLI,
2319  CCState &CCInfo,
2320  const SIMachineFunctionInfo &Info,
2321  SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass,
2322  SmallVectorImpl<SDValue> &MemOpChains,
2323  SDValue Chain) const {
2324  // If we don't have a call site, this was a call inserted by
2325  // legalization. These can never use special inputs.
2326  if (!CLI.CS)
2327  return;
2328 
2329  const Function *CalleeFunc = CLI.CS.getCalledFunction();
2330  assert(CalleeFunc);
2331 
2332  SelectionDAG &DAG = CLI.DAG;
2333  const SDLoc &DL = CLI.DL;
2334 
2335  const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
2336 
2337  auto &ArgUsageInfo =
2338  DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>();
2339  const AMDGPUFunctionArgInfo &CalleeArgInfo
2340  = ArgUsageInfo.lookupFuncArgInfo(*CalleeFunc);
2341 
2342  const AMDGPUFunctionArgInfo &CallerArgInfo = Info.getArgInfo();
2343 
2344  // TODO: Unify with private memory register handling. This is complicated by
2345  // the fact that at least in kernels, the input argument is not necessarily
2346  // in the same location as the input.
2347  AMDGPUFunctionArgInfo::PreloadedValue InputRegs[] = {
2348  AMDGPUFunctionArgInfo::DISPATCH_PTR,
2349  AMDGPUFunctionArgInfo::QUEUE_PTR,
2350  AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR,
2351  AMDGPUFunctionArgInfo::DISPATCH_ID,
2352  AMDGPUFunctionArgInfo::WORKGROUP_ID_X,
2353  AMDGPUFunctionArgInfo::WORKGROUP_ID_Y,
2354  AMDGPUFunctionArgInfo::WORKGROUP_ID_Z,
2355  AMDGPUFunctionArgInfo::WORKITEM_ID_X,
2356  AMDGPUFunctionArgInfo::WORKITEM_ID_Y,
2357  AMDGPUFunctionArgInfo::WORKITEM_ID_Z,
2358  AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR
2359  };
2360 
2361  for (auto InputID : InputRegs) {
2362  const ArgDescriptor *OutgoingArg;
2363  const TargetRegisterClass *ArgRC;
2364 
2365  std::tie(OutgoingArg, ArgRC) = CalleeArgInfo.getPreloadedValue(InputID);
2366  if (!OutgoingArg)
2367  continue;
2368 
2369  const ArgDescriptor *IncomingArg;
2370  const TargetRegisterClass *IncomingArgRC;
2371  std::tie(IncomingArg, IncomingArgRC)
2372  = CallerArgInfo.getPreloadedValue(InputID);
2373  assert(IncomingArgRC == ArgRC);
2374 
2375  // All special arguments are ints for now.
2376  EVT ArgVT = TRI->getSpillSize(*ArgRC) == 8 ? MVT::i64 : MVT::i32;
2377  SDValue InputReg;
2378 
2379  if (IncomingArg) {
2380  InputReg = loadInputValue(DAG, ArgRC, ArgVT, DL, *IncomingArg);
2381  } else {
2382  // The implicit arg ptr is special because it doesn't have a corresponding
2383  // input for kernels, and is computed from the kernarg segment pointer.
2384  assert(InputID == AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
2385  InputReg = getImplicitArgPtr(DAG, DL);
2386  }
2387 
2388  if (OutgoingArg->isRegister()) {
2389  RegsToPass.emplace_back(OutgoingArg->getRegister(), InputReg);
2390  } else {
2391  unsigned SpecialArgOffset = CCInfo.AllocateStack(ArgVT.getStoreSize(), 4);
2392  SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, InputReg,
2393  SpecialArgOffset);
2394  MemOpChains.push_back(ArgStore);
2395  }
2396  }
2397 }
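// Added commentary (a sketch of the mechanism, not authoritative): if a
// callee was analyzed to need, say, the implicit argument pointer, its
// AMDGPUFunctionArgInfo reports an outgoing ArgDescriptor for
// IMPLICIT_ARG_PTR. The loop above then either copies the caller's
// corresponding input register into that outgoing register via RegsToPass,
// or stores the value to the stack slot reserved by AllocateStack when no
// register is available.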
2398 
2399 static bool canGuaranteeTCO(CallingConv::ID CC) {
2400  return CC == CallingConv::Fast;
2401 }
2402 
2403 /// Return true if we might ever do TCO for calls with this calling convention.
2404 static bool mayTailCallThisCC(CallingConv::ID CC) {
2405  switch (CC) {
2406  case CallingConv::C:
2407  return true;
2408  default:
2409  return canGuaranteeTCO(CC);
2410  }
2411 }
2412 
2413 bool SITargetLowering::isEligibleForTailCallOptimization(
2414  SDValue Callee, CallingConv::ID CalleeCC, bool IsVarArg,
2415  const SmallVectorImpl<ISD::OutputArg> &Outs,
2416  const SmallVectorImpl<SDValue> &OutVals,
2417  const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
2418  if (!mayTailCallThisCC(CalleeCC))
2419  return false;
2420 
2421  MachineFunction &MF = DAG.getMachineFunction();
2422  const Function &CallerF = MF.getFunction();
2423  CallingConv::ID CallerCC = CallerF.getCallingConv();
2424  const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2425  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
2426 
2427  // Kernels aren't callable, and don't have a live-in return address, so it
2428  // doesn't make sense to do a tail call with entry functions.
2429  if (!CallerPreserved)
2430  return false;
2431 
2432  bool CCMatch = CallerCC == CalleeCC;
2433 
2434  if (MF.getTarget().Options.GuaranteedTailCallOpt) {
2435  if (canGuaranteeTCO(CalleeCC) && CCMatch)
2436  return true;
2437  return false;
2438  }
2439 
2440  // TODO: Can we handle var args?
2441  if (IsVarArg)
2442  return false;
2443 
2444  for (const Argument &Arg : CallerF.args()) {
2445  if (Arg.hasByValAttr())
2446  return false;
2447  }
2448 
2449  LLVMContext &Ctx = *DAG.getContext();
2450 
2451  // Check that the call results are passed in the same way.
2452  if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, Ctx, Ins,
2453  CCAssignFnForCall(CalleeCC, IsVarArg),
2454  CCAssignFnForCall(CallerCC, IsVarArg)))
2455  return false;
2456 
2457  // The callee has to preserve all registers the caller needs to preserve.
2458  if (!CCMatch) {
2459  const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
2460  if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
2461  return false;
2462  }
2463 
2464  // Nothing more to check if the callee is taking no arguments.
2465  if (Outs.empty())
2466  return true;
2467 
2468  SmallVector<CCValAssign, 16> ArgLocs;
2469  CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, Ctx);
2470 
2471  CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, IsVarArg));
2472 
2473  const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
2474  // If the stack arguments for this call do not fit into our own save area then
2475  // the call cannot be made tail.
2476  // TODO: Is this really necessary?
2477  if (CCInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea())
2478  return false;
2479 
2480  const MachineRegisterInfo &MRI = MF.getRegInfo();
2481  return parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals);
2482 }
2483 
2484 bool SITargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
2485  if (!CI->isTailCall())
2486  return false;
2487 
2488  const Function *ParentFn = CI->getParent()->getParent();
2489  if (AMDGPU::isEntryFunctionCC(ParentFn->getCallingConv()))
2490  return false;
2491 
2492  auto Attr = ParentFn->getFnAttribute("disable-tail-calls");
2493  return (Attr.getValueAsString() != "true");
2494 }
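// Added commentary with a hedged IR example: a call is only a tail-call
// candidate here when the caller is not an entry function and does not carry
// "disable-tail-calls"="true", e.g. (function names are made up):
//   define float @caller(float %x) {
//     %r = tail call float @callee(float %x)
//     ret float %r
//   }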
2495 
2496 // The wave scratch offset register is used as the global base pointer.
2497 SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI,
2498  SmallVectorImpl<SDValue> &InVals) const {
2499  SelectionDAG &DAG = CLI.DAG;
2500  const SDLoc &DL = CLI.DL;
2501  SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
2502  SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
2503  SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
2504  SDValue Chain = CLI.Chain;
2505  SDValue Callee = CLI.Callee;
2506  bool &IsTailCall = CLI.IsTailCall;
2507  CallingConv::ID CallConv = CLI.CallConv;
2508  bool IsVarArg = CLI.IsVarArg;
2509  bool IsSibCall = false;
2510  bool IsThisReturn = false;
2511  MachineFunction &MF = DAG.getMachineFunction();
2512 
2513  if (IsVarArg) {
2514  return lowerUnhandledCall(CLI, InVals,
2515  "unsupported call to variadic function ");
2516  }
2517 
2518  if (!CLI.CS.getInstruction())
2519  report_fatal_error("unsupported libcall legalization");
2520 
2521  if (!CLI.CS.getCalledFunction()) {
2522  return lowerUnhandledCall(CLI, InVals,
2523  "unsupported indirect call to function ");
2524  }
2525 
2526  if (IsTailCall && MF.getTarget().Options.GuaranteedTailCallOpt) {
2527  return lowerUnhandledCall(CLI, InVals,
2528  "unsupported required tail call to function ");
2529  }
2530 
2531  if (AMDGPU::isShader(MF.getFunction().getCallingConv())) {
2532  // Note the issue is with the CC of the calling function, not of the call
2533  // itself.
2534  return lowerUnhandledCall(CLI, InVals,
2535  "unsupported call from graphics shader of function ");
2536  }
2537 
2538  // The first 4 bytes are reserved for the callee's emergency stack slot.
2539  if (IsTailCall) {
2540  IsTailCall = isEligibleForTailCallOptimization(
2541  Callee, CallConv, IsVarArg, Outs, OutVals, Ins, DAG);
2542  if (!IsTailCall && CLI.CS && CLI.CS.isMustTailCall()) {
2543  report_fatal_error("failed to perform tail call elimination on a call "
2544  "site marked musttail");
2545  }
2546 
2547  bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
2548 
2549  // A sibling call is one where we're under the usual C ABI and not planning
2550  // to change that but can still do a tail call:
2551  if (!TailCallOpt && IsTailCall)
2552  IsSibCall = true;
2553 
2554  if (IsTailCall)
2555  ++NumTailCalls;
2556  }
2557 
2558  const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
2559 
2560  // Analyze operands of the call, assigning locations to each operand.
2561  SmallVector<CCValAssign, 16> ArgLocs;
2562  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
2563  CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, IsVarArg);
2564 
2565  // The first 4 bytes are reserved for the callee's emergency stack slot.
2566  CCInfo.AllocateStack(4, 4);
2567 
2568  CCInfo.AnalyzeCallOperands(Outs, AssignFn);
2569 
2570  // Get a count of how many bytes are to be pushed on the stack.
2571  unsigned NumBytes = CCInfo.getNextStackOffset();
2572 
2573  if (IsSibCall) {
2574  // Since we're not changing the ABI to make this a tail call, the memory
2575  // operands are already available in the caller's incoming argument space.
2576  NumBytes = 0;
2577  }
2578 
2579  // FPDiff is the byte offset of the call's argument area from the callee's.
2580  // Stores to callee stack arguments will be placed in FixedStackSlots offset
2581  // by this amount for a tail call. In a sibling call it must be 0 because the
2582  // caller will deallocate the entire stack and the callee still expects its
2583  // arguments to begin at SP+0. Completely unused for non-tail calls.
2584  int32_t FPDiff = 0;
2585  MachineFrameInfo &MFI = MF.getFrameInfo();
2586  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2587 
2588  SDValue CallerSavedFP;
2589 
2590  // Adjust the stack pointer for the new arguments...
2591  // These operations are automatically eliminated by the prolog/epilog pass
2592  if (!IsSibCall) {
2593  Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
2594 
2595  SmallVector<SDValue, 4> CopyFromChains;
2596 
2597  unsigned OffsetReg = Info->getScratchWaveOffsetReg();
2598 
2599  // In the HSA case, this should be an identity copy.
2600  SDValue ScratchRSrcReg
2601  = DAG.getCopyFromReg(Chain, DL, Info->getScratchRSrcReg(), MVT::v4i32);
2602  RegsToPass.emplace_back(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, ScratchRSrcReg);
2603  CopyFromChains.push_back(ScratchRSrcReg.getValue(1));
2604 
2605  // TODO: Don't hardcode these registers and get from the callee function.
2606  SDValue ScratchWaveOffsetReg
2607  = DAG.getCopyFromReg(Chain, DL, OffsetReg, MVT::i32);
2608  RegsToPass.emplace_back(AMDGPU::SGPR4, ScratchWaveOffsetReg);
2609  CopyFromChains.push_back(ScratchWaveOffsetReg.getValue(1));
2610 
2611  if (!Info->isEntryFunction()) {
2612  // Avoid clobbering this function's FP value. In the current convention the
2613  // callee will overwrite this, so do a save/restore around the call site.
2614  CallerSavedFP = DAG.getCopyFromReg(Chain, DL,
2615  Info->getFrameOffsetReg(), MVT::i32);
2616  CopyFromChains.push_back(CallerSavedFP.getValue(1));
2617  }
2618 
2619  Chain = DAG.getTokenFactor(DL, CopyFromChains);
2620  }
2621 
2622  SmallVector<SDValue, 8> MemOpChains;
2623  MVT PtrVT = MVT::i32;
2624 
2625  // Walk the register/memloc assignments, inserting copies/loads.
2626  for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); i != e;
2627  ++i, ++realArgIdx) {
2628  CCValAssign &VA = ArgLocs[i];
2629  SDValue Arg = OutVals[realArgIdx];
2630 
2631  // Promote the value if needed.
2632  switch (VA.getLocInfo()) {
2633  case CCValAssign::Full:
2634  break;
2635  case CCValAssign::BCvt:
2636  Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
2637  break;
2638  case CCValAssign::ZExt:
2639  Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
2640  break;
2641  case CCValAssign::SExt:
2642  Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
2643  break;
2644  case CCValAssign::AExt:
2645  Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
2646  break;
2647  case CCValAssign::FPExt:
2648  Arg = DAG.getNode(ISD::FP_EXTEND, DL, VA.getLocVT(), Arg);
2649  break;
2650  default:
2651  llvm_unreachable("Unknown loc info!");
2652  }
2653 
2654  if (VA.isRegLoc()) {
2655  RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
2656  } else {
2657  assert(VA.isMemLoc());
2658 
2659  SDValue DstAddr;
2660  MachinePointerInfo DstInfo;
2661 
2662  unsigned LocMemOffset = VA.getLocMemOffset();
2663  int32_t Offset = LocMemOffset;
2664 
2665  SDValue PtrOff = DAG.getConstant(Offset, DL, PtrVT);
2666  unsigned Align = 0;
2667 
2668  if (IsTailCall) {
2669  ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
2670  unsigned OpSize = Flags.isByVal() ?
2671  Flags.getByValSize() : VA.getValVT().getStoreSize();
2672 
2673  // FIXME: We can have better than the minimum byval required alignment.
2674  Align = Flags.isByVal() ? Flags.getByValAlign() :
2675  MinAlign(Subtarget->getStackAlignment(), Offset);
2676 
2677  Offset = Offset + FPDiff;
2678  int FI = MFI.CreateFixedObject(OpSize, Offset, true);
2679 
2680  DstAddr = DAG.getFrameIndex(FI, PtrVT);
2681  DstInfo = MachinePointerInfo::getFixedStack(MF, FI);
2682 
2683  // Make sure any stack arguments overlapping with where we're storing
2684  // are loaded before this eventual operation. Otherwise they'll be
2685  // clobbered.
2686 
2687  // FIXME: Why is this really necessary? This seems to just result in a
2688  // lot of code to copy the stack and write them back to the same
2689  // locations, which are supposed to be immutable?
2690  Chain = addTokenForArgument(Chain, DAG, MFI, FI);
2691  } else {
2692  DstAddr = PtrOff;
2693  DstInfo = MachinePointerInfo::getStack(MF, LocMemOffset);
2694  Align = MinAlign(Subtarget->getStackAlignment(), LocMemOffset);
2695  }
2696 
2697  if (Outs[i].Flags.isByVal()) {
2698  SDValue SizeNode =
2699  DAG.getConstant(Outs[i].Flags.getByValSize(), DL, MVT::i32);
2700  SDValue Cpy = DAG.getMemcpy(
2701  Chain, DL, DstAddr, Arg, SizeNode, Outs[i].Flags.getByValAlign(),
2702  /*isVol = */ false, /*AlwaysInline = */ true,
2703  /*isTailCall = */ false, DstInfo,
2704  MachinePointerInfo(UndefValue::get(Type::getInt8PtrTy(
2705  *DAG.getContext(), AMDGPUAS::PRIVATE_ADDRESS))));
2706 
2707  MemOpChains.push_back(Cpy);
2708  } else {
2709  SDValue Store = DAG.getStore(Chain, DL, Arg, DstAddr, DstInfo, Align);
2710  MemOpChains.push_back(Store);
2711  }
2712  }
2713  }
2714 
2715  // Copy special input registers after user input arguments.
2716  passSpecialInputs(CLI, CCInfo, *Info, RegsToPass, MemOpChains, Chain);
2717 
2718  if (!MemOpChains.empty())
2719  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
2720 
2721  // Build a sequence of copy-to-reg nodes chained together with token chain
2722  // and flag operands which copy the outgoing args into the appropriate regs.
2723  SDValue InFlag;
2724  for (auto &RegToPass : RegsToPass) {
2725  Chain = DAG.getCopyToReg(Chain, DL, RegToPass.first,
2726  RegToPass.second, InFlag);
2727  InFlag = Chain.getValue(1);
2728  }
2729 
2730 
2731  SDValue PhysReturnAddrReg;
2732  if (IsTailCall) {
2733  // Since the return is being combined with the call, we need to pass on the
2734  // return address.
2735 
2736  const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2737  SDValue ReturnAddrReg = CreateLiveInRegister(
2738  DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64);
2739 
2740  PhysReturnAddrReg = DAG.getRegister(TRI->getReturnAddressReg(MF),
2741  MVT::i64);
2742  Chain = DAG.getCopyToReg(Chain, DL, PhysReturnAddrReg, ReturnAddrReg, InFlag);
2743  InFlag = Chain.getValue(1);
2744  }
2745 
2746  // We don't usually want to end the call-sequence here because we would tidy
2747  // the frame up *after* the call. However, in the ABI-changing tail-call case
2748  // we've carefully laid out the parameters so that when sp is reset they'll be
2749  // in the correct location.
2750  if (IsTailCall && !IsSibCall) {
2751  Chain = DAG.getCALLSEQ_END(Chain,
2752  DAG.getTargetConstant(NumBytes, DL, MVT::i32),
2753  DAG.getTargetConstant(0, DL, MVT::i32),
2754  InFlag, DL);
2755  InFlag = Chain.getValue(1);
2756  }
2757 
2758  std::vector<SDValue> Ops;
2759  Ops.push_back(Chain);
2760  Ops.push_back(Callee);
2761  // Add a redundant copy of the callee global which will not be legalized, as
2762  // we need direct access to the callee later.
2763  GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Callee);
2764  const GlobalValue *GV = GSD->getGlobal();
2765  Ops.push_back(DAG.getTargetGlobalAddress(GV, DL, MVT::i64));
2766 
2767  if (IsTailCall) {
2768  // Each tail call may have to adjust the stack by a different amount, so
2769  // this information must travel along with the operation for eventual
2770  // consumption by emitEpilogue.
2771  Ops.push_back(DAG.getTargetConstant(FPDiff, DL, MVT::i32));
2772 
2773  Ops.push_back(PhysReturnAddrReg);
2774  }
2775 
2776  // Add argument registers to the end of the list so that they are known live
2777  // into the call.
2778  for (auto &RegToPass : RegsToPass) {
2779  Ops.push_back(DAG.getRegister(RegToPass.first,
2780  RegToPass.second.getValueType()));
2781  }
2782 
2783  // Add a register mask operand representing the call-preserved registers.
2784 
2785  auto *TRI = static_cast<const SIRegisterInfo*>(Subtarget->getRegisterInfo());
2786  const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
2787  assert(Mask && "Missing call preserved mask for calling convention");
2788  Ops.push_back(DAG.getRegisterMask(Mask));
2789 
2790  if (InFlag.getNode())
2791  Ops.push_back(InFlag);
2792 
2793  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2794 
2795  // If we're doing a tail call, use a TC_RETURN here rather than an
2796  // actual call instruction.
2797  if (IsTailCall) {
2798  MFI.setHasTailCall();
2799  return DAG.getNode(AMDGPUISD::TC_RETURN, DL, NodeTys, Ops);
2800  }
2801 
2802  // Returns a chain and a flag for retval copy to use.
2803  SDValue Call = DAG.getNode(AMDGPUISD::CALL, DL, NodeTys, Ops);
2804  Chain = Call.getValue(0);
2805  InFlag = Call.getValue(1);
2806 
2807  if (CallerSavedFP) {
2808  SDValue FPReg = DAG.getRegister(Info->getFrameOffsetReg(), MVT::i32);
2809  Chain = DAG.getCopyToReg(Chain, DL, FPReg, CallerSavedFP, InFlag);
2810  InFlag = Chain.getValue(1);
2811  }
2812 
2813  uint64_t CalleePopBytes = NumBytes;
2814  Chain = DAG.getCALLSEQ_END(Chain, DAG.getTargetConstant(0, DL, MVT::i32),
2815  DAG.getTargetConstant(CalleePopBytes, DL, MVT::i32),
2816  InFlag, DL);
2817  if (!Ins.empty())
2818  InFlag = Chain.getValue(1);
2819 
2820  // Handle result values, copying them out of physregs into vregs that we
2821  // return.
2822  return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG,
2823  InVals, IsThisReturn,
2824  IsThisReturn ? OutVals[0] : SDValue());
2825 }
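// Added commentary (illustrative summary of the Ops built above): a normal
// call becomes
//   AMDGPUISD::CALL chain, callee, target-global, <arg regs...>, regmask, glue
// bracketed by CALLSEQ_START/CALLSEQ_END, while the tail-call path emits
//   AMDGPUISD::TC_RETURN chain, callee, target-global, FPDiff,
//                        return-address reg, <arg regs...>, regmask, glue
// The operand order simply mirrors the order in which Ops was populated.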
2826 
2827 unsigned SITargetLowering::getRegisterByName(const char* RegName, EVT VT,
2828  SelectionDAG &DAG) const {
2829  unsigned Reg = StringSwitch<unsigned>(RegName)
2830  .Case("m0", AMDGPU::M0)
2831  .Case("exec", AMDGPU::EXEC)
2832  .Case("exec_lo", AMDGPU::EXEC_LO)
2833  .Case("exec_hi", AMDGPU::EXEC_HI)
2834  .Case("flat_scratch", AMDGPU::FLAT_SCR)
2835  .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
2836  .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
2837  .Default(AMDGPU::NoRegister);
2838 
2839  if (Reg == AMDGPU::NoRegister) {
2840  report_fatal_error(Twine("invalid register name \""
2841  + StringRef(RegName) + "\"."));
2842 
2843  }
2844 
2845  if ((Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS ||
2846  Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10) &&
2847  Subtarget->getRegisterInfo()->regsOverlap(Reg, AMDGPU::FLAT_SCR)) {
2848  report_fatal_error(Twine("invalid register \""
2849  + StringRef(RegName) + "\" for subtarget."));
2850  }
2851 
2852  switch (Reg) {
2853  case AMDGPU::M0:
2854  case AMDGPU::EXEC_LO:
2855  case AMDGPU::EXEC_HI:
2856  case AMDGPU::FLAT_SCR_LO:
2857  case AMDGPU::FLAT_SCR_HI:
2858  if (VT.getSizeInBits() == 32)
2859  return Reg;
2860  break;
2861  case AMDGPU::EXEC:
2862  case AMDGPU::FLAT_SCR:
2863  if (VT.getSizeInBits() == 64)
2864  return Reg;
2865  break;
2866  default:
2867  llvm_unreachable("missing register type checking");
2868  }
2869 
2870  report_fatal_error(Twine("invalid type for register \""
2871  + StringRef(RegName) + "\"."));
2872 }
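// Added commentary: this hook is reached via the llvm.read_register /
// llvm.write_register intrinsics. A hedged IR example that would query the
// 64-bit EXEC mask (assuming a wave64 target; the metadata operand names the
// register):
//   %exec = call i64 @llvm.read_register.i64(metadata !0)
//   !0 = !{!"exec"}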
2873 
2874 // If kill is not the last instruction, split the block so kill is always a
2875 // proper terminator.
2876 MachineBasicBlock *SITargetLowering::splitKillBlock(MachineInstr &MI,
2877  MachineBasicBlock *BB) const {
2878  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
2879 
2880  MachineBasicBlock::iterator SplitPoint(&MI);
2881  ++SplitPoint;
2882 
2883  if (SplitPoint == BB->end()) {
2884  // Don't bother with a new block.
2885  MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode()));
2886  return BB;
2887  }
2888 
2889  MachineFunction *MF = BB->getParent();
2890  MachineBasicBlock *SplitBB
2891  = MF->CreateMachineBasicBlock(BB->getBasicBlock());
2892 
2893  MF->insert(++MachineFunction::iterator(BB), SplitBB);
2894  SplitBB->splice(SplitBB->begin(), BB, SplitPoint, BB->end());
2895 
2896  SplitBB->transferSuccessorsAndUpdatePHIs(BB);
2897  BB->addSuccessor(SplitBB);
2898 
2899  MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode()));
2900  return SplitBB;
2901 }
2902 
2903 // Do a v_movrels_b32 or v_movreld_b32 for each unique value of \p IdxReg in the
2904 // wavefront. If the value is uniform and just happens to be in a VGPR, this
2905 // will only do one iteration. In the worst case, this will loop 64 times.
2906 //
2907 // TODO: Just use v_readlane_b32 if we know the VGPR has a uniform value.
2908 static MachineBasicBlock::iterator emitLoadM0FromVGPRLoop(
2909  const SIInstrInfo *TII,
2910  MachineRegisterInfo &MRI,
2911  MachineBasicBlock &OrigBB,
2912  MachineBasicBlock &LoopBB,
2913  const DebugLoc &DL,
2914  const MachineOperand &IdxReg,
2915  unsigned InitReg,
2916  unsigned ResultReg,
2917  unsigned PhiReg,
2918  unsigned InitSaveExecReg,
2919  int Offset,
2920  bool UseGPRIdxMode,
2921  bool IsIndirectSrc) {
2922  MachineBasicBlock::iterator I = LoopBB.begin();
2923 
2924  unsigned PhiExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
2925  unsigned NewExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
2926  unsigned CurrentIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
2927  unsigned CondReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
2928 
2929  BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiReg)
2930  .addReg(InitReg)
2931  .addMBB(&OrigBB)
2932  .addReg(ResultReg)
2933  .addMBB(&LoopBB);
2934 
2935  BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiExec)
2936  .addReg(InitSaveExecReg)
2937  .addMBB(&OrigBB)
2938  .addReg(NewExec)
2939  .addMBB(&LoopBB);
2940 
2941  // Read the next variant <- also loop target.
2942  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), CurrentIdxReg)
2943  .addReg(IdxReg.getReg(), getUndefRegState(IdxReg.isUndef()));
2944 
2945  // Compare the just read M0 value to all possible Idx values.
2946  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e64), CondReg)
2947  .addReg(CurrentIdxReg)
2948  .addReg(IdxReg.getReg(), 0, IdxReg.getSubReg());
2949 
2950  // Update EXEC, save the original EXEC value to VCC.
2951  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), NewExec)
2952  .addReg(CondReg, RegState::Kill);
2953 
2954  MRI.setSimpleHint(NewExec, CondReg);
2955 
2956  if (UseGPRIdxMode) {
2957  unsigned IdxReg;
2958  if (Offset == 0) {
2959  IdxReg = CurrentIdxReg;
2960  } else {
2961  IdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
2962  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), IdxReg)
2963  .addReg(CurrentIdxReg, RegState::Kill)
2964  .addImm(Offset);
2965  }
2966  unsigned IdxMode = IsIndirectSrc ?
2967  AMDGPU::VGPRIndexMode::SRC0_ENABLE : AMDGPU::VGPRIndexMode::DST_ENABLE;
2968  MachineInstr *SetOn =
2969  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
2970  .addReg(IdxReg, RegState::Kill)
2971  .addImm(IdxMode);
2972  SetOn->getOperand(3).setIsUndef();
2973  } else {
2974  // Move index from VCC into M0
2975  if (Offset == 0) {
2976  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2977  .addReg(CurrentIdxReg, RegState::Kill);
2978  } else {
2979  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
2980  .addReg(CurrentIdxReg, RegState::Kill)
2981  .addImm(Offset);
2982  }
2983  }
2984 
2985  // Update EXEC, switch all done bits to 0 and all todo bits to 1.
2986  MachineInstr *InsertPt =
2987  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_XOR_B64_term), AMDGPU::EXEC)
2988  .addReg(AMDGPU::EXEC)
2989  .addReg(NewExec);
2990 
2991  // XXX - s_xor_b64 sets scc to 1 if the result is nonzero, so can we use
2992  // s_cbranch_scc0?
2993 
2994  // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover.
2995  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
2996  .addMBB(&LoopBB);
2997 
2998  return InsertPt->getIterator();
2999 }
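// Added commentary (rough pseudo-ISA for the loop built above; not verbatim
// compiler output):
//   loop:
//     v_readfirstlane_b32 s_idx, v_idx        ; take one lane's index
//     v_cmp_eq_u32        s[cond], s_idx, v_idx
//     s_and_saveexec_b64  s[save], s[cond]    ; run only the matching lanes
//     <set M0 / GPR index from s_idx and do the indexed access>
//     s_xor_b64           exec, exec, s[save] ; clear the lanes just handled
//     s_cbranch_execnz    loop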
3000 
3001 // This has slightly sub-optimal regalloc when the source vector is killed by
3002 // the read. The register allocator does not understand that the kill is
3003 // per-workitem, so the source is kept alive for the whole loop; we therefore
3004 // end up not re-using a subregister from it, using 1 more VGPR than necessary.
3005 // This extra register was saved when this was expanded after register allocation.
3006 static MachineBasicBlock::iterator loadM0FromVGPR(const SIInstrInfo *TII,
3007  MachineBasicBlock &MBB,
3008  MachineInstr &MI,
3009  unsigned InitResultReg,
3010  unsigned PhiReg,
3011  int Offset,
3012  bool UseGPRIdxMode,
3013  bool IsIndirectSrc) {
3014  MachineFunction *MF = MBB.getParent();
3015  MachineRegisterInfo &MRI = MF->getRegInfo();
3016  const DebugLoc &DL = MI.getDebugLoc();
3017  MachineBasicBlock::iterator I(&MI);
3018 
3019  unsigned DstReg = MI.getOperand(0).getReg();
3020  unsigned SaveExec = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
3021  unsigned TmpExec = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
3022 
3023  BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), TmpExec);
3024 
3025  // Save the EXEC mask
3026  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B64), SaveExec)
3027  .addReg(AMDGPU::EXEC);
3028 
3029  // To insert the loop we need to split the block. Move everything after this
3030  // point to a new block, and insert a new empty block between the two.
3031  MachineBasicBlock *LoopBB = MF->CreateMachineBasicBlock();
3032  MachineBasicBlock *RemainderBB = MF->CreateMachineBasicBlock();
3033  MachineFunction::iterator MBBI(MBB);
3034  ++MBBI;
3035 
3036  MF->insert(MBBI, LoopBB);
3037  MF->insert(MBBI, RemainderBB);
3038 
3039  LoopBB->addSuccessor(LoopBB);
3040  LoopBB->addSuccessor(RemainderBB);
3041 
3042  // Move the rest of the block into a new block.
3043  RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB);
3044  RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end());
3045 
3046  MBB.addSuccessor(LoopBB);
3047 
3048  const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
3049 
3050  auto InsPt = emitLoadM0FromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, *Idx,
3051  InitResultReg, DstReg, PhiReg, TmpExec,
3052  Offset, UseGPRIdxMode, IsIndirectSrc);
3053 
3054  MachineBasicBlock::iterator First = RemainderBB->begin();
3055  BuildMI(*RemainderBB, First, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
3056  .addReg(SaveExec);
3057 
3058  return InsPt;
3059 }
3060 
3061 // Returns subreg index, offset
3062 static std::pair<unsigned, int>
3063 computeIndirectRegAndOffset(const SIRegisterInfo &TRI,
3064  const TargetRegisterClass *SuperRC,
3065  unsigned VecReg,
3066  int Offset) {
3067  int NumElts = TRI.getRegSizeInBits(*SuperRC) / 32;
3068 
3069  // Skip out of bounds offsets, or else we would end up using an undefined
3070  // register.
3071  if (Offset >= NumElts || Offset < 0)
3072  return std::make_pair(AMDGPU::sub0, Offset);
3073 
3074  return std::make_pair(AMDGPU::sub0 + Offset, 0);
3075 }
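// Added commentary with a worked example: for a 128-bit register class,
// NumElts is 128/32 = 4. A constant offset of 2 is folded into the
// subregister, giving (sub2, 0); an offset of 5 is out of bounds and is
// returned unchanged as (sub0, 5) so no nonexistent subregister is formed.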
3076 
3077 // Return true if the index is an SGPR and was set.
3078 static bool setM0ToIndexFromSGPR(const SIInstrInfo *TII,
3079  MachineRegisterInfo &MRI,
3080  MachineInstr &MI,
3081  int Offset,
3082  bool UseGPRIdxMode,
3083  bool IsIndirectSrc) {
3084  MachineBasicBlock *MBB = MI.getParent();
3085  const DebugLoc &DL = MI.getDebugLoc();
3086  MachineBasicBlock::iterator I(&MI);
3087 
3088  const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
3089  const TargetRegisterClass *IdxRC = MRI.getRegClass(Idx->getReg());
3090 
3091  assert(Idx->getReg() != AMDGPU::NoRegister);
3092 
3093  if (!TII->getRegisterInfo().isSGPRClass(IdxRC))
3094  return false;
3095 
3096  if (UseGPRIdxMode) {
3097  unsigned IdxMode = IsIndirectSrc ?
3098  AMDGPU::VGPRIndexMode::SRC0_ENABLE : AMDGPU::VGPRIndexMode::DST_ENABLE;
3099  if (Offset == 0) {
3100  MachineInstr *SetOn =
3101  BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
3102  .add(*Idx)
3103  .addImm(IdxMode);
3104 
3105  SetOn->getOperand(3).setIsUndef();
3106  } else {
3107  unsigned Tmp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
3108  BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), Tmp)
3109  .add(*Idx)
3110  .addImm(Offset);
3111  MachineInstr *SetOn =
3112  BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
3113  .addReg(Tmp, RegState::Kill)
3114  .addImm(IdxMode);
3115 
3116  SetOn->getOperand(3).setIsUndef();
3117  }
3118 
3119  return true;
3120  }
3121 
3122  if (Offset == 0) {
3123  BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
3124  .add(*Idx);
3125  } else {
3126  BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
3127  .add(*Idx)
3128  .addImm(Offset);
3129  }
3130 
3131  return true;
3132 }
3133 
3134 // Control flow needs to be inserted if indexing with a VGPR.
3135 static MachineBasicBlock *emitIndirectSrc(MachineInstr &MI,
3136  MachineBasicBlock &MBB,
3137  const GCNSubtarget &ST) {
3138  const SIInstrInfo *TII = ST.getInstrInfo();
3139  const SIRegisterInfo &TRI = TII->getRegisterInfo();
3140  MachineFunction *MF = MBB.getParent();
3141  MachineRegisterInfo &MRI = MF->getRegInfo();
3142 
3143  unsigned Dst = MI.getOperand(0).getReg();
3144  unsigned SrcReg = TII->getNamedOperand(MI, AMDGPU::OpName::src)->getReg();
3145  int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
3146 
3147  const TargetRegisterClass *VecRC = MRI.getRegClass(SrcReg);
3148 
3149  unsigned SubReg;
3150  std::tie(SubReg, Offset)
3151  = computeIndirectRegAndOffset(TRI, VecRC, SrcReg, Offset);
3152 
3153  bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode);
3154 
3155  if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, true)) {
3156  MachineBasicBlock::iterator I(&MI);
3157  const DebugLoc &DL = MI.getDebugLoc();
3158 
3159  if (UseGPRIdxMode) {
3160  // TODO: Look at the uses to avoid the copy. This may require rescheduling
3161  // to avoid interfering with other uses, so probably requires a new
3162  // optimization pass.
3163  BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
3164  .addReg(SrcReg, RegState::Undef, SubReg)
3165  .addReg(SrcReg, RegState::Implicit)
3166  .addReg(AMDGPU::M0, RegState::Implicit);
3167  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3168  } else {
3169  BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
3170  .addReg(SrcReg, RegState::Undef, SubReg)
3171  .addReg(SrcReg, RegState::Implicit);
3172  }
3173 
3174  MI.eraseFromParent();
3175 
3176  return &MBB;
3177  }
3178 
3179  const DebugLoc &DL = MI.getDebugLoc();
3180  MachineBasicBlock::iterator I(&MI);
3181 
3182  unsigned PhiReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3183  unsigned InitReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3184 
3185  BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), InitReg);
3186 
3187  auto InsPt = loadM0FromVGPR(TII, MBB, MI, InitReg, PhiReg,
3188  Offset, UseGPRIdxMode, true);
3189  MachineBasicBlock *LoopBB = InsPt->getParent();
3190 
3191  if (UseGPRIdxMode) {
3192  BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
3193  .addReg(SrcReg, RegState::Undef, SubReg)
3194  .addReg(SrcReg, RegState::Implicit)
3195  .addReg(AMDGPU::M0, RegState::Implicit);
3196  BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3197  } else {
3198  BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
3199  .addReg(SrcReg, RegState::Undef, SubReg)
3200  .addReg(SrcReg, RegState::Implicit);
3201  }
3202 
3203  MI.eraseFromParent();
3204 
3205  return LoopBB;
3206 }
3207 
3208 static unsigned getMOVRELDPseudo(const SIRegisterInfo &TRI,
3209  const TargetRegisterClass *VecRC) {
3210  switch (TRI.getRegSizeInBits(*VecRC)) {
3211  case 32: // 4 bytes
3212  return AMDGPU::V_MOVRELD_B32_V1;
3213  case 64: // 8 bytes
3214  return AMDGPU::V_MOVRELD_B32_V2;
3215  case 128: // 16 bytes
3216  return AMDGPU::V_MOVRELD_B32_V4;
3217  case 256: // 32 bytes
3218  return AMDGPU::V_MOVRELD_B32_V8;
3219  case 512: // 64 bytes
3220  return AMDGPU::V_MOVRELD_B32_V16;
3221  default:
3222  llvm_unreachable("unsupported size for MOVRELD pseudos");
3223  }
3224 }
3225 
3226 static MachineBasicBlock *emitIndirectDst(MachineInstr &MI,
3227  MachineBasicBlock &MBB,
3228  const GCNSubtarget &ST) {
3229  const SIInstrInfo *TII = ST.getInstrInfo();
3230  const SIRegisterInfo &TRI = TII->getRegisterInfo();
3231  MachineFunction *MF = MBB.getParent();
3232  MachineRegisterInfo &MRI = MF->getRegInfo();
3233 
3234  unsigned Dst = MI.getOperand(0).getReg();
3235  const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src);
3236  const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
3237  const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val);
3238  int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
3239  const TargetRegisterClass *VecRC = MRI.getRegClass(SrcVec->getReg());
3240 
3241  // This can be an immediate, but will be folded later.
3242  assert(Val->getReg());
3243 
3244  unsigned SubReg;
3245  std::tie(SubReg, Offset) = computeIndirectRegAndOffset(TRI, VecRC,
3246  SrcVec->getReg(),
3247  Offset);
3248  bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode);
3249 
3250  if (Idx->getReg() == AMDGPU::NoRegister) {
3251  MachineBasicBlock::iterator I(&MI);
3252  const DebugLoc &DL = MI.getDebugLoc();
3253 
3254  assert(Offset == 0);
3255 
3256  BuildMI(MBB, I, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dst)
3257  .add(*SrcVec)
3258  .add(*Val)
3259  .addImm(SubReg);
3260 
3261  MI.eraseFromParent();
3262  return &MBB;
3263  }
3264 
3265  if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, false)) {
3266  MachineBasicBlock::iterator I(&MI);
3267  const DebugLoc &DL = MI.getDebugLoc();
3268 
3269  if (UseGPRIdxMode) {
3270  BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
3271  .addReg(SrcVec->getReg(), RegState::Undef, SubReg) // vdst
3272  .add(*Val)
3273  .addReg(Dst, RegState::ImplicitDefine)
3274  .addReg(SrcVec->getReg(), RegState::Implicit)
3275  .addReg(AMDGPU::M0, RegState::Implicit);
3276 
3277  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3278  } else {
3279  const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC));
3280 
3281  BuildMI(MBB, I, DL, MovRelDesc)
3282  .addReg(Dst, RegState::Define)
3283  .addReg(SrcVec->getReg())
3284  .add(*Val)
3285  .addImm(SubReg - AMDGPU::sub0);
3286  }
3287 
3288  MI.eraseFromParent();
3289  return &MBB;
3290  }
3291 
3292  if (Val->isReg())
3293  MRI.clearKillFlags(Val->getReg());
3294 
3295  const DebugLoc &DL = MI.getDebugLoc();
3296 
3297  unsigned PhiReg = MRI.createVirtualRegister(VecRC);
3298 
3299  auto InsPt = loadM0FromVGPR(TII, MBB, MI, SrcVec->getReg(), PhiReg,
3300  Offset, UseGPRIdxMode, false);
3301  MachineBasicBlock *LoopBB = InsPt->getParent();
3302 
3303  if (UseGPRIdxMode) {
3304  BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
3305  .addReg(PhiReg, RegState::Undef, SubReg) // vdst
3306  .add(*Val) // src0
3307  .addReg(Dst, RegState::ImplicitDefine)
3308  .addReg(PhiReg, RegState::Implicit)
3309  .addReg(AMDGPU::M0, RegState::Implicit);
3310  BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3311  } else {
3312  const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC));
3313 
3314  BuildMI(*LoopBB, InsPt, DL, MovRelDesc)
3315  .addReg(Dst, RegState::Define)
3316  .addReg(PhiReg)
3317  .add(*Val)
3318  .addImm(SubReg - AMDGPU::sub0);
3319  }
3320 
3321  MI.eraseFromParent();
3322 
3323  return LoopBB;
3324 }
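// Added commentary (illustrative lowering, not verbatim output): with a
// uniform SGPR index and movrel, an insertelement with a dynamic index ends
// up roughly as
//   s_mov_b32     m0, s_idx
//   v_movreld_b32 v[vec_base], v_val   ; writes element m0 of the vector
// With a divergent VGPR index the same store is wrapped in the waterfall loop
// created by loadM0FromVGPR above.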
3325 
3326 MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
3327  MachineInstr &MI, MachineBasicBlock *BB) const {
3328 
3329  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3330  MachineFunction *MF = BB->getParent();
3331  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
3332 
3333  if (TII->isMIMG(MI)) {
3334  if (MI.memoperands_empty() && MI.mayLoadOrStore()) {
3335  report_fatal_error("missing mem operand from MIMG instruction");
3336  }
3337  // Add a memoperand for mimg instructions so that they aren't assumed to
3338  // be ordered memory instructions.
3339 
3340  return BB;
3341  }
3342 
3343  switch (MI.getOpcode()) {
3344  case AMDGPU::S_ADD_U64_PSEUDO:
3345  case AMDGPU::S_SUB_U64_PSEUDO: {
3346  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
3347  const DebugLoc &DL = MI.getDebugLoc();
3348 
3349  MachineOperand &Dest = MI.getOperand(0);
3350  MachineOperand &Src0 = MI.getOperand(1);
3351  MachineOperand &Src1 = MI.getOperand(2);
3352 
3353  unsigned DestSub0 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
3354  unsigned DestSub1 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
3355 
3356  MachineOperand Src0Sub0 = TII->buildExtractSubRegOrImm(MI, MRI,
3357  Src0, &AMDGPU::SReg_64RegClass, AMDGPU::sub0,
3358  &AMDGPU::SReg_32_XM0RegClass);
3359  MachineOperand Src0Sub1 = TII->buildExtractSubRegOrImm(MI, MRI,
3360  Src0, &AMDGPU::SReg_64RegClass, AMDGPU::sub1,
3361  &AMDGPU::SReg_32_XM0RegClass);
3362 
3363  MachineOperand Src1Sub0 = TII->buildExtractSubRegOrImm(MI, MRI,
3364  Src1, &AMDGPU::SReg_64RegClass, AMDGPU::sub0,
3365  &AMDGPU::SReg_32_XM0RegClass);
3366  MachineOperand Src1Sub1 = TII->buildExtractSubRegOrImm(MI, MRI,
3367  Src1, &AMDGPU::SReg_64RegClass, AMDGPU::sub1,
3368  &AMDGPU::SReg_32_XM0RegClass);
3369 
3370  bool IsAdd = (MI.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO);
3371 
3372  unsigned LoOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
3373  unsigned HiOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
3374  BuildMI(*BB, MI, DL, TII->get(LoOpc), DestSub0)
3375  .add(Src0Sub0)
3376  .add(Src1Sub0);
3377  BuildMI(*BB, MI, DL, TII->get(HiOpc), DestSub1)
3378  .add(Src0Sub1)
3379  .add(Src1Sub1);
3380  BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg())
3381  .addReg(DestSub0)
3382  .addImm(AMDGPU::sub0)
3383  .addReg(DestSub1)
3384  .addImm(AMDGPU::sub1);
3385  MI.eraseFromParent();
3386  return BB;
3387  }
3388  case AMDGPU::SI_INIT_M0: {
3389  BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(),
3390  TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
3391  .add(MI.getOperand(0));
3392  MI.eraseFromParent();
3393  return BB;
3394  }
3395  case AMDGPU::SI_INIT_EXEC:
3396  // This should be before all vector instructions.
3397  BuildMI(*BB, &*BB->begin(), MI.getDebugLoc(), TII->get(AMDGPU::S_MOV_B64),
3398  AMDGPU::EXEC)
3399  .addImm(MI.getOperand(0).getImm());
3400  MI.eraseFromParent();
3401  return BB;
3402 
3403  case AMDGPU::SI_INIT_EXEC_FROM_INPUT: {
3404  // Extract the thread count from an SGPR input and set EXEC accordingly.
3405  // Since BFM can't shift by 64, handle that case with CMP + CMOV.
3406  //
3407  // S_BFE_U32 count, input, {shift, 7}
3408  // S_BFM_B64 exec, count, 0
3409  // S_CMP_EQ_U32 count, 64
3410  // S_CMOV_B64 exec, -1
3411  MachineInstr *FirstMI = &*BB->begin();
3412  MachineRegisterInfo &MRI = MF->getRegInfo();
3413  unsigned InputReg = MI.getOperand(0).getReg();
3414  unsigned CountReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
3415  bool Found = false;
3416 
3417  // Move the COPY of the input reg to the beginning, so that we can use it.
3418  for (auto I = BB->begin(); I != &MI; I++) {
3419  if (I->getOpcode() != TargetOpcode::COPY ||
3420  I->getOperand(0).getReg() != InputReg)
3421  continue;
3422 
3423  if (I == FirstMI) {
3424  FirstMI = &*++BB->begin();
3425  } else {
3426  I->removeFromParent();
3427  BB->insert(FirstMI, &*I);
3428  }
3429  Found = true;
3430  break;
3431  }
3432  assert(Found);
3433  (void)Found;
3434 
3435  // This should be before all vector instructions.
3436  BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFE_U32), CountReg)
3437  .addReg(InputReg)
3438  .addImm((MI.getOperand(1).getImm() & 0x7f) | 0x70000);
3439  BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFM_B64),
3440  AMDGPU::EXEC)
3441  .addReg(CountReg)
3442  .addImm(0);
3443  BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMP_EQ_U32))
3444  .addReg(CountReg, RegState::Kill)
3445  .addImm(64);
3446  BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMOV_B64),
3447  AMDGPU::EXEC)
3448  .addImm(-1);
3449  MI.eraseFromParent();
3450  return BB;
3451  }
3452 
3453  case AMDGPU::GET_GROUPSTATICSIZE: {
3454  DebugLoc DL = MI.getDebugLoc();
3455  BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_MOV_B32))
3456  .add(MI.getOperand(0))
3457  .addImm(MFI->getLDSSize());
3458  MI.eraseFromParent();
3459  return BB;
3460  }
3461  case AMDGPU::SI_INDIRECT_SRC_V1:
3462  case AMDGPU::SI_INDIRECT_SRC_V2:
3463  case AMDGPU::SI_INDIRECT_SRC_V4:
3464  case AMDGPU::SI_INDIRECT_SRC_V8:
3465  case AMDGPU::SI_INDIRECT_SRC_V16:
3466  return emitIndirectSrc(MI, *BB, *getSubtarget());
3467  case AMDGPU::SI_INDIRECT_DST_V1:
3468  case AMDGPU::SI_INDIRECT_DST_V2:
3469  case AMDGPU::SI_INDIRECT_DST_V4:
3470  case AMDGPU::SI_INDIRECT_DST_V8:
3471  case AMDGPU::SI_INDIRECT_DST_V16:
3472  return emitIndirectDst(MI, *BB, *getSubtarget());
3473  case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO:
3474  case AMDGPU::SI_KILL_I1_PSEUDO:
3475  return splitKillBlock(MI, BB);
3476  case AMDGPU::V_CNDMASK_B64_PSEUDO: {
3477  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
3478 
3479  unsigned Dst = MI.getOperand(0).getReg();
3480  unsigned Src0 = MI.getOperand(1).getReg();
3481  unsigned Src1 = MI.getOperand(2).getReg();
3482  const DebugLoc &DL = MI.getDebugLoc();
3483  unsigned SrcCond = MI.getOperand(3).getReg();
3484 
3485  unsigned DstLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3486  unsigned DstHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3487  unsigned SrcCondCopy = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
3488 
3489  BuildMI(*BB, MI, DL, TII->get(AMDGPU::COPY), SrcCondCopy)
3490  .addReg(SrcCond);
3491  BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstLo)
3492  .addImm(0)
3493  .addReg(Src0, 0, AMDGPU::sub0)
3494  .addImm(0)
3495  .addReg(Src1, 0, AMDGPU::sub0)
3496  .addReg(SrcCondCopy);
3497  BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstHi)
3498  .addImm(0)
3499  .addReg(Src0, 0, AMDGPU::sub1)
3500  .addImm(0)
3501  .addReg(Src1, 0, AMDGPU::sub1)
3502  .addReg(SrcCondCopy);
3503 
3504  BuildMI(*BB, MI, DL, TII->get(AMDGPU::REG_SEQUENCE), Dst)
3505  .addReg(DstLo)
3506  .addImm(AMDGPU::sub0)
3507  .addReg(DstHi)
3508  .addImm(AMDGPU::sub1);
3509  MI.eraseFromParent();
3510  return BB;
3511  }
3512  case AMDGPU::SI_BR_UNDEF: {
3513  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3514  const DebugLoc &DL = MI.getDebugLoc();
3515  MachineInstr *Br = BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CBRANCH_SCC1))
3516  .add(MI.getOperand(0));
3517  Br->getOperand(1).setIsUndef(true); // read undef SCC
3518  MI.eraseFromParent();
3519  return BB;
3520  }
3521  case AMDGPU::ADJCALLSTACKUP:
3522  case AMDGPU::ADJCALLSTACKDOWN: {
3523  const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
3524  MachineInstrBuilder MIB(*MF, &MI);
3525 
3526  // Add an implicit use of the frame offset reg to prevent the restore copy
3527  // inserted after the call from being reordered after stack operations in
3528  // the caller's frame.
3529  MIB.addReg(Info->getStackPtrOffsetReg(), RegState::ImplicitDefine)
3530  .addReg(Info->getStackPtrOffsetReg(), RegState::Implicit)
3531  .addReg(Info->getFrameOffsetReg(), RegState::Implicit);
3532  return BB;
3533  }
3534  case AMDGPU::SI_CALL_ISEL: {
3535  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3536  const DebugLoc &DL = MI.getDebugLoc();
3537 
3538  unsigned ReturnAddrReg = TII->getRegisterInfo().getReturnAddressReg(*MF);
3539 
3540  MachineInstrBuilder MIB;
3541  MIB = BuildMI(*BB, MI, DL, TII->get(AMDGPU::SI_CALL), ReturnAddrReg);
3542 
3543  for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I)
3544  MIB.add(MI.getOperand(I));
3545 
3546  MIB.cloneMemRefs(MI);
3547  MI.eraseFromParent();
3548  return BB;
3549  }
3550  case AMDGPU::V_ADD_I32_e32:
3551  case AMDGPU::V_SUB_I32_e32:
3552  case AMDGPU::V_SUBREV_I32_e32: {
3553  // TODO: Define distinct V_*_I32_Pseudo instructions instead.
3554  const DebugLoc &DL = MI.getDebugLoc();
3555  unsigned Opc = MI.getOpcode();
3556 
3557  bool NeedClampOperand = false;
3558  if (TII->pseudoToMCOpcode(Opc) == -1) {
3559  Opc = AMDGPU::getVOPe64(Opc);
3560  NeedClampOperand = true;
3561  }
3562 
3563  auto I = BuildMI(*BB, MI, DL, TII->get(Opc), MI.getOperand(0).getReg());
3564  if (TII->isVOP3(*I)) {
3565  I.addReg(AMDGPU::VCC, RegState::Define);
3566  }
3567  I.add(MI.getOperand(1))
3568  .add(MI.getOperand(2));
3569  if (NeedClampOperand)
3570  I.addImm(0); // clamp bit for e64 encoding
3571 
3572  TII->legalizeOperands(*I);
3573 
3574  MI.eraseFromParent();
3575  return BB;
3576  }
3577  default:
3578  return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
3579  }
3580 }
3581 
3582 bool SITargetLowering::hasBitPreservingFPLogic(EVT VT) const {
3583  return isTypeLegal(VT.getScalarType());
3584 }
3585 
3586 bool SITargetLowering::enableAggressiveFMAFusion(EVT VT) const {
3587  // This currently forces unfolding various combinations of fsub into fma with
3588  // free fneg'd operands. As long as we have fast FMA (controlled by
3589  // isFMAFasterThanFMulAndFAdd), we should perform these.
3590 
3591  // When fma is quarter rate, for f64 where add / sub are at best half rate,
3592  // most of these combines appear to be cycle neutral but save on instruction
3593  // count / code size.
3594  return true;
3595 }
3596 
3597 EVT SITargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx,
3598  EVT VT) const {
3599  if (!VT.isVector()) {
3600  return MVT::i1;
3601  }
3602  return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements());
3603 }
3604 
3605 MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT VT) const {
3606  // TODO: Should i16 be used always if legal? For now it would force VALU
3607  // shifts.
3608  return (VT == MVT::i16) ? MVT::i16 : MVT::i32;
3609 }
3610 
3611 // Answering this is somewhat tricky and depends on the specific device, since
3612 // devices have different rates for fma and for f64 operations in general.
3613 //
3614 // v_fma_f64 and v_mul_f64 always take the same number of cycles as each other
3615 // regardless of which device (although the number of cycles differs between
3616 // devices), so it is always profitable for f64.
3617 //
3618 // v_fma_f32 takes 4 or 16 cycles depending on the device, so it is profitable
3619 // only on full rate devices. Normally, we should prefer selecting v_mad_f32
3620 // which we can always do even without fused FP ops since it returns the same
3621 // result as the separate operations and since it is always full
3622 // rate. Therefore, we lie and report that it is not faster for f32. v_mad_f32
3623 // however does not support denormals, so we do report fma as faster if we have
3624 // a fast fma device and require denormals.
3625 //
3626 bool SITargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
3627  VT = VT.getScalarType();
3628 
3629  switch (VT.getSimpleVT().SimpleTy) {
3630  case MVT::f32: {
3631  // This is as fast on some subtargets. However, we always have full rate f32
3632  // mad available which returns the same result as the separate operations
3633  // which we should prefer over fma. We can't use this if we want to support
3634  // denormals, so only report this in these cases.
3635  if (Subtarget->hasFP32Denormals())
3636  return Subtarget->hasFastFMAF32() || Subtarget->hasDLInsts();
3637 
3638  // If the subtarget has v_fmac_f32, that's just as good as v_mac_f32.
3639  return Subtarget->hasFastFMAF32() && Subtarget->hasDLInsts();
3640  }
3641  case MVT::f64:
3642  return true;
3643  case MVT::f16:
3644  return Subtarget->has16BitInsts() && Subtarget->hasFP16Denormals();
3645  default:
3646  break;
3647  }
3648 
3649  return false;
3650 }
3651 
3652 //===----------------------------------------------------------------------===//
3653 // Custom DAG Lowering Operations
3654 //===----------------------------------------------------------------------===//
3655 
3656 // Work around LegalizeDAG doing the wrong thing and fully scalarizing if the
3657 // wider vector type is legal.
3658 SDValue SITargetLowering::splitUnaryVectorOp(SDValue Op,
3659  SelectionDAG &DAG) const {
3660  unsigned Opc = Op.getOpcode();
3661  EVT VT = Op.getValueType();
3662  assert(VT == MVT::v4f16);
3663 
3664  SDValue Lo, Hi;
3665  std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
3666 
3667  SDLoc SL(Op);
3668  SDValue OpLo = DAG.getNode(Opc, SL, Lo.getValueType(), Lo,
3669  Op->getFlags());
3670  SDValue OpHi = DAG.getNode(Opc, SL, Hi.getValueType(), Hi,
3671  Op->getFlags());
3672 
3673  return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
3674 }
3675 
3676 // Work around LegalizeDAG doing the wrong thing and fully scalarizing if the
3677 // wider vector type is legal.
3678 SDValue SITargetLowering::splitBinaryVectorOp(SDValue Op,
3679  SelectionDAG &DAG) const {
3680  unsigned Opc = Op.getOpcode();
3681  EVT VT = Op.getValueType();
3682  assert(VT == MVT::v4i16 || VT == MVT::v4f16);
3683 
3684  SDValue Lo0, Hi0;
3685  std::tie(Lo0, Hi0) = DAG.SplitVectorOperand(Op.getNode(), 0);
3686  SDValue Lo1, Hi1;
3687  std::tie(Lo1, Hi1) = DAG.SplitVectorOperand(Op.getNode(), 1);
3688 
3689  SDLoc SL(Op);
3690 
3691  SDValue OpLo = DAG.getNode(Opc, SL, Lo0.getValueType(), Lo0, Lo1,
3692  Op->getFlags());
3693  SDValue OpHi = DAG.getNode(Opc, SL, Hi0.getValueType(), Hi0, Hi1,
3694  Op->getFlags());
3695 
3696  return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
3697 }
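// Added commentary with an example of the split (assuming a v4i16 add reaches
// here): (add v4i16 %a, %b) becomes
//   concat_vectors (add v2i16 %a.lo, %b.lo), (add v2i16 %a.hi, %b.hi)
// Each v2i16 half is a legal type, so LegalizeDAG no longer scalarizes the
// whole operation.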
3698 
3699 SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
3700  switch (Op.getOpcode()) {
3701  default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
3702  case ISD::BRCOND: return LowerBRCOND(Op, DAG);
3703  case ISD::LOAD: {
3704  SDValue Result = LowerLOAD(Op, DAG);
3705  assert((!Result.getNode() ||
3706  Result.getNode()->getNumValues() == 2) &&
3707  "Load should return a value and a chain");
3708  return Result;
3709  }
3710 
3711  case ISD::FSIN:
3712  case ISD::FCOS:
3713  return LowerTrig(Op, DAG);
3714  case ISD::SELECT: return LowerSELECT(Op, DAG);
3715  case ISD::FDIV: return LowerFDIV(Op, DAG);
3716  case ISD::ATOMIC_CMP_SWAP: return LowerATOMIC_CMP_SWAP(Op, DAG);
3717  case ISD::STORE: return LowerSTORE(Op, DAG);
3718  case ISD::GlobalAddress: {
3719  MachineFunction &MF = DAG.getMachineFunction();
3720  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
3721  return LowerGlobalAddress(MFI, Op, DAG);
3722  }
3723  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
3724  case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG);
3725  case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG);
3726  case ISD::ADDRSPACECAST: return lowerADDRSPACECAST(Op, DAG);
3727  case ISD::INSERT_VECTOR_ELT:
3728  return lowerINSERT_VECTOR_ELT(Op, DAG);
3729  case ISD::EXTRACT_VECTOR_ELT:
3730  return lowerEXTRACT_VECTOR_ELT(Op, DAG);
3731  case ISD::BUILD_VECTOR:
3732  return lowerBUILD_VECTOR(Op, DAG);
3733  case ISD::FP_ROUND:
3734  return lowerFP_ROUND(Op, DAG);
3735  case ISD::TRAP:
3736  return lowerTRAP(Op, DAG);
3737  case ISD::DEBUGTRAP:
3738  return lowerDEBUGTRAP(Op, DAG);
3739  case ISD::FABS:
3740  case ISD::FNEG:
3741  case ISD::FCANONICALIZE:
3742  return splitUnaryVectorOp(Op, DAG);
3743  case ISD::FMINNUM:
3744  case ISD::FMAXNUM:
3745  return lowerFMINNUM_FMAXNUM(Op, DAG);
3746  case ISD::SHL:
3747  case ISD::SRA:
3748  case ISD::SRL:
3749  case ISD::ADD:
3750  case ISD::SUB:
3751  case ISD::MUL:
3752  case ISD::SMIN:
3753  case ISD::SMAX:
3754  case ISD::UMIN:
3755  case ISD::UMAX:
3756  case ISD::FADD:
3757  case ISD::FMUL:
3758  case ISD::FMINNUM_IEEE:
3759  case ISD::FMAXNUM_IEEE:
3760  return splitBinaryVectorOp(Op, DAG);
3761  }
3762  return SDValue();
3763 }
3764 
3765 static SDValue adjustLoadValueTypeImpl(SDValue Result, EVT LoadVT,
3766  const SDLoc &DL,
3767  SelectionDAG &DAG, bool Unpacked) {
3768  if (!LoadVT.isVector())
3769  return Result;
3770 
3771  if (Unpacked) { // From v2i32/v4i32 back to v2f16/v4f16.
3772  // Truncate to v2i16/v4i16.
3773  EVT IntLoadVT = LoadVT.changeTypeToInteger();
3774 
3775  // Work around the legalizer not scalarizing the truncate after vector op
3776  // legalization by not creating an intermediate vector trunc.
3777  SmallVector<SDValue, 4> Elts;
3778  DAG.ExtractVectorElements(Result, Elts);
3779  for (SDValue &Elt : Elts)
3780  Elt = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Elt);
3781 
3782  Result = DAG.getBuildVector(IntLoadVT, DL, Elts);
3783 
3784  // Bitcast to original type (v2f16/v4f16).
3785  return DAG.getNode(ISD::BITCAST, DL, LoadVT, Result);
3786  }
3787 
3788  // Cast back to the original packed type.
3789  return DAG.getNode(ISD::BITCAST, DL, LoadVT, Result);
3790 }
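// Added commentary: on subtargets with unpacked D16 memory operations, a v2f16
// load reaches this point as v2i32, with one half-precision value in the low
// 16 bits of each element. The code above truncates each element to i16,
// rebuilds a v2i16, and bitcasts to the original v2f16; packed subtargets only
// need the final bitcast.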
3791 
3792 SDValue SITargetLowering::adjustLoadValueType(unsigned Opcode,
3793  MemSDNode *M,
3794  SelectionDAG &DAG,
3795  ArrayRef<SDValue> Ops,
3796  bool IsIntrinsic) const {
3797  SDLoc DL(M);
3798 
3799  bool Unpacked = Subtarget->hasUnpackedD16VMem();
3800  EVT LoadVT = M->getValueType(0);
3801 
3802  EVT EquivLoadVT = LoadVT;
3803  if (Unpacked && LoadVT.isVector()) {
3804  EquivLoadVT = LoadVT.isVector() ?
3805  EVT::getVectorVT(*DAG.getContext(), MVT::i32,
3806  LoadVT.getVectorNumElements()) : LoadVT;
3807  }
3808 
3809  // Change from v4f16/v2f16 to EquivLoadVT.
3810  SDVTList VTList = DAG.getVTList(EquivLoadVT, MVT::Other);
3811 
3812  SDValue Load
3813  = DAG.getMemIntrinsicNode(
3814  IsIntrinsic ? (unsigned)ISD::INTRINSIC_W_CHAIN : Opcode, DL,
3815  VTList, Ops, M->getMemoryVT(),
3816  M->getMemOperand());
3817  if (!Unpacked) // Just adjusted the opcode.
3818  return Load;
3819 
3820  SDValue Adjusted = adjustLoadValueTypeImpl(Load, LoadVT, DL, DAG, Unpacked);
3821 
3822  return DAG.getMergeValues({ Adjusted, Load.getValue(1) }, DL);
3823 }
3824 
3825 static SDValue lowerICMPIntrinsic(const SITargetLowering &TLI,
3826  SDNode *N, SelectionDAG &DAG) {
3827  EVT VT = N->getValueType(0);
3828  const auto *CD = cast<ConstantSDNode>(N->getOperand(3));
3829  int CondCode = CD->getSExtValue();
3830  if (CondCode < ICmpInst::Predicate::FIRST_ICMP_PREDICATE ||
3831  CondCode > ICmpInst::Predicate::LAST_ICMP_PREDICATE)
3832  return DAG.getUNDEF(VT);
3833 
3834  ICmpInst::Predicate IcInput = static_cast<ICmpInst::Predicate>(CondCode);
3835 
3836 
3837  SDValue LHS = N->getOperand(1);
3838  SDValue RHS = N->getOperand(2);
3839 
3840  SDLoc DL(N);
3841 
3842  EVT CmpVT = LHS.getValueType();
3843  if (CmpVT == MVT::i16 && !TLI.isTypeLegal(MVT::i16)) {
3844  unsigned PromoteOp = ICmpInst::isSigned(IcInput) ?
3845  ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
3846  LHS = DAG.getNode(PromoteOp, DL, MVT::i32, LHS);
3847  RHS = DAG.getNode(PromoteOp, DL, MVT::i32, RHS);
3848  }
3849 
3850  ISD::CondCode CCOpcode = getICmpCondCode(IcInput);
3851 
3852  return DAG.getNode(AMDGPUISD::SETCC, DL, VT, LHS, RHS,
3853  DAG.getCondCode(CCOpcode));
3854 }
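// In effect, llvm.amdgcn.icmp(a, b, <icmp predicate>) becomes
//   AMDGPUISD::SETCC a, b, <cond>
// producing a lane mask in the intrinsic's result type. Out-of-range predicates
// fold to undef, and illegal i16 operands are first promoted to i32 with an
// extension matching the predicate's signedness.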
3855 
3856 static SDValue lowerFCMPIntrinsic(const SITargetLowering &TLI,
3857  SDNode *N, SelectionDAG &DAG) {
3858  EVT VT = N->getValueType(0);
3859  const auto *CD = cast<ConstantSDNode>(N->getOperand(3));
3860 
3861  int CondCode = CD->getSExtValue();
3862  if (CondCode < FCmpInst::Predicate::FIRST_FCMP_PREDICATE ||
3863  CondCode > FCmpInst::Predicate::LAST_FCMP_PREDICATE) {
3864  return DAG.getUNDEF(VT);
3865  }
3866 
3867  SDValue Src0 = N->getOperand(1);
3868  SDValue Src1 = N->getOperand(2);
3869  EVT CmpVT = Src0.getValueType();
3870  SDLoc SL(N);
3871 
3872  if (CmpVT == MVT::f16 && !TLI.isTypeLegal(CmpVT)) {
3873  Src0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0);
3874  Src1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1);
3875  }
3876 
3877  FCmpInst::Predicate IcInput = static_cast<FCmpInst::Predicate>(CondCode);
3878  ISD::CondCode CCOpcode = getFCmpCondCode(IcInput);
3879  return DAG.getNode(AMDGPUISD::SETCC, SL, VT, Src0,
3880  Src1, DAG.getCondCode(CCOpcode));
3881 }
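// The FP variant is analogous: out-of-range predicates fold to undef, and f16
// operands are extended to f32 when f16 is not legal on the subtarget.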
3882 
3883 void SITargetLowering::ReplaceNodeResults(SDNode *N,
3884  SmallVectorImpl<SDValue> &Results,
3885  SelectionDAG &DAG) const {
3886  switch (N->getOpcode()) {
3887  case ISD::INSERT_VECTOR_ELT: {
3888  if (SDValue Res = lowerINSERT_VECTOR_ELT(SDValue(N, 0), DAG))
3889  Results.push_back(Res);
3890  return;
3891  }
3892  case ISD::EXTRACT_VECTOR_ELT: {
3893  if (SDValue Res = lowerEXTRACT_VECTOR_ELT(SDValue(N, 0), DAG))
3894  Results.push_back(Res);
3895  return;
3896  }
3897  case ISD::INTRINSIC_WO_CHAIN: {
3898  unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
3899  switch (IID) {
3900  case Intrinsic::amdgcn_cvt_pkrtz: {
3901  SDValue Src0 = N->getOperand(1);
3902  SDValue Src1 = N->getOperand(2);
3903  SDLoc SL(N);
3904  SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_PKRTZ_F16_F32, SL, MVT::i32,
3905  Src0, Src1);
3906  Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Cvt));
3907  return;
3908  }
3909  case Intrinsic::amdgcn_cvt_pknorm_i16:
3910  case Intrinsic::amdgcn_cvt_pknorm_u16:
3911  case Intrinsic::amdgcn_cvt_pk_i16:
3912  case Intrinsic::amdgcn_cvt_pk_u16: {
3913  SDValue Src0 = N->getOperand(1);
3914  SDValue Src1 = N->getOperand(2);
3915  SDLoc SL(N);
3916  unsigned Opcode;
3917 
3918  if (IID == Intrinsic::amdgcn_cvt_pknorm_i16)
3919  Opcode = AMDGPUISD::CVT_PKNORM_I16_F32;
3920  else if (IID == Intrinsic::amdgcn_cvt_pknorm_u16)
3921  Opcode = AMDGPUISD::CVT_PKNORM_U16_F32;
3922  else if (IID == Intrinsic::amdgcn_cvt_pk_i16)
3923  Opcode = AMDGPUISD::CVT_PK_I16_I32;
3924  else
3925  Opcode = AMDGPUISD::CVT_PK_U16_U32;
3926 
3927  EVT VT = N->getValueType(0);
3928  if (isTypeLegal(VT))
3929  Results.push_back(DAG.getNode(Opcode, SL, VT, Src0, Src1));
3930  else {
3931  SDValue Cvt = DAG.getNode(Opcode, SL, MVT::i32, Src0, Src1);
3932  Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, Cvt));
3933  }
3934  return;
3935  }
3936  }
3937  break;
3938  }
3939  case ISD::INTRINSIC_W_CHAIN: {
3940  if (SDValue Res = LowerINTRINSIC_W_CHAIN(SDValue(N, 0), DAG)) {
3941  Results.push_back(Res);
3942  Results.push_back(Res.getValue(1));
3943  return;
3944  }
3945 
3946  break;
3947  }
3948  case ISD::SELECT: {
3949  SDLoc SL(N);
3950  EVT VT = N->getValueType(0);
3951  EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
3952  SDValue LHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(1));
3953  SDValue RHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(2));
3954 
3955  EVT SelectVT = NewVT;
3956  if (NewVT.bitsLT(MVT::i32)) {
3957  LHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, LHS);
3958  RHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, RHS);
3959  SelectVT = MVT::i32;
3960  }
3961 
3962  SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, SelectVT,
3963  N->getOperand(0), LHS, RHS);
3964 
3965  if (NewVT != SelectVT)
3966  NewSelect = DAG.getNode(ISD::TRUNCATE, SL, NewVT, NewSelect);
3967  Results.push_back(DAG.getNode(ISD::BITCAST, SL, VT, NewSelect));
3968  return;
3969  }
3970  case ISD::FNEG: {
3971  if (N->getValueType(0) != MVT::v2f16)
3972  break;
3973 
3974  SDLoc SL(N);
3975  SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0));
3976 
3977  SDValue Op = DAG.getNode(ISD::XOR, SL, MVT::i32,
3978  BC,
3979  DAG.getConstant(0x80008000, SL, MVT::i32));
3980  Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op));
3981  return;
3982  }
3983  case ISD::FABS: {
3984  if (N->getValueType(0) != MVT::v2f16)
3985  break;
3986 
3987  SDLoc SL(N);
3988  SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0));
3989 
3990  SDValue Op = DAG.getNode(ISD::AND, SL, MVT::i32,
3991  BC,
3992  DAG.getConstant(0x7fff7fff, SL, MVT::i32));
3993  Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op));
3994  return;
3995  }
3996  default:
3997  break;
3998  }
3999 }
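// Note: the v2f16 FNEG/FABS cases above use the usual packed-half bit tricks,
// operating directly on the i32 bit pattern:
//   fneg v2f16 -> xor i32, 0x80008000   (flip both sign bits)
//   fabs v2f16 -> and i32, 0x7fff7fff   (clear both sign bits)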
4000 
4001 /// Helper function for LowerBRCOND
4002 static SDNode *findUser(SDValue Value, unsigned Opcode) {
4003 
4004  SDNode *Parent = Value.getNode();
4005  for (SDNode::use_iterator I = Parent->use_begin(), E = Parent->use_end();
4006  I != E; ++I) {
4007 
4008  if (I.getUse().get() != Value)
4009  continue;
4010 
4011  if (I->getOpcode() == Opcode)
4012  return *I;
4013  }
4014  return nullptr;
4015 }
4016 
4017 unsigned SITargetLowering::isCFIntrinsic(const SDNode *Intr) const {
4018  if (Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
4019  switch (cast<ConstantSDNode>(Intr->getOperand(1))->getZExtValue()) {
4020  case Intrinsic::amdgcn_if:
4021  return AMDGPUISD::IF;
4022  case Intrinsic::amdgcn_else:
4023  return AMDGPUISD::ELSE;
4024  case Intrinsic::amdgcn_loop:
4025  return AMDGPUISD::LOOP;
4026  case Intrinsic::amdgcn_end_cf:
4027  llvm_unreachable("should not occur");
4028  default:
4029  return 0;
4030  }
4031  }
4032 
4033  // break, if_break, else_break are all only used as inputs to loop, not
4034  // directly as branch conditions.
4035  return 0;
4036 }
4037 
4038 bool SITargetLowering::shouldEmitFixup(const GlobalValue *GV) const {
4039  const Triple &TT = getTargetMachine().getTargetTriple();
4040  return (GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
4041  GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
4042  AMDGPU::shouldEmitConstantsToTextSection(TT);
4043 }
4044 
4045 bool SITargetLowering::shouldEmitGOTReloc(const GlobalValue *GV) const {
4046  // FIXME: Either avoid relying on address space here or change the default
4047  // address space for functions to avoid the explicit check.
4048  return (GV->getValueType()->isFunctionTy() ||
4049  GV->getType()->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS ||
4050  GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
4051  GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
4052  !shouldEmitFixup(GV) &&
4053  !getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
4054 }
4055 
4056 bool SITargetLowering::shouldEmitPCReloc(const GlobalValue *GV) const {
4057  return !shouldEmitFixup(GV) && !shouldEmitGOTReloc(GV);
4058 }
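// Roughly: constant-address globals that are emitted into the text section get
// absolute fixups, function pointers and non-DSO-local global/constant-address
// globals go through the GOT, and everything else is addressed PC-relative.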
4059 
4060 /// This transforms the control flow intrinsics so that the branch destination
4061 /// is the last parameter; it also switches the branch target with BR when needed.
4062 SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
4063  SelectionDAG &DAG) const {
4064  SDLoc DL(BRCOND);
4065 
4066  SDNode *Intr = BRCOND.getOperand(1).getNode();
4067  SDValue Target = BRCOND.getOperand(2);
4068  SDNode *BR = nullptr;
4069  SDNode *SetCC = nullptr;
4070 
4071  if (Intr->getOpcode() == ISD::SETCC) {
4072  // As long as we negate the condition everything is fine
4073  SetCC = Intr;
4074  Intr = SetCC->getOperand(0).getNode();
4075 
4076  } else {
4077  // Get the target from BR if we don't negate the condition
4078  BR = findUser(BRCOND, ISD::BR);
4079  Target = BR->getOperand(1);
4080  }
4081 
4082  // FIXME: This changes the types of the intrinsics instead of introducing new
4083  // nodes with the correct types.
4084  // e.g. llvm.amdgcn.loop
4085 
4086  // eg: i1,ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3
4087  // => t9: ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3, BasicBlock:ch<bb1 0x7fee5286d088>
4088 
4089  unsigned CFNode = isCFIntrinsic(Intr);
4090  if (CFNode == 0) {
4091  // This is a uniform branch so we don't need to legalize.
4092  return BRCOND;
4093  }
4094 
4095  bool HaveChain = Intr->getOpcode() == ISD::INTRINSIC_VOID ||
4096  Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN;
4097 
4098  assert(!SetCC ||
4099  (SetCC->getConstantOperandVal(1) == 1 &&
4100  cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() ==
4101  ISD::SETNE));
4102 
4103  // operands of the new intrinsic call
4104  SmallVector<SDValue, 8> Ops;
4105  if (HaveChain)
4106  Ops.push_back(BRCOND.getOperand(0));
4107 
4108  Ops.append(Intr->op_begin() + (HaveChain ? 2 : 1), Intr->op_end());
4109  Ops.push_back(Target);
4110 
4111  ArrayRef<EVT> Res(Intr->value_begin() + 1, Intr->value_end());
4112 
4113  // build the new intrinsic call
4114  SDNode *Result = DAG.getNode(CFNode, DL, DAG.getVTList(Res), Ops).getNode();
4115 
4116  if (!HaveChain) {
4117  SDValue Ops[] = {
4118  SDValue(Result, 0),
4119  BRCOND.getOperand(0)
4120  };
4121 
4122  Result = DAG.getMergeValues(Ops, DL).getNode();
4123  }
4124 
4125  if (BR) {
4126  // Give the branch instruction our target
4127  SDValue Ops[] = {
4128  BR->getOperand(0),
4129  BRCOND.getOperand(2)
4130  };
4131  SDValue NewBR = DAG.getNode(ISD::BR, DL, BR->getVTList(), Ops);
4132  DAG.ReplaceAllUsesWith(BR, NewBR.getNode());
4133  BR = NewBR.getNode();
4134  }
4135 
4136  SDValue Chain = SDValue(Result, Result->getNumValues() - 1);
4137 
4138  // Copy the intrinsic results to registers
4139  for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) {
4140  SDNode *CopyToReg = findUser(SDValue(Intr, i), ISD::CopyToReg);
4141  if (!CopyToReg)
4142  continue;
4143 
4144  Chain = DAG.getCopyToReg(
4145  Chain, DL,
4146  CopyToReg->getOperand(1),
4147  SDValue(Result, i - 1),
4148  SDValue());
4149 
4150  DAG.ReplaceAllUsesWith(SDValue(CopyToReg, 0), CopyToReg->getOperand(0));
4151  }
4152 
4153  // Remove the old intrinsic from the chain
4154  DAG.ReplaceAllUsesOfValueWith(
4155  SDValue(Intr, Intr->getNumValues() - 1),
4156  Intr->getOperand(0));
4157 
4158  return Chain;
4159 }
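// In effect, a pattern like
//   brcond (setcc (llvm.amdgcn.if ...), 1, setne), BB
// is rewritten so the control-flow intrinsic becomes an AMDGPUISD::IF/ELSE/LOOP
// node carrying BB as its last operand, and the chain is re-threaded through
// any BR and CopyToReg users of the original intrinsic.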
4160 
4161 SDValue SITargetLowering::getFPExtOrFPTrunc(SelectionDAG &DAG,
4162  SDValue Op,
4163  const SDLoc &DL,
4164  EVT VT) const {
4165  return Op.getValueType().bitsLE(VT) ?
4166  DAG.getNode(ISD::FP_EXTEND, DL, VT, Op) :
4167  DAG.getNode(ISD::FTRUNC, DL, VT, Op);
4168 }
4169 
4170 SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
4171  assert(Op.getValueType() == MVT::f16 &&
4172  "Do not know how to custom lower FP_ROUND for non-f16 type");
4173 
4174  SDValue Src = Op.getOperand(0);
4175  EVT SrcVT = Src.getValueType();
4176  if (SrcVT != MVT::f64)
4177  return Op;
4178 
4179  SDLoc DL(Op);
4180 
4181  SDValue FpToFp16 = DAG.getNode(ISD::FP_TO_FP16, DL, MVT::i32, Src);
4182  SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FpToFp16);
4183  return DAG.getNode(ISD::BITCAST, DL, MVT::f16, Trunc);
4184 }
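// i.e. f16 = fp_round f64 is lowered as
//   bitcast f16 (truncate i16 (fp_to_fp16 f64 src))
// since FP_TO_FP16 already returns the half's bit pattern as an integer.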
4185 
4186 SDValue SITargetLowering::lowerFMINNUM_FMAXNUM(SDValue Op,
4187  SelectionDAG &DAG) const {
4188  EVT VT = Op.getValueType();
4189  const MachineFunction &MF = DAG.getMachineFunction();
4190  const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
4191  bool IsIEEEMode = Info->getMode().IEEE;
4192 
4193  // FIXME: Assert during selection that this is only selected for
4194  // ieee_mode. Currently a combine can produce the ieee version for non-ieee
4195  // mode functions, but this happens to be OK since it is only done in cases
4196  // where there is known to be no sNaN.
4197  if (IsIEEEMode)
4198  return expandFMINNUM_FMAXNUM(Op.getNode(), DAG);
4199 
4200  if (VT == MVT::v4f16)
4201  return splitBinaryVectorOp(Op, DAG);
4202  return Op;
4203 }
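// Note: in IEEE mode the node is expanded generically rather than selected
// directly, since the plain min/max selection only matches when no sNaN can
// occur (see the FIXME above); outside IEEE mode only v4f16 needs extra work
// and is split into two v2f16 operations.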
4204 
4205 SDValue SITargetLowering::lowerTRAP(SDValue Op, SelectionDAG &DAG) const {
4206  SDLoc SL(Op);
4207  SDValue Chain = Op.getOperand(0);
4208 
4209  if (Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbiHsa ||
4210  !Subtarget->isTrapHandlerEnabled())
4211  return DAG.getNode(AMDGPUISD::ENDPGM, SL, MVT::Other, Chain);
4212 
4213  MachineFunction &MF = DAG.getMachineFunction();
4214  const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
4215  unsigned UserSGPR = Info->getQueuePtrUserSGPR();
4216  assert(UserSGPR != AMDGPU::NoRegister);
4217  SDValue QueuePtr = CreateLiveInRegister(
4218  DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);
4219  SDValue SGPR01 = DAG.getRegister(AMDGPU::SGPR0_SGPR1, MVT::i64);
4220  SDValue ToReg = DAG.getCopyToReg(Chain, SL, SGPR01,
4221  QueuePtr, SDValue());
4222  SDValue Ops[] = {
4223  ToReg,
4224  DAG.getTargetConstant(GCNSubtarget::TrapIDLLVMTrap, SL, MVT::i16),
4225  SGPR01,
4226  ToReg.getValue(1)
4227  };
4228  return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops);
4229 }
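// Summary: with the HSA trap handler enabled, the queue pointer is copied into
// SGPR0/SGPR1 for the trap handler and an AMDGPUISD::TRAP node with the
// llvm.trap trap ID is emitted; otherwise the trap degrades to a plain ENDPGM.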
4230 
4231 SDValue SITargetLowering::lowerDEBUGTRAP(SDValue Op, SelectionDAG &DAG) const {
4232  SDLoc SL(Op);
4233  SDValue Chain = Op.getOperand(0);
4234  MachineFunction &MF = DAG.getMachineFunction();
4235 
4236  if (Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbiHsa ||
4237  !Subtarget->isTrapHandlerEnabled()) {
4238  DiagnosticInfoUnsupported NoTrap(MF.getFunction(),
4239  "debugtrap handler not supported",
4240  Op.getDebugLoc(),
4241  DS_Warning);
4242  LLVMContext &Ctx = MF.getFunction().getContext();
4243  Ctx.diagnose(NoTrap);
4244  return Chain;
4245  }
4246 
4247  SDValue Ops[] = {
4248  Chain,
4249  DAG.getTargetConstant(GCNSubtarget::TrapIDLLVMDebugTrap, SL, MVT::i16)
4250  };
4251  return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops);
4252 }
4253 
4254 SDValue SITargetLowering::getSegmentAperture(unsigned AS, const SDLoc &DL,
4255  SelectionDAG &DAG) const {
4256  // FIXME: Use inline constants (src_{shared, private}_base) instead.
4257  if (Subtarget->hasApertureRegs()) {
4258  unsigned Offset = AS == AMDGPUAS::LOCAL_ADDRESS ?
4259  AMDGPU::Hwreg::OFFSET_SRC_SHARED_BASE :
4260  AMDGPU::Hwreg::OFFSET_SRC_PRIVATE_BASE;
4261  unsigned WidthM1 = AS == AMDGPUAS::LOCAL_ADDRESS ?
4262  AMDGPU::Hwreg::WIDTH_M1_SRC_SHARED_BASE :
4263  AMDGPU::Hwreg::WIDTH_M1_SRC_PRIVATE_BASE;
4264  unsigned Encoding =
4265  AMDGPU::Hwreg::ID_MEM_BASES << AMDGPU::Hwreg::ID_SHIFT_ |
4266  Offset << AMDGPU::Hwreg::OFFSET_SHIFT_ |
4267  WidthM1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_;
4268 
4269  SDValue EncodingImm = DAG.getTargetConstant(Encoding, DL, MVT::i16);
4270  SDValue ApertureReg = SDValue(
4271  DAG.getMachineNode(AMDGPU::S_GETREG_B32, DL, MVT::i32, EncodingImm), 0);
4272  SDValue ShiftAmount = DAG.getTargetConstant(WidthM1 + 1, DL, MVT::i32);
4273  return DAG.getNode(ISD::SHL, DL, MVT::i32, ApertureReg, ShiftAmount);
4274  }
4275 
4276  MachineFunction &MF = DAG.getMachineFunction();
4277  const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
4278  unsigned UserSGPR = Info->getQueuePtrUserSGPR();
4279  assert(UserSGPR != AMDGPU::NoRegister);
4280 
4281  SDValue QueuePtr = CreateLiveInRegister(
4282  DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);
4283 
4284  // Offset into amd_queue_t for group_segment_aperture_base_hi /
4285  // private_segment_aperture_base_hi.
4286  uint32_t StructOffset = (AS == AMDGPUAS::LOCAL_ADDRESS) ? 0x40 : 0x44;
4287 
4288  SDValue Ptr = DAG.getObjectPtrOffset(DL, QueuePtr, StructOffset);
4289 
4290  // TODO: Use custom target PseudoSourceValue.
4291  // TODO: We should use the value from the IR intrinsic call, but it might not
4292  // be available, and it is unclear how we would get it.
4293  Value *V = UndefValue::get(PointerType::get(Type::getInt8Ty(*DAG.getContext()),
4294  AMDGPUAS::CONSTANT_ADDRESS));
4295 
4296  MachinePointerInfo PtrInfo(V, StructOffset);
4297  return DAG.getLoad(MVT::i32, DL, QueuePtr.getValue(1), Ptr, PtrInfo,
4298  MinAlign(64, StructOffset),
4299  MachineMemOperand::MODereferenceable |
4300  MachineMemOperand::MOInvariant);
4301 }
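// Summary: with aperture registers the base is read via S_GETREG_B32 and
// shifted into the high bits of a 32-bit value; otherwise it is loaded from the
// amd_queue_t fields at offset 0x40 (group) / 0x44 (private) relative to the
// queue pointer passed in a user SGPR.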
4302 
4303 SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
4304  SelectionDAG &DAG) const {
4305  SDLoc SL(Op);
4306  const AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(Op);
4307 
4308  SDValue Src = ASC->getOperand(0);
4309  SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64);
4310 
4311  const AMDGPUTargetMachine &TM =
4312  static_cast<const AMDGPUTargetMachine &>(getTargetMachine());
4313 
4314  // flat -> local/private
4315  if (ASC->getSrcAddressSpace() == AMDGPUAS::FLAT_ADDRESS) {
4316  unsigned DestAS = ASC->getDestAddressSpace();
4317 
4318  if (DestAS == AMDGPUAS::LOCAL_ADDRESS ||
4319  DestAS == AMDGPUAS::PRIVATE_ADDRESS) {
4320  unsigned NullVal = TM.getNullPointerValue(DestAS);
4321  SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
4322  SDValue NonNull = DAG.getSetCC(SL, MVT::i1, Src, FlatNullPtr, ISD::SETNE);
4323  SDValue Ptr = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src);
4324 
4325  return DAG.getNode(ISD::SELECT, SL, MVT::i32,
4326  NonNull, Ptr, SegmentNullPtr);
4327  }
4328  }
4329 
4330  // local/private -> flat
4331  if (ASC->getDestAddressSpace() == AMDGPUAS::FLAT_ADDRESS) {
4332  unsigned SrcAS = ASC->getSrcAddressSpace();
4333 
4334  if (SrcAS == AMDGPUAS::LOCAL_ADDRESS ||
4335  SrcAS == AMDGPUAS::PRIVATE_ADDRESS) {
4336  unsigned NullVal = TM.getNullPointerValue(SrcAS);
4337  SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
4338 
4339  SDValue NonNull
4340  = DAG.getSetCC(SL, MVT::i1, Src, SegmentNullPtr, ISD::SETNE);
4341 
4342  SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), SL, DAG);
4343  SDValue CvtPtr
4344  = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Aperture);
4345 
4346  return DAG.getNode(ISD::SELECT, SL, MVT::i64, NonNull,
4347  DAG.getNode(ISD::BITCAST, SL, MVT::i64, CvtPtr),
4348  FlatNullPtr);
4349  }
4350  }
4351 
4352  // global <-> flat are no-ops and never emitted.
4353 
4354  const MachineFunction &MF = DAG.getMachineFunction();
4355  DiagnosticInfoUnsupported InvalidAddrSpaceCast(
4356  MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc());
4357  DAG.getContext()->diagnose(InvalidAddrSpaceCast);
4358 
4359  return DAG.getUNDEF(ASC->getValueType(0));
4360 }
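// Example: a flat->private cast compares the 64-bit pointer against the flat
// null value, truncates it to the 32-bit segment offset, and selects the
// segment null constant for null inputs; the reverse cast rebuilds a 64-bit
// pointer from {offset, aperture-high} and selects the flat null for null
// inputs. Any other cast is diagnosed as invalid and lowered to undef.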
4361 
4362 SDValue SITargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
4363  SelectionDAG &DAG) const {
4364  SDValue Vec = Op.getOperand(0);
4365  SDValue InsVal = Op.getOperand(1);
4366  SDValue Idx = Op.getOperand(2);
4367  EVT VecVT = Vec.getValueType();
4368  EVT EltVT = VecVT.getVectorElementType();
4369  unsigned VecSize = VecVT.getSizeInBits();
4370  unsigned EltSize = EltVT.getSizeInBits();
4371 
4372 
4373  assert(VecSize <= 64);
4374 
4375  unsigned NumElts = VecVT.getVectorNumElements();
4376  SDLoc SL(Op);
4377  auto KIdx = dyn_cast<ConstantSDNode>(Idx);
4378 
4379  if (NumElts == 4 && EltSize == 16 && KIdx) {
4380  SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Vec);
4381 
4382  SDValue LoHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec,
4383  DAG.getConstant(0, SL, MVT::i32));
4384  SDValue HiHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec,
4385  DAG.getConstant(1, SL, MVT::i32));
4386 
4387  SDValue LoVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, LoHalf);
4388  SDValue HiVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, HiHalf);
4389 
4390  unsigned Idx = KIdx->getZExtValue();
4391  bool InsertLo = Idx < 2;
4392  SDValue InsHalf = DAG.getNode(ISD::INSERT_VECTOR_ELT, SL, MVT::v2i16,
4393  InsertLo ? LoVec : HiVec,
4394  DAG.getNode(ISD::BITCAST, SL, MVT::i16, InsVal),
4395  DAG.getConstant(InsertLo ? Idx : (Idx - 2), SL, MVT::i32));
4396 
4397  InsHalf = DAG.getNode(ISD::BITCAST, SL, MVT::i32, InsHalf);
4398 
4399  SDValue Concat = InsertLo ?
4400  DAG.getBuildVector(MVT::v2i32, SL, { InsHalf, HiHalf }) :
4401  DAG.getBuildVector(MVT::v2i32, SL, { LoHalf, InsHalf });
4402 
4403  return DAG.getNode(ISD::BITCAST, SL, VecVT, Concat);
4404  }
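// For a constant index into v4i16/v4f16 the vector is split into two v2i16
// halves (via a v2i32 bitcast), the insert is done in the affected half with a
// legal v2i16 INSERT_VECTOR_ELT, and the halves are recombined.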
4405 
4406  if (isa<ConstantSDNode>(Idx))
4407  return SDValue();
4408 
4409  MVT IntVT = MVT::getIntegerVT(VecSize);
4410 
4411  // Avoid stack access for dynamic indexing.
4412  // v_bfi_b32 (v_bfm_b32 16, (shl idx, 16)), val, vec
4413 
4414  // Create a congruent vector with the target value in each element so that
4415  // the required element can be masked and ORed into the target vector.
4416  SDValue ExtVal = DAG.getNode(ISD::BITCAST, SL, IntVT,
4417  DAG.getSplatBuildVector(VecVT, SL, InsVal));
4418 
4419  assert(isPowerOf2_32(EltSize));
4420  SDValue ScaleFactor = DAG.getConstant(Log2_32(EltSize), SL, MVT::i32);
4421 
4422  // Convert vector index to bit-index.
4423  SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, ScaleFactor);
4424