27 #include "llvm/IR/IntrinsicsSPIRV.h"
30 #define DEBUG_TYPE "spirv-isel"
36 #define GET_GLOBALISEL_PREDICATE_BITSET
37 #include "SPIRVGenGlobalISel.inc"
38 #undef GET_GLOBALISEL_PREDICATE_BITSET
59 #define GET_GLOBALISEL_PREDICATES_DECL
60 #include "SPIRVGenGlobalISel.inc"
61 #undef GET_GLOBALISEL_PREDICATES_DECL
63 #define GET_GLOBALISEL_TEMPORARIES_DECL
64 #include "SPIRVGenGlobalISel.inc"
65 #undef GET_GLOBALISEL_TEMPORARIES_DECL
// NOTE(review): this file is a damaged extraction — leading numbers are fused
// original line numbers and many lines are elided. Bytes kept verbatim below.
// Fragments of overloaded select*() member declarations (trailing parameter
// lists of the SPIRVInstructionSelector class declaration).
82 unsigned Opcode)
const;
84 unsigned Opcode)
const;
126 bool IsSigned)
const;
128 bool IsSigned,
unsigned Opcode)
const;
130 bool IsSigned)
const;
// Declaration tail with a defaulted ResType parameter — presumably
// buildI32Constant; TODO confirm against the full header section.
164 const SPIRVType *ResType =
nullptr)
const;
// TableGen-generated selector implementation is spliced in here.
173 #define GET_GLOBALISEL_IMPL
174 #include "SPIRVGenGlobalISel.inc"
175 #undef GET_GLOBALISEL_IMPL
// Constructor initializer-list fragment: caches TRI, RBI and the global
// SPIR-V type registry from the subtarget.
181 TRI(*
ST.getRegisterInfo()), RBI(RBI), GR(*
ST.getSPIRVGlobalRegistry()),
183 #
include "SPIRVGenGlobalISel.inc"
186 #
include "SPIRVGenGlobalISel.inc"
// setupMF(): records the current function in the registry before delegating
// to the base InstructionSelector.
196 GR.setCurrentFunc(MF);
197 InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI,
BFI);
// select() preconditions: the instruction must live in a block and function.
204 assert(
I.getParent() &&
"Instruction should be in a basic block!");
205 assert(
I.getParent()->getParent() &&
"Instruction should be in a function!");
// NOTE(review): elided fragment of select(). Special-cases ASSIGN_TYPE
// pseudo-instructions, then dispatches everything else to spvSelect().
210 if (Opcode == SPIRV::ASSIGN_TYPE) {
214 assert(Res ||
Def->getOpcode() == TargetOpcode::G_CONSTANT);
// ASSIGN_TYPE is consumed here and removed from the block.
219 I.removeFromParent();
220 }
else if (
I.getNumDefs() == 1) {
// Single-def generic instr: force a scalar-32 LLT on the result vreg.
222 MRI->
setType(
I.getOperand(0).getReg(), LLT::scalar(32));
227 if (
I.getNumOperands() !=
I.getNumExplicitOperands()) {
228 LLVM_DEBUG(
errs() <<
"Generic instr has unexpected implicit operands\n");
// Look up the SPIR-V type of the result (if any) before selection;
// G_GLOBAL_VALUE is the only def allowed to lack a registered type.
234 bool HasDefs =
I.getNumDefs() > 0;
236 SPIRVType *ResType = HasDefs ? GR.getSPIRVTypeForVReg(ResVReg) :
nullptr;
237 assert(!HasDefs || ResType ||
I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
238 if (spvSelect(ResVReg, ResType,
I)) {
241 I.removeFromParent();
// NOTE(review): elided fragments of spvSelect() — the big opcode dispatch
// mapping each generic (G_*) MachineInstr to a select* helper. The switch
// header, several case groups and the default are missing from this view.
247 bool SPIRVInstructionSelector::spvSelect(
Register ResVReg,
251 I.getOpcode() == TargetOpcode::G_CONSTANT);
252 const unsigned Opcode =
I.getOpcode();
// Constants, globals and undef.
254 case TargetOpcode::G_CONSTANT:
255 return selectConst(ResVReg, ResType,
I.getOperand(1).getCImm()->getValue(),
257 case TargetOpcode::G_GLOBAL_VALUE:
258 return selectGlobalValue(ResVReg,
I);
259 case TargetOpcode::G_IMPLICIT_DEF:
260 return selectOpUndef(ResVReg, ResType,
I);
262 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
263 return selectIntrinsic(ResVReg, ResType,
I);
264 case TargetOpcode::G_BITREVERSE:
265 return selectBitreverse(ResVReg, ResType,
I);
267 case TargetOpcode::G_BUILD_VECTOR:
268 return selectConstVector(ResVReg, ResType,
I);
// Shuffle lowers directly to OpVectorShuffle with an inline mask.
270 case TargetOpcode::G_SHUFFLE_VECTOR: {
272 auto MIB =
BuildMI(
BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVectorShuffle))
274 .
addUse(GR.getSPIRVTypeID(ResType))
275 .
addUse(
I.getOperand(1).getReg())
276 .
addUse(
I.getOperand(2).getReg());
277 for (
auto V :
I.getOperand(3).getShuffleMask())
281 case TargetOpcode::G_MEMMOVE:
282 case TargetOpcode::G_MEMCPY:
283 return selectMemOperation(ResVReg,
I);
285 case TargetOpcode::G_ICMP:
286 return selectICmp(ResVReg, ResType,
I);
287 case TargetOpcode::G_FCMP:
288 return selectFCmp(ResVReg, ResType,
I);
290 case TargetOpcode::G_FRAME_INDEX:
291 return selectFrameIndex(ResVReg, ResType,
I);
293 case TargetOpcode::G_LOAD:
294 return selectLoad(ResVReg, ResType,
I);
295 case TargetOpcode::G_STORE:
296 return selectStore(
I);
298 case TargetOpcode::G_BR:
299 return selectBranch(
I);
300 case TargetOpcode::G_BRCOND:
301 return selectBranchCond(
I);
303 case TargetOpcode::G_PHI:
304 return selectPhi(ResVReg, ResType,
I);
// Conversions: each maps to exactly one SPIR-V convert opcode.
306 case TargetOpcode::G_FPTOSI:
307 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertFToS);
308 case TargetOpcode::G_FPTOUI:
309 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertFToU);
311 case TargetOpcode::G_SITOFP:
312 return selectIToF(ResVReg, ResType,
I,
true, SPIRV::OpConvertSToF);
313 case TargetOpcode::G_UITOFP:
314 return selectIToF(ResVReg, ResType,
I,
false, SPIRV::OpConvertUToF);
316 case TargetOpcode::G_CTPOP:
317 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpBitCount);
319 case TargetOpcode::G_SEXT:
320 return selectExt(ResVReg, ResType,
I,
true);
321 case TargetOpcode::G_ANYEXT:
322 case TargetOpcode::G_ZEXT:
323 return selectExt(ResVReg, ResType,
I,
false);
324 case TargetOpcode::G_TRUNC:
325 return selectTrunc(ResVReg, ResType,
I);
326 case TargetOpcode::G_FPTRUNC:
327 case TargetOpcode::G_FPEXT:
328 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpFConvert);
330 case TargetOpcode::G_PTRTOINT:
331 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertPtrToU);
332 case TargetOpcode::G_INTTOPTR:
333 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertUToPtr);
334 case TargetOpcode::G_BITCAST:
335 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpBitcast);
336 case TargetOpcode::G_ADDRSPACE_CAST:
337 return selectAddrSpaceCast(ResVReg, ResType,
I);
// Atomic RMW family: one OpAtomic* per generic opcode.
339 case TargetOpcode::G_ATOMICRMW_OR:
340 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicOr);
341 case TargetOpcode::G_ATOMICRMW_ADD:
342 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicIAdd);
343 case TargetOpcode::G_ATOMICRMW_AND:
344 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicAnd);
345 case TargetOpcode::G_ATOMICRMW_MAX:
346 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicSMax);
347 case TargetOpcode::G_ATOMICRMW_MIN:
348 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicSMin);
349 case TargetOpcode::G_ATOMICRMW_SUB:
350 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicISub);
351 case TargetOpcode::G_ATOMICRMW_XOR:
352 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicXor);
353 case TargetOpcode::G_ATOMICRMW_UMAX:
354 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicUMax);
355 case TargetOpcode::G_ATOMICRMW_UMIN:
356 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicUMin);
357 case TargetOpcode::G_ATOMICRMW_XCHG:
358 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicExchange);
359 case TargetOpcode::G_ATOMIC_CMPXCHG:
360 return selectAtomicCmpXchg(ResVReg, ResType,
I);
362 case TargetOpcode::G_FENCE:
363 return selectFence(
I);
// NOTE(review): elided fragments. selectUnOpWithSrc() emits a single-operand
// SPIR-V instruction for an explicit source register; selectUnOp() is the
// convenience wrapper that takes the source from operand 1.
370 bool SPIRVInstructionSelector::selectUnOpWithSrc(
Register ResVReg,
374 unsigned Opcode)
const {
375 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
377 .
addUse(GR.getSPIRVTypeID(ResType))
382 bool SPIRVInstructionSelector::selectUnOp(
Register ResVReg,
385 unsigned Opcode)
const {
386 return selectUnOpWithSrc(ResVReg, ResType,
I,
I.getOperand(1).getReg(),
// Fragment of an AtomicOrdering -> SPIRV::MemorySemantics mapping switch;
// the enclosing function header and default return are not in view.
392 case AtomicOrdering::Acquire:
393 return SPIRV::MemorySemantics::Acquire;
394 case AtomicOrdering::Release:
395 return SPIRV::MemorySemantics::Release;
396 case AtomicOrdering::AcquireRelease:
397 return SPIRV::MemorySemantics::AcquireRelease;
398 case AtomicOrdering::SequentiallyConsistent:
399 return SPIRV::MemorySemantics::SequentiallyConsistent;
401 case AtomicOrdering::Monotonic:
402 case AtomicOrdering::NotAtomic:
// Fragment of a SyncScope -> SPIRV::Scope mapping helper — presumably
// single-thread maps to Invocation, system scope to Device; confirm upstream.
410 return SPIRV::Scope::Invocation;
412 return SPIRV::Scope::Device;
// Fragment: fold a MachineMemOperand's properties into a SPIR-V memory
// operand bitmask (Volatile / Nontemporal / Aligned).
421 if (
MemOp->isVolatile())
422 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
423 if (
MemOp->isNonTemporal())
424 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
425 if (
MemOp->getAlign().value())
426 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);
430 if (SpvMemOp &
static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
// Same idea but driven by raw MachineMemOperand::Flags.
437 if (Flags & MachineMemOperand::Flags::MOVolatile)
438 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
439 if (Flags & MachineMemOperand::Flags::MONonTemporal)
440 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
// NOTE(review): elided fragments of the memory/atomic selectors.
// selectLoad(): emits OpLoad; OpOffset skips the intrinsic-ID operand when
// called for the spv_load intrinsic rather than G_LOAD.
446 bool SPIRVInstructionSelector::selectLoad(
Register ResVReg,
450 I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ? 1 : 0;
451 Register Ptr =
I.getOperand(1 + OpOffset).getReg();
452 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
454 .
addUse(GR.getSPIRVTypeID(ResType))
456 if (!
I.getNumMemOperands()) {
457 assert(
I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS);
462 return MIB.constrainAllUses(
TII,
TRI, RBI);
// selectStore(): mirrors selectLoad for OpStore (value + pointer operands).
465 bool SPIRVInstructionSelector::selectStore(
MachineInstr &
I)
const {
467 I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ? 1 : 0;
468 Register StoreVal =
I.getOperand(0 + OpOffset).getReg();
469 Register Ptr =
I.getOperand(1 + OpOffset).getReg();
471 auto MIB =
BuildMI(
BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpStore))
474 if (!
I.getNumMemOperands()) {
475 assert(
I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS);
480 return MIB.constrainAllUses(
TII,
TRI, RBI);
// selectMemOperation(): lowers G_MEMCPY/G_MEMMOVE to OpCopyMemorySized,
// then copies the result register if the caller expects one.
483 bool SPIRVInstructionSelector::selectMemOperation(
Register ResVReg,
486 auto MIB =
BuildMI(
BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCopyMemorySized))
487 .
addDef(
I.getOperand(0).getReg())
488 .
addUse(
I.getOperand(1).getReg())
489 .
addUse(
I.getOperand(2).getReg());
490 if (
I.getNumMemOperands())
493 if (ResVReg.
isValid() && ResVReg != MIB->getOperand(0).getReg()) {
494 BuildMI(
BB,
I,
I.getDebugLoc(),
TII.get(TargetOpcode::COPY), ResVReg)
495 .
addUse(MIB->getOperand(0).getReg());
// selectAtomicRMW(): builds the OpAtomic* given scope + memory-semantics
// constants materialized as i32 constant registers.
500 bool SPIRVInstructionSelector::selectAtomicRMW(
Register ResVReg,
503 unsigned NewOpcode)
const {
515 Register MemSemReg = buildI32Constant(MemSem ,
I);
517 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(NewOpcode))
519 .
addUse(GR.getSPIRVTypeID(ResType))
523 .
addUse(
I.getOperand(2).getReg())
// selectFence(): G_FENCE -> OpMemoryBarrier.
527 bool SPIRVInstructionSelector::selectFence(
MachineInstr &
I)
const {
530 Register MemSemReg = buildI32Constant(MemSem,
I);
535 return BuildMI(
BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpMemoryBarrier))
// selectAtomicCmpXchg(): OpAtomicCompareExchange; reuses the equal-ordering
// semantics register when both orderings coincide.
541 bool SPIRVInstructionSelector::selectAtomicCmpXchg(
Register ResVReg,
553 SPIRVType *SpvValTy = GR.getSPIRVTypeForVReg(Val);
558 Register MemSemEqReg = buildI32Constant(MemSemEq,
I);
562 MemSemEq == MemSemNeq ? MemSemEqReg : buildI32Constant(MemSemNeq,
I);
564 return BuildMI(*
I.getParent(),
I,
DL,
TII.get(SPIRV::OpAtomicCompareExchange))
566 .
addUse(GR.getSPIRVTypeID(SpvValTy))
// Storage classes that participate in the generic address space.
578 case SPIRV::StorageClass::Workgroup:
579 case SPIRV::StorageClass::CrossWorkgroup:
580 case SPIRV::StorageClass::Function:
// selectAddrSpaceCast(): to/from Generic uses OpPtrCastToGeneric /
// OpGenericCastToPtr; other combinations go through a generic intermediate
// or degrade to OpBitcast.
592 bool SPIRVInstructionSelector::selectAddrSpaceCast(
Register ResVReg,
595 Register SrcPtr =
I.getOperand(1).getReg();
596 SPIRVType *SrcPtrTy = GR.getSPIRVTypeForVReg(SrcPtr);
602 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpPtrCastToGeneric);
605 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpGenericCastToPtr);
609 SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
615 .
addUse(GR.getSPIRVTypeID(GenericPtrTy))
620 .
addUse(GR.getSPIRVTypeID(ResType))
626 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpBitcast);
632 case CmpInst::FCMP_OEQ:
633 return SPIRV::OpFOrdEqual;
634 case CmpInst::FCMP_OGE:
635 return SPIRV::OpFOrdGreaterThanEqual;
636 case CmpInst::FCMP_OGT:
637 return SPIRV::OpFOrdGreaterThan;
638 case CmpInst::FCMP_OLE:
639 return SPIRV::OpFOrdLessThanEqual;
640 case CmpInst::FCMP_OLT:
641 return SPIRV::OpFOrdLessThan;
642 case CmpInst::FCMP_ONE:
643 return SPIRV::OpFOrdNotEqual;
644 case CmpInst::FCMP_ORD:
645 return SPIRV::OpOrdered;
646 case CmpInst::FCMP_UEQ:
647 return SPIRV::OpFUnordEqual;
648 case CmpInst::FCMP_UGE:
649 return SPIRV::OpFUnordGreaterThanEqual;
650 case CmpInst::FCMP_UGT:
651 return SPIRV::OpFUnordGreaterThan;
652 case CmpInst::FCMP_ULE:
653 return SPIRV::OpFUnordLessThanEqual;
654 case CmpInst::FCMP_ULT:
655 return SPIRV::OpFUnordLessThan;
656 case CmpInst::FCMP_UNE:
657 return SPIRV::OpFUnordNotEqual;
658 case CmpInst::FCMP_UNO:
659 return SPIRV::OpUnordered;
668 case CmpInst::ICMP_EQ:
669 return SPIRV::OpIEqual;
670 case CmpInst::ICMP_NE:
671 return SPIRV::OpINotEqual;
672 case CmpInst::ICMP_SGE:
673 return SPIRV::OpSGreaterThanEqual;
674 case CmpInst::ICMP_SGT:
675 return SPIRV::OpSGreaterThan;
676 case CmpInst::ICMP_SLE:
677 return SPIRV::OpSLessThanEqual;
678 case CmpInst::ICMP_SLT:
679 return SPIRV::OpSLessThan;
680 case CmpInst::ICMP_UGE:
681 return SPIRV::OpUGreaterThanEqual;
682 case CmpInst::ICMP_UGT:
683 return SPIRV::OpUGreaterThan;
684 case CmpInst::ICMP_ULE:
685 return SPIRV::OpULessThanEqual;
686 case CmpInst::ICMP_ULT:
687 return SPIRV::OpULessThan;
695 case CmpInst::ICMP_EQ:
696 return SPIRV::OpPtrEqual;
697 case CmpInst::ICMP_NE:
698 return SPIRV::OpPtrNotEqual;
708 case CmpInst::ICMP_EQ:
709 return SPIRV::OpLogicalEqual;
710 case CmpInst::ICMP_NE:
711 return SPIRV::OpLogicalNotEqual;
717 bool SPIRVInstructionSelector::selectBitreverse(
Register ResVReg,
721 return BuildMI(
BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBitReverse))
723 .
addUse(GR.getSPIRVTypeID(ResType))
724 .
addUse(
I.getOperand(1).getReg())
// NOTE(review): elided fragments. selectConstVector(): builds an
// OpConstantComposite whose elements must all resolve (through ASSIGN_TYPE)
// to G_CONSTANT / G_FCONSTANT defs — asserted via the lambda fragment below.
728 bool SPIRVInstructionSelector::selectConstVector(
Register ResVReg,
738 SPIRVType *ConstTy = this->MRI->getVRegDef(MO.getReg());
739 assert(ConstTy && ConstTy->getOpcode() == SPIRV::ASSIGN_TYPE &&
740 ConstTy->getOperand(1).isReg());
741 Register ConstReg = ConstTy->getOperand(1).getReg();
742 const MachineInstr *Const = this->MRI->getVRegDef(ConstReg);
744 return (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
745 Const->getOpcode() == TargetOpcode::G_FCONSTANT);
748 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
749 TII.get(SPIRV::OpConstantComposite))
751 .
addUse(GR.getSPIRVTypeID(ResType));
752 for (
unsigned i =
I.getNumExplicitDefs();
i <
I.getNumExplicitOperands(); ++
i)
753 MIB.
addUse(
I.getOperand(
i).getReg());
// selectCmp(): shared tail for ICmp/FCmp — emits the chosen compare opcode
// over operands 2 and 3, which must share a SPIR-V type.
757 bool SPIRVInstructionSelector::selectCmp(
Register ResVReg,
761 Register Cmp0 =
I.getOperand(2).getReg();
762 Register Cmp1 =
I.getOperand(3).getReg();
763 assert(GR.getSPIRVTypeForVReg(Cmp0)->getOpcode() ==
764 GR.getSPIRVTypeForVReg(Cmp1)->getOpcode() &&
765 "CMP operands should have the same type");
766 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(CmpOpc))
768 .
addUse(GR.getSPIRVTypeID(ResType))
// selectICmp(): picks pointer/bool/integer compare opcode based on the
// operand's SPIR-V type, then defers to selectCmp().
774 bool SPIRVInstructionSelector::selectICmp(
Register ResVReg,
777 auto Pred =
I.getOperand(1).getPredicate();
780 Register CmpOperand =
I.getOperand(2).getReg();
781 if (GR.isScalarOfType(CmpOperand, SPIRV::OpTypePointer))
783 else if (GR.isScalarOrVectorOfType(CmpOperand, SPIRV::OpTypeBool))
787 return selectCmp(ResVReg, ResType, CmpOpc,
I);
// Immediate renderers used by the TableGen-generated patterns.
793 assert(
I.getOpcode() == TargetOpcode::G_FCONSTANT && OpIdx == -1 &&
794 "Expected G_FCONSTANT");
795 const ConstantFP *FPImm =
I.getOperand(1).getFPImm();
802 assert(
I.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
803 "Expected G_CONSTANT");
804 addNumImm(
I.getOperand(1).getCImm()->getValue(), MIB);
// buildI32Constant() fragment: materializes an i32 constant, defaulting the
// type to a fresh 32-bit integer type when none is supplied.
811 ResType ? ResType : GR.getOrCreateSPIRVIntegerType(32,
I,
TII);
819 .
addUse(GR.getSPIRVTypeID(SpvI32Ty));
823 .
addUse(GR.getSPIRVTypeID(SpvI32Ty))
830 bool SPIRVInstructionSelector::selectFCmp(
Register ResVReg,
834 return selectCmp(ResVReg, ResType, CmpOp,
I);
// NOTE(review): elided fragments. buildZerosVal() tail: a zero constant of
// the requested type.
839 return buildI32Constant(0,
I, ResType);
// buildOnesVal(): all-ones (or one) constant; vector results are assembled
// via OpConstantComposite from per-element constants.
842 Register SPIRVInstructionSelector::buildOnesVal(
bool AllOnes,
845 unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
849 if (ResType->
getOpcode() == SPIRV::OpTypeVector) {
852 unsigned Opcode = SPIRV::OpConstantComposite;
853 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
855 .
addUse(GR.getSPIRVTypeID(ResType));
856 for (
unsigned i = 0;
i < NumEles; ++
i)
// selectSelect(): lowers bool->int extension as OpSelect between a zeros and
// a ones/one constant; scalar vs. vector condition picks the opcode variant.
864 bool SPIRVInstructionSelector::selectSelect(
Register ResVReg,
867 bool IsSigned)
const {
869 Register ZeroReg = buildZerosVal(ResType,
I);
870 Register OneReg = buildOnesVal(IsSigned, ResType,
I);
872 GR.isScalarOfType(
I.getOperand(1).getReg(), SPIRV::OpTypeBool);
874 IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectSIVCond;
875 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
877 .
addUse(GR.getSPIRVTypeID(ResType))
878 .
addUse(
I.getOperand(1).getReg())
// selectIToF(): int->float; a bool source is first widened via selectSelect
// into a temporary integer (vector) type, then converted.
884 bool SPIRVInstructionSelector::selectIToF(
Register ResVReg,
887 unsigned Opcode)
const {
888 Register SrcReg =
I.getOperand(1).getReg();
891 if (GR.isScalarOrVectorOfType(
I.getOperand(1).getReg(), SPIRV::OpTypeBool)) {
892 unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
894 if (ResType->
getOpcode() == SPIRV::OpTypeVector) {
896 TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts,
I,
TII);
899 selectSelect(SrcReg, TmpType,
I,
false);
901 return selectUnOpWithSrc(ResVReg, ResType,
I, SrcReg, Opcode);
904 bool SPIRVInstructionSelector::selectExt(
Register ResVReg,
907 if (GR.isScalarOrVectorOfType(
I.getOperand(1).getReg(), SPIRV::OpTypeBool))
908 return selectSelect(ResVReg, ResType,
I, IsSigned);
909 unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
910 return selectUnOp(ResVReg, ResType,
I, Opcode);
// NOTE(review): elided fragments of selectIntToBool(): masks the integer
// with 1 (scalar/vector bitwise-and variants), then compares the result
// against zero with OpINotEqual to produce a bool.
913 bool SPIRVInstructionSelector::selectIntToBool(
Register IntReg,
920 bool IsVectorTy = IntTy->
getOpcode() == SPIRV::OpTypeVector;
921 unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
923 Register One = buildOnesVal(
false, IntTy,
I);
927 .
addUse(GR.getSPIRVTypeID(IntTy))
931 return BuildMI(
BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpINotEqual))
933 .
addUse(GR.getSPIRVTypeID(BoolTy))
939 bool SPIRVInstructionSelector::selectTrunc(
Register ResVReg,
942 if (GR.isScalarOrVectorOfType(ResVReg, SPIRV::OpTypeBool)) {
943 Register IntReg =
I.getOperand(1).getReg();
944 const SPIRVType *ArgType = GR.getSPIRVTypeForVReg(IntReg);
945 return selectIntToBool(IntReg, ResVReg, ArgType, ResType,
I);
947 bool IsSigned = GR.isScalarOrVectorSigned(ResType);
948 unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
949 return selectUnOp(ResVReg, ResType,
I, Opcode);
// NOTE(review): elided fragments. selectConst(): a null pointer constant
// becomes OpConstantNull, everything else OpConstantI with the immediate.
952 bool SPIRVInstructionSelector::selectConst(
Register ResVReg,
958 if (ResType->
getOpcode() == SPIRV::OpTypePointer &&
Imm.isNullValue()) {
959 return BuildMI(
BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantNull))
961 .
addUse(GR.getSPIRVTypeID(ResType))
964 auto MIB =
BuildMI(
BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantI))
966 .
addUse(GR.getSPIRVTypeID(ResType));
// selectOpUndef(): G_IMPLICIT_DEF -> OpUndef of the result type.
973 bool SPIRVInstructionSelector::selectOpUndef(
Register ResVReg,
976 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUndef))
978 .
addUse(GR.getSPIRVTypeID(ResType))
// Helper fragment: checks an operand resolves (through ASSIGN_TYPE) to a
// G_CONSTANT def — presumably an isImm/foldImm utility; confirm upstream.
985 if (TypeInst->
getOpcode() != SPIRV::ASSIGN_TYPE)
989 return ImmInst->
getOpcode() == TargetOpcode::G_CONSTANT;
// selectInsertVal(): OpCompositeInsert — note the object (operand 3) comes
// before the composite (operand 2) in the SPIR-V operand order.
999 bool SPIRVInstructionSelector::selectInsertVal(
Register ResVReg,
1003 return BuildMI(
BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeInsert))
1005 .
addUse(GR.getSPIRVTypeID(ResType))
1007 .
addUse(
I.getOperand(3).getReg())
1009 .
addUse(
I.getOperand(2).getReg())
// selectExtractVal(): OpCompositeExtract with a literal index.
1015 bool SPIRVInstructionSelector::selectExtractVal(
Register ResVReg,
1019 return BuildMI(
BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
1021 .
addUse(GR.getSPIRVTypeID(ResType))
1022 .
addUse(
I.getOperand(2).getReg())
// selectInsertElt(): constant index folds to selectInsertVal, dynamic index
// uses OpVectorInsertDynamic.
1028 bool SPIRVInstructionSelector::selectInsertElt(
Register ResVReg,
1032 return selectInsertVal(ResVReg, ResType,
I);
1034 return BuildMI(
BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVectorInsertDynamic))
1036 .
addUse(GR.getSPIRVTypeID(ResType))
1037 .
addUse(
I.getOperand(2).getReg())
1038 .
addUse(
I.getOperand(3).getReg())
1039 .
addUse(
I.getOperand(4).getReg())
// selectExtractElt(): same split for OpVectorExtractDynamic.
1043 bool SPIRVInstructionSelector::selectExtractElt(
Register ResVReg,
1047 return selectExtractVal(ResVReg, ResType,
I);
1049 return BuildMI(
BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVectorExtractDynamic))
1051 .
addUse(GR.getSPIRVTypeID(ResType))
1052 .
addUse(
I.getOperand(2).getReg())
1053 .
addUse(
I.getOperand(3).getReg())
// selectGEP(): operand 2 (an immediate) selects in-bounds vs. plain
// OpPtrAccessChain; remaining operands are appended as indices.
1057 bool SPIRVInstructionSelector::selectGEP(
Register ResVReg,
1063 unsigned Opcode =
I.getOperand(2).getImm() ? SPIRV::OpInBoundsPtrAccessChain
1064 : SPIRV::OpPtrAccessChain;
1065 auto Res =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
1067 .
addUse(GR.getSPIRVTypeID(ResType))
1069 .
addUse(
I.getOperand(3).getReg());
1071 for (
unsigned i = 4;
i <
I.getNumExplicitOperands(); ++
i)
1072 Res.
addUse(
I.getOperand(
i).getReg());
// NOTE(review): elided fragments of selectIntrinsic() — dispatch over the
// SPIR-V target intrinsics (Intrinsic::spv_*).
1076 bool SPIRVInstructionSelector::selectIntrinsic(
Register ResVReg,
1080 switch (
I.getIntrinsicID()) {
1081 case Intrinsic::spv_load:
1082 return selectLoad(ResVReg, ResType,
I);
1084 case Intrinsic::spv_store:
1085 return selectStore(
I);
1087 case Intrinsic::spv_extractv:
1088 return selectExtractVal(ResVReg, ResType,
I);
1090 case Intrinsic::spv_insertv:
1091 return selectInsertVal(ResVReg, ResType,
I);
1093 case Intrinsic::spv_extractelt:
1094 return selectExtractElt(ResVReg, ResType,
I);
1096 case Intrinsic::spv_insertelt:
1097 return selectInsertElt(ResVReg, ResType,
I);
1099 case Intrinsic::spv_gep:
1100 return selectGEP(ResVReg, ResType,
I);
// Global init/unref re-enters selectGlobalValue for the referenced global.
1102 case Intrinsic::spv_unref_global:
1103 case Intrinsic::spv_init_global: {
1109 return selectGlobalValue(
MI->getOperand(0).getReg(), *
MI,
Init);
// spv_const_composite: zero operands means a null constant, otherwise the
// elements are appended to an OpConstantComposite.
1111 case Intrinsic::spv_const_composite: {
1113 bool IsNull =
I.getNumExplicitDefs() + 1 ==
I.getNumExplicitOperands();
1115 IsNull ? SPIRV::OpConstantNull : SPIRV::OpConstantComposite;
1118 .
addUse(GR.getSPIRVTypeID(ResType));
1121 for (
unsigned i =
I.getNumExplicitDefs() + 1;
1122 i <
I.getNumExplicitOperands(); ++
i) {
1123 MIB.
addUse(
I.getOperand(
i).getReg());
// spv_assign_name: OpName over the target register plus packed string words.
1128 case Intrinsic::spv_assign_name: {
1129 auto MIB =
BuildMI(
BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpName));
1130 MIB.
addUse(
I.getOperand(
I.getNumExplicitDefs() + 1).getReg());
1131 for (
unsigned i =
I.getNumExplicitDefs() + 2;
1132 i <
I.getNumExplicitOperands(); ++
i) {
1133 MIB.
addImm(
I.getOperand(
i).getImm());
// spv_switch: OpSwitch — operands may be registers, case immediates, or
// destination blocks, appended in kind-appropriate form.
1137 case Intrinsic::spv_switch: {
1138 auto MIB =
BuildMI(
BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSwitch));
1139 for (
unsigned i = 1;
i <
I.getNumExplicitOperands(); ++
i) {
1140 if (
I.getOperand(
i).isReg())
1141 MIB.
addReg(
I.getOperand(
i).getReg());
1142 else if (
I.getOperand(
i).isCImm())
1143 addNumImm(
I.getOperand(
i).getCImm()->getValue(), MIB);
1144 else if (
I.getOperand(
i).isMBB())
1145 MIB.
addMBB(
I.getOperand(
i).getMBB());
// NOTE(review): elided fragments. selectFrameIndex(): a stack slot becomes
// an OpVariable of the pointer result type.
1157 bool SPIRVInstructionSelector::selectFrameIndex(
Register ResVReg,
1160 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVariable))
1162 .
addUse(GR.getSPIRVTypeID(ResType))
// selectBranch(): a plain G_BR after a G_BRCOND folds into the preceding
// conditional branch's false edge (OpBranchConditional); otherwise OpBranch.
1167 bool SPIRVInstructionSelector::selectBranch(
MachineInstr &
I)
const {
1174 if (PrevI !=
nullptr && PrevI->
getOpcode() == TargetOpcode::G_BRCOND) {
1175 return BuildMI(
MBB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBranchConditional))
1178 .
addMBB(
I.getOperand(0).getMBB())
1182 .
addMBB(
I.getOperand(0).getMBB())
// selectBranchCond(): skipped when the following instruction already emitted
// the OpBranchConditional; otherwise emits one whose false target is
// presumably the fall-through block — confirm against the elided lines.
1186 bool SPIRVInstructionSelector::selectBranchCond(
MachineInstr &
I)
const {
1199 if (NextI !=
nullptr && NextI->
getOpcode() == SPIRV::OpBranchConditional)
1206 return BuildMI(
MBB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBranchConditional))
1207 .
addUse(
I.getOperand(0).getReg())
1208 .
addMBB(
I.getOperand(1).getMBB())
// selectPhi(): G_PHI -> OpPhi, copying (value, predecessor-block) pairs.
1213 bool SPIRVInstructionSelector::selectPhi(
Register ResVReg,
1216 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpPhi))
1218 .
addUse(GR.getSPIRVTypeID(ResType));
1219 const unsigned NumOps =
I.getNumOperands();
1220 for (
unsigned i = 1;
i < NumOps;
i += 2) {
1221 MIB.
addUse(
I.getOperand(
i + 0).getReg());
1222 MIB.
addMBB(
I.getOperand(
i + 1).getMBB());
// selectGlobalValue(): registers the global's SPIR-V type, checks for a
// non-undef initializer, derives linkage (import/export unless internal or
// Function storage), and delegates to GR.buildGlobalVariable().
1227 bool SPIRVInstructionSelector::selectGlobalValue(
1232 SPIRVType *ResType = GR.getOrCreateSPIRVType(
1233 GV->
getType(), MIRBuilder, SPIRV::AccessQualifier::ReadWrite,
false);
1237 auto GlobalVar = cast<GlobalVariable>(GV);
1239 bool HasInit =
GlobalVar->hasInitializer() &&
1240 !isa<UndefValue>(
GlobalVar->getInitializer());
1243 if (HasInit && !
Init)
1248 bool HasLnkTy = GV->
getLinkage() != GlobalValue::InternalLinkage &&
1249 Storage != SPIRV::StorageClass::Function;
1252 ? SPIRV::LinkageType::Import
1253 : SPIRV::LinkageType::Export;
1255 Register Reg = GR.buildGlobalVariable(ResVReg, ResType, GlobalIdent, GV,
1257 HasLnkTy, LnkType, MIRBuilder,
true);
1258 return Reg.isValid();
// Factory fragment: creates the selector instance for the target.
1266 return new SPIRVInstructionSelector(
TM, Subtarget, RBI);