32#include "llvm/IR/IntrinsicsSPIRV.h"
36#define DEBUG_TYPE "spirv-isel"
43 std::vector<std::pair<SPIRV::InstructionSet::InstructionSet, uint32_t>>;
47llvm::SPIRV::SelectionControl::SelectionControl
48getSelectionOperandForImm(
int Imm) {
50 return SPIRV::SelectionControl::Flatten;
52 return SPIRV::SelectionControl::DontFlatten;
54 return SPIRV::SelectionControl::None;
58#define GET_GLOBALISEL_PREDICATE_BITSET
59#include "SPIRVGenGlobalISel.inc"
60#undef GET_GLOBALISEL_PREDICATE_BITSET
87#define GET_GLOBALISEL_PREDICATES_DECL
88#include "SPIRVGenGlobalISel.inc"
89#undef GET_GLOBALISEL_PREDICATES_DECL
91#define GET_GLOBALISEL_TEMPORARIES_DECL
92#include "SPIRVGenGlobalISel.inc"
93#undef GET_GLOBALISEL_TEMPORARIES_DECL
117 unsigned BitSetOpcode)
const;
121 unsigned BitSetOpcode)
const;
125 unsigned BitSetOpcode,
bool SwapPrimarySide)
const;
129 unsigned BitSetOpcode,
130 bool SwapPrimarySide)
const;
137 unsigned Opcode)
const;
140 unsigned Opcode)
const;
160 unsigned NegateOpcode = 0)
const;
220 template <
bool Signed>
223 template <
bool Signed>
244 bool IsSigned,
unsigned Opcode)
const;
246 bool IsSigned)
const;
252 bool IsSigned)
const;
291 GL::GLSLExtInst GLInst)
const;
296 GL::GLSLExtInst GLInst)
const;
318 bool selectCounterHandleFromBinding(
Register &ResVReg,
329 bool selectResourceNonUniformIndex(
Register &ResVReg,
341 std::pair<Register, bool>
343 const SPIRVType *ResType =
nullptr)
const;
355 SPIRV::StorageClass::StorageClass SC)
const;
362 SPIRV::StorageClass::StorageClass SC,
374 bool loadVec3BuiltinInputID(SPIRV::BuiltIn::BuiltIn BuiltInValue,
377 bool loadBuiltinInputID(SPIRV::BuiltIn::BuiltIn BuiltInValue,
382 void decorateUsesAsNonUniform(
Register &NonUniformReg)
const;
386bool sampledTypeIsSignedInteger(
const llvm::Type *HandleType) {
388 if (
TET->getTargetExtName() ==
"spirv.Image") {
391 assert(
TET->getTargetExtName() ==
"spirv.SignedImage");
392 return TET->getTypeParameter(0)->isIntegerTy();
396#define GET_GLOBALISEL_IMPL
397#include "SPIRVGenGlobalISel.inc"
398#undef GET_GLOBALISEL_IMPL
404 TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()),
407#include
"SPIRVGenGlobalISel.inc"
410#include
"SPIRVGenGlobalISel.inc"
422 InstructionSelector::setupMF(MF, VT, CoverageInfo, PSI, BFI);
426void SPIRVInstructionSelector::resetVRegsType(MachineFunction &MF) {
427 if (HasVRegsReset == &MF)
432 for (
unsigned I = 0,
E =
MRI.getNumVirtRegs();
I !=
E; ++
I) {
434 LLT RegType =
MRI.getType(
Reg);
442 for (
const auto &
MBB : MF) {
443 for (
const auto &
MI :
MBB) {
446 if (
MI.getOpcode() != SPIRV::ASSIGN_TYPE)
450 LLT DstType =
MRI.getType(DstReg);
452 LLT SrcType =
MRI.getType(SrcReg);
453 if (DstType != SrcType)
454 MRI.setType(DstReg,
MRI.getType(SrcReg));
456 const TargetRegisterClass *DstRC =
MRI.getRegClassOrNull(DstReg);
457 const TargetRegisterClass *SrcRC =
MRI.getRegClassOrNull(SrcReg);
458 if (DstRC != SrcRC && SrcRC)
459 MRI.setRegClass(DstReg, SrcRC);
475 case TargetOpcode::G_CONSTANT:
476 case TargetOpcode::G_FCONSTANT:
477 case TargetOpcode::G_IMPLICIT_DEF:
479 case TargetOpcode::G_INTRINSIC:
480 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
481 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
483 Intrinsic::spv_const_composite;
484 case TargetOpcode::G_BUILD_VECTOR:
485 case TargetOpcode::G_SPLAT_VECTOR: {
496 case SPIRV::OpConstantTrue:
497 case SPIRV::OpConstantFalse:
498 case SPIRV::OpConstantI:
499 case SPIRV::OpConstantF:
500 case SPIRV::OpConstantComposite:
501 case SPIRV::OpConstantCompositeContinuedINTEL:
502 case SPIRV::OpConstantSampler:
503 case SPIRV::OpConstantNull:
505 case SPIRV::OpConstantFunctionPointerINTEL:
531 case Intrinsic::spv_all:
532 case Intrinsic::spv_alloca:
533 case Intrinsic::spv_any:
534 case Intrinsic::spv_bitcast:
535 case Intrinsic::spv_const_composite:
536 case Intrinsic::spv_cross:
537 case Intrinsic::spv_degrees:
538 case Intrinsic::spv_distance:
539 case Intrinsic::spv_extractelt:
540 case Intrinsic::spv_extractv:
541 case Intrinsic::spv_faceforward:
542 case Intrinsic::spv_fdot:
543 case Intrinsic::spv_firstbitlow:
544 case Intrinsic::spv_firstbitshigh:
545 case Intrinsic::spv_firstbituhigh:
546 case Intrinsic::spv_frac:
547 case Intrinsic::spv_gep:
548 case Intrinsic::spv_global_offset:
549 case Intrinsic::spv_global_size:
550 case Intrinsic::spv_group_id:
551 case Intrinsic::spv_insertelt:
552 case Intrinsic::spv_insertv:
553 case Intrinsic::spv_isinf:
554 case Intrinsic::spv_isnan:
555 case Intrinsic::spv_lerp:
556 case Intrinsic::spv_length:
557 case Intrinsic::spv_normalize:
558 case Intrinsic::spv_num_subgroups:
559 case Intrinsic::spv_num_workgroups:
560 case Intrinsic::spv_ptrcast:
561 case Intrinsic::spv_radians:
562 case Intrinsic::spv_reflect:
563 case Intrinsic::spv_refract:
564 case Intrinsic::spv_resource_getpointer:
565 case Intrinsic::spv_resource_handlefrombinding:
566 case Intrinsic::spv_resource_handlefromimplicitbinding:
567 case Intrinsic::spv_resource_nonuniformindex:
568 case Intrinsic::spv_rsqrt:
569 case Intrinsic::spv_saturate:
570 case Intrinsic::spv_sdot:
571 case Intrinsic::spv_sign:
572 case Intrinsic::spv_smoothstep:
573 case Intrinsic::spv_step:
574 case Intrinsic::spv_subgroup_id:
575 case Intrinsic::spv_subgroup_local_invocation_id:
576 case Intrinsic::spv_subgroup_max_size:
577 case Intrinsic::spv_subgroup_size:
578 case Intrinsic::spv_thread_id:
579 case Intrinsic::spv_thread_id_in_group:
580 case Intrinsic::spv_udot:
581 case Intrinsic::spv_undef:
582 case Intrinsic::spv_value_md:
583 case Intrinsic::spv_workgroup_size:
595 case SPIRV::OpTypeVoid:
596 case SPIRV::OpTypeBool:
597 case SPIRV::OpTypeInt:
598 case SPIRV::OpTypeFloat:
599 case SPIRV::OpTypeVector:
600 case SPIRV::OpTypeMatrix:
601 case SPIRV::OpTypeImage:
602 case SPIRV::OpTypeSampler:
603 case SPIRV::OpTypeSampledImage:
604 case SPIRV::OpTypeArray:
605 case SPIRV::OpTypeRuntimeArray:
606 case SPIRV::OpTypeStruct:
607 case SPIRV::OpTypeOpaque:
608 case SPIRV::OpTypePointer:
609 case SPIRV::OpTypeFunction:
610 case SPIRV::OpTypeEvent:
611 case SPIRV::OpTypeDeviceEvent:
612 case SPIRV::OpTypeReserveId:
613 case SPIRV::OpTypeQueue:
614 case SPIRV::OpTypePipe:
615 case SPIRV::OpTypeForwardPointer:
616 case SPIRV::OpTypePipeStorage:
617 case SPIRV::OpTypeNamedBarrier:
618 case SPIRV::OpTypeAccelerationStructureNV:
619 case SPIRV::OpTypeCooperativeMatrixNV:
620 case SPIRV::OpTypeCooperativeMatrixKHR:
630 if (
MI.getNumDefs() == 0)
633 for (
const auto &MO :
MI.all_defs()) {
635 if (
Reg.isPhysical()) {
639 for (
const auto &
UseMI :
MRI.use_nodbg_instructions(
Reg)) {
640 if (
UseMI.getOpcode() != SPIRV::OpName) {
647 if (
MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE ||
MI.isFakeUse() ||
648 MI.isLifetimeMarker()) {
651 <<
"Not dead: Opcode is LOCAL_ESCAPE, fake use, or lifetime marker.\n");
662 if (
MI.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
663 MI.getOpcode() == TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS) {
666 LLVM_DEBUG(
dbgs() <<
"Dead: Intrinsic with no real side effects.\n");
671 if (
MI.mayStore() ||
MI.isCall() ||
672 (
MI.mayLoad() &&
MI.hasOrderedMemoryRef()) ||
MI.isPosition() ||
673 MI.isDebugInstr() ||
MI.isTerminator() ||
MI.isJumpTableDebugInfo()) {
674 LLVM_DEBUG(
dbgs() <<
"Not dead: instruction has side effects.\n");
685 LLVM_DEBUG(
dbgs() <<
"Dead: known opcode with no side effects\n");
692void SPIRVInstructionSelector::removeOpNamesForDeadMI(MachineInstr &
MI)
const {
694 for (
const auto &MO :
MI.all_defs()) {
698 SmallVector<MachineInstr *, 4> UselessOpNames;
699 for (MachineInstr &
UseMI :
MRI->use_nodbg_instructions(
Reg)) {
701 "There is still a use of the dead function.");
704 for (MachineInstr *OpNameMI : UselessOpNames) {
706 OpNameMI->eraseFromParent();
711void SPIRVInstructionSelector::removeDeadInstruction(MachineInstr &
MI)
const {
714 removeOpNamesForDeadMI(
MI);
715 MI.eraseFromParent();
718bool SPIRVInstructionSelector::select(MachineInstr &
I) {
719 resetVRegsType(*
I.getParent()->getParent());
721 assert(
I.getParent() &&
"Instruction should be in a basic block!");
722 assert(
I.getParent()->getParent() &&
"Instruction should be in a function!");
727 removeDeadInstruction(
I);
734 if (Opcode == SPIRV::ASSIGN_TYPE) {
735 Register DstReg =
I.getOperand(0).getReg();
736 Register SrcReg =
I.getOperand(1).getReg();
737 auto *
Def =
MRI->getVRegDef(SrcReg);
739 Def->getOpcode() != TargetOpcode::G_CONSTANT &&
740 Def->getOpcode() != TargetOpcode::G_FCONSTANT) {
742 if (
Def->getOpcode() == TargetOpcode::G_SELECT) {
743 Register SelectDstReg =
Def->getOperand(0).getReg();
747 Def->removeFromParent();
748 MRI->replaceRegWith(DstReg, SelectDstReg);
750 I.removeFromParent();
752 Res = selectImpl(
I, *CoverageInfo);
754 if (!Res &&
Def->getOpcode() != TargetOpcode::G_CONSTANT) {
755 dbgs() <<
"Unexpected pattern in ASSIGN_TYPE.\nInstruction: ";
759 assert(Res ||
Def->getOpcode() == TargetOpcode::G_CONSTANT);
766 MRI->setRegClass(SrcReg,
MRI->getRegClass(DstReg));
767 MRI->replaceRegWith(SrcReg, DstReg);
769 I.removeFromParent();
771 }
else if (
I.getNumDefs() == 1) {
778 if (DeadMIs.contains(&
I)) {
782 removeDeadInstruction(
I);
786 if (
I.getNumOperands() !=
I.getNumExplicitOperands()) {
787 LLVM_DEBUG(
errs() <<
"Generic instr has unexpected implicit operands\n");
793 bool HasDefs =
I.getNumDefs() > 0;
796 assert(!HasDefs || ResType ||
I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
797 I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF);
798 if (spvSelect(ResVReg, ResType,
I)) {
800 for (
unsigned i = 0; i <
I.getNumDefs(); ++i)
803 I.removeFromParent();
811 case TargetOpcode::G_CONSTANT:
812 case TargetOpcode::G_FCONSTANT:
814 case TargetOpcode::G_SADDO:
815 case TargetOpcode::G_SSUBO:
822 MachineInstr &
I)
const {
823 const TargetRegisterClass *DstRC =
MRI->getRegClassOrNull(DestReg);
824 const TargetRegisterClass *SrcRC =
MRI->getRegClassOrNull(SrcReg);
825 if (DstRC != SrcRC && SrcRC)
826 MRI->setRegClass(DestReg, SrcRC);
827 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
828 TII.get(TargetOpcode::COPY))
834bool SPIRVInstructionSelector::spvSelect(
Register ResVReg,
836 MachineInstr &
I)
const {
837 const unsigned Opcode =
I.getOpcode();
839 return selectImpl(
I, *CoverageInfo);
841 case TargetOpcode::G_CONSTANT:
842 case TargetOpcode::G_FCONSTANT:
843 return selectConst(ResVReg, ResType,
I);
844 case TargetOpcode::G_GLOBAL_VALUE:
845 return selectGlobalValue(ResVReg,
I);
846 case TargetOpcode::G_IMPLICIT_DEF:
847 return selectOpUndef(ResVReg, ResType,
I);
848 case TargetOpcode::G_FREEZE:
849 return selectFreeze(ResVReg, ResType,
I);
851 case TargetOpcode::G_INTRINSIC:
852 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
853 case TargetOpcode::G_INTRINSIC_CONVERGENT:
854 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
855 return selectIntrinsic(ResVReg, ResType,
I);
856 case TargetOpcode::G_BITREVERSE:
857 return selectBitreverse(ResVReg, ResType,
I);
859 case TargetOpcode::G_BUILD_VECTOR:
860 return selectBuildVector(ResVReg, ResType,
I);
861 case TargetOpcode::G_SPLAT_VECTOR:
862 return selectSplatVector(ResVReg, ResType,
I);
864 case TargetOpcode::G_SHUFFLE_VECTOR: {
865 MachineBasicBlock &BB = *
I.getParent();
866 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVectorShuffle))
869 .
addUse(
I.getOperand(1).getReg())
870 .
addUse(
I.getOperand(2).getReg());
871 for (
auto V :
I.getOperand(3).getShuffleMask())
875 case TargetOpcode::G_MEMMOVE:
876 case TargetOpcode::G_MEMCPY:
877 case TargetOpcode::G_MEMSET:
878 return selectMemOperation(ResVReg,
I);
880 case TargetOpcode::G_ICMP:
881 return selectICmp(ResVReg, ResType,
I);
882 case TargetOpcode::G_FCMP:
883 return selectFCmp(ResVReg, ResType,
I);
885 case TargetOpcode::G_FRAME_INDEX:
886 return selectFrameIndex(ResVReg, ResType,
I);
888 case TargetOpcode::G_LOAD:
889 return selectLoad(ResVReg, ResType,
I);
890 case TargetOpcode::G_STORE:
891 return selectStore(
I);
893 case TargetOpcode::G_BR:
894 return selectBranch(
I);
895 case TargetOpcode::G_BRCOND:
896 return selectBranchCond(
I);
898 case TargetOpcode::G_PHI:
899 return selectPhi(ResVReg, ResType,
I);
901 case TargetOpcode::G_FPTOSI:
902 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertFToS);
903 case TargetOpcode::G_FPTOUI:
904 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertFToU);
906 case TargetOpcode::G_FPTOSI_SAT:
907 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertFToS);
908 case TargetOpcode::G_FPTOUI_SAT:
909 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertFToU);
911 case TargetOpcode::G_SITOFP:
912 return selectIToF(ResVReg, ResType,
I,
true, SPIRV::OpConvertSToF);
913 case TargetOpcode::G_UITOFP:
914 return selectIToF(ResVReg, ResType,
I,
false, SPIRV::OpConvertUToF);
916 case TargetOpcode::G_CTPOP:
917 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpBitCount);
918 case TargetOpcode::G_SMIN:
919 return selectExtInst(ResVReg, ResType,
I, CL::s_min, GL::SMin);
920 case TargetOpcode::G_UMIN:
921 return selectExtInst(ResVReg, ResType,
I, CL::u_min, GL::UMin);
923 case TargetOpcode::G_SMAX:
924 return selectExtInst(ResVReg, ResType,
I, CL::s_max, GL::SMax);
925 case TargetOpcode::G_UMAX:
926 return selectExtInst(ResVReg, ResType,
I, CL::u_max, GL::UMax);
928 case TargetOpcode::G_SCMP:
929 return selectSUCmp(ResVReg, ResType,
I,
true);
930 case TargetOpcode::G_UCMP:
931 return selectSUCmp(ResVReg, ResType,
I,
false);
932 case TargetOpcode::G_LROUND:
933 case TargetOpcode::G_LLROUND: {
935 MRI->createVirtualRegister(
MRI->getRegClass(ResVReg),
"lround");
936 MRI->setRegClass(regForLround, &SPIRV::iIDRegClass);
938 regForLround, *(
I.getParent()->getParent()));
940 I, CL::round, GL::Round);
942 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConvertFToS))
948 case TargetOpcode::G_STRICT_FMA:
949 case TargetOpcode::G_FMA:
950 return selectExtInst(ResVReg, ResType,
I, CL::fma, GL::Fma);
952 case TargetOpcode::G_STRICT_FLDEXP:
953 return selectExtInst(ResVReg, ResType,
I, CL::ldexp);
955 case TargetOpcode::G_FPOW:
956 return selectExtInst(ResVReg, ResType,
I, CL::pow, GL::Pow);
957 case TargetOpcode::G_FPOWI:
958 return selectExtInst(ResVReg, ResType,
I, CL::pown);
960 case TargetOpcode::G_FEXP:
961 return selectExtInst(ResVReg, ResType,
I, CL::exp, GL::Exp);
962 case TargetOpcode::G_FEXP2:
963 return selectExtInst(ResVReg, ResType,
I, CL::exp2, GL::Exp2);
964 case TargetOpcode::G_FMODF:
965 return selectModf(ResVReg, ResType,
I);
967 case TargetOpcode::G_FLOG:
968 return selectExtInst(ResVReg, ResType,
I, CL::log, GL::Log);
969 case TargetOpcode::G_FLOG2:
970 return selectExtInst(ResVReg, ResType,
I, CL::log2, GL::Log2);
971 case TargetOpcode::G_FLOG10:
972 return selectLog10(ResVReg, ResType,
I);
974 case TargetOpcode::G_FABS:
975 return selectExtInst(ResVReg, ResType,
I, CL::fabs, GL::FAbs);
976 case TargetOpcode::G_ABS:
977 return selectExtInst(ResVReg, ResType,
I, CL::s_abs, GL::SAbs);
979 case TargetOpcode::G_FMINNUM:
980 case TargetOpcode::G_FMINIMUM:
981 return selectExtInst(ResVReg, ResType,
I, CL::fmin, GL::NMin);
982 case TargetOpcode::G_FMAXNUM:
983 case TargetOpcode::G_FMAXIMUM:
984 return selectExtInst(ResVReg, ResType,
I, CL::fmax, GL::NMax);
986 case TargetOpcode::G_FCOPYSIGN:
987 return selectExtInst(ResVReg, ResType,
I, CL::copysign);
989 case TargetOpcode::G_FCEIL:
990 return selectExtInst(ResVReg, ResType,
I, CL::ceil, GL::Ceil);
991 case TargetOpcode::G_FFLOOR:
992 return selectExtInst(ResVReg, ResType,
I, CL::floor, GL::Floor);
994 case TargetOpcode::G_FCOS:
995 return selectExtInst(ResVReg, ResType,
I, CL::cos, GL::Cos);
996 case TargetOpcode::G_FSIN:
997 return selectExtInst(ResVReg, ResType,
I, CL::sin, GL::Sin);
998 case TargetOpcode::G_FTAN:
999 return selectExtInst(ResVReg, ResType,
I, CL::tan, GL::Tan);
1000 case TargetOpcode::G_FACOS:
1001 return selectExtInst(ResVReg, ResType,
I, CL::acos, GL::Acos);
1002 case TargetOpcode::G_FASIN:
1003 return selectExtInst(ResVReg, ResType,
I, CL::asin, GL::Asin);
1004 case TargetOpcode::G_FATAN:
1005 return selectExtInst(ResVReg, ResType,
I, CL::atan, GL::Atan);
1006 case TargetOpcode::G_FATAN2:
1007 return selectExtInst(ResVReg, ResType,
I, CL::atan2, GL::Atan2);
1008 case TargetOpcode::G_FCOSH:
1009 return selectExtInst(ResVReg, ResType,
I, CL::cosh, GL::Cosh);
1010 case TargetOpcode::G_FSINH:
1011 return selectExtInst(ResVReg, ResType,
I, CL::sinh, GL::Sinh);
1012 case TargetOpcode::G_FTANH:
1013 return selectExtInst(ResVReg, ResType,
I, CL::tanh, GL::Tanh);
1015 case TargetOpcode::G_STRICT_FSQRT:
1016 case TargetOpcode::G_FSQRT:
1017 return selectExtInst(ResVReg, ResType,
I, CL::sqrt, GL::Sqrt);
1019 case TargetOpcode::G_CTTZ:
1020 case TargetOpcode::G_CTTZ_ZERO_UNDEF:
1021 return selectExtInst(ResVReg, ResType,
I, CL::ctz);
1022 case TargetOpcode::G_CTLZ:
1023 case TargetOpcode::G_CTLZ_ZERO_UNDEF:
1024 return selectExtInst(ResVReg, ResType,
I, CL::clz);
1026 case TargetOpcode::G_INTRINSIC_ROUND:
1027 return selectExtInst(ResVReg, ResType,
I, CL::round, GL::Round);
1028 case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
1029 return selectExtInst(ResVReg, ResType,
I, CL::rint, GL::RoundEven);
1030 case TargetOpcode::G_INTRINSIC_TRUNC:
1031 return selectExtInst(ResVReg, ResType,
I, CL::trunc, GL::Trunc);
1032 case TargetOpcode::G_FRINT:
1033 case TargetOpcode::G_FNEARBYINT:
1034 return selectExtInst(ResVReg, ResType,
I, CL::rint, GL::RoundEven);
1036 case TargetOpcode::G_SMULH:
1037 return selectExtInst(ResVReg, ResType,
I, CL::s_mul_hi);
1038 case TargetOpcode::G_UMULH:
1039 return selectExtInst(ResVReg, ResType,
I, CL::u_mul_hi);
1041 case TargetOpcode::G_SADDSAT:
1042 return selectExtInst(ResVReg, ResType,
I, CL::s_add_sat);
1043 case TargetOpcode::G_UADDSAT:
1044 return selectExtInst(ResVReg, ResType,
I, CL::u_add_sat);
1045 case TargetOpcode::G_SSUBSAT:
1046 return selectExtInst(ResVReg, ResType,
I, CL::s_sub_sat);
1047 case TargetOpcode::G_USUBSAT:
1048 return selectExtInst(ResVReg, ResType,
I, CL::u_sub_sat);
1050 case TargetOpcode::G_FFREXP:
1051 return selectFrexp(ResVReg, ResType,
I);
1053 case TargetOpcode::G_UADDO:
1054 return selectOverflowArith(ResVReg, ResType,
I,
1055 ResType->
getOpcode() == SPIRV::OpTypeVector
1056 ? SPIRV::OpIAddCarryV
1057 : SPIRV::OpIAddCarryS);
1058 case TargetOpcode::G_USUBO:
1059 return selectOverflowArith(ResVReg, ResType,
I,
1060 ResType->
getOpcode() == SPIRV::OpTypeVector
1061 ? SPIRV::OpISubBorrowV
1062 : SPIRV::OpISubBorrowS);
1063 case TargetOpcode::G_UMULO:
1064 return selectOverflowArith(ResVReg, ResType,
I, SPIRV::OpUMulExtended);
1065 case TargetOpcode::G_SMULO:
1066 return selectOverflowArith(ResVReg, ResType,
I, SPIRV::OpSMulExtended);
1068 case TargetOpcode::G_SEXT:
1069 return selectExt(ResVReg, ResType,
I,
true);
1070 case TargetOpcode::G_ANYEXT:
1071 case TargetOpcode::G_ZEXT:
1072 return selectExt(ResVReg, ResType,
I,
false);
1073 case TargetOpcode::G_TRUNC:
1074 return selectTrunc(ResVReg, ResType,
I);
1075 case TargetOpcode::G_FPTRUNC:
1076 case TargetOpcode::G_FPEXT:
1077 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpFConvert);
1079 case TargetOpcode::G_PTRTOINT:
1080 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertPtrToU);
1081 case TargetOpcode::G_INTTOPTR:
1082 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertUToPtr);
1083 case TargetOpcode::G_BITCAST:
1084 return selectBitcast(ResVReg, ResType,
I);
1085 case TargetOpcode::G_ADDRSPACE_CAST:
1086 return selectAddrSpaceCast(ResVReg, ResType,
I);
1087 case TargetOpcode::G_PTR_ADD: {
1089 assert(
I.getOperand(1).isReg() &&
I.getOperand(2).isReg());
1093 assert(((*II).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
1094 (*II).getOpcode() == TargetOpcode::COPY ||
1095 (*II).getOpcode() == SPIRV::OpVariable) &&
1098 bool IsGVInit =
false;
1100 UseIt =
MRI->use_instr_begin(
I.getOperand(0).getReg()),
1101 UseEnd =
MRI->use_instr_end();
1102 UseIt != UseEnd; UseIt = std::next(UseIt)) {
1103 if ((*UseIt).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
1104 (*UseIt).getOpcode() == SPIRV::OpVariable) {
1114 if (GVPointeeType && ResPointeeType && GVPointeeType != ResPointeeType) {
1117 Register NewVReg =
MRI->createGenericVirtualRegister(
MRI->getType(GV));
1118 MRI->setRegClass(NewVReg,
MRI->getRegClass(GV));
1127 "incompatible result and operand types in a bitcast");
1129 MachineInstrBuilder MIB =
1130 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBitcast))
1137 ? SPIRV::OpInBoundsAccessChain
1138 : SPIRV::OpInBoundsPtrAccessChain))
1142 .
addUse(
I.getOperand(2).getReg())
1145 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSpecConstantOp))
1149 static_cast<uint32_t
>(SPIRV::Opcode::InBoundsPtrAccessChain))
1151 .
addUse(
I.getOperand(2).getReg())
1159 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSpecConstantOp))
1162 .
addImm(
static_cast<uint32_t
>(
1163 SPIRV::Opcode::InBoundsPtrAccessChain))
1166 .
addUse(
I.getOperand(2).getReg());
1170 case TargetOpcode::G_ATOMICRMW_OR:
1171 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicOr);
1172 case TargetOpcode::G_ATOMICRMW_ADD:
1173 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicIAdd);
1174 case TargetOpcode::G_ATOMICRMW_AND:
1175 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicAnd);
1176 case TargetOpcode::G_ATOMICRMW_MAX:
1177 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicSMax);
1178 case TargetOpcode::G_ATOMICRMW_MIN:
1179 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicSMin);
1180 case TargetOpcode::G_ATOMICRMW_SUB:
1181 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicISub);
1182 case TargetOpcode::G_ATOMICRMW_XOR:
1183 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicXor);
1184 case TargetOpcode::G_ATOMICRMW_UMAX:
1185 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicUMax);
1186 case TargetOpcode::G_ATOMICRMW_UMIN:
1187 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicUMin);
1188 case TargetOpcode::G_ATOMICRMW_XCHG:
1189 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicExchange);
1190 case TargetOpcode::G_ATOMIC_CMPXCHG:
1191 return selectAtomicCmpXchg(ResVReg, ResType,
I);
1193 case TargetOpcode::G_ATOMICRMW_FADD:
1194 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFAddEXT);
1195 case TargetOpcode::G_ATOMICRMW_FSUB:
1197 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFAddEXT,
1198 ResType->
getOpcode() == SPIRV::OpTypeVector
1200 : SPIRV::OpFNegate);
1201 case TargetOpcode::G_ATOMICRMW_FMIN:
1202 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFMinEXT);
1203 case TargetOpcode::G_ATOMICRMW_FMAX:
1204 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFMaxEXT);
1206 case TargetOpcode::G_FENCE:
1207 return selectFence(
I);
1209 case TargetOpcode::G_STACKSAVE:
1210 return selectStackSave(ResVReg, ResType,
I);
1211 case TargetOpcode::G_STACKRESTORE:
1212 return selectStackRestore(
I);
1214 case TargetOpcode::G_UNMERGE_VALUES:
1220 case TargetOpcode::G_TRAP:
1221 case TargetOpcode::G_UBSANTRAP:
1222 case TargetOpcode::DBG_LABEL:
1224 case TargetOpcode::G_DEBUGTRAP:
1225 return selectDebugTrap(ResVReg, ResType,
I);
1232bool SPIRVInstructionSelector::selectDebugTrap(
Register ResVReg,
1234 MachineInstr &
I)
const {
1235 unsigned Opcode = SPIRV::OpNop;
1237 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(Opcode))
1241bool SPIRVInstructionSelector::selectExtInst(
Register ResVReg,
1244 GL::GLSLExtInst GLInst)
const {
1246 SPIRV::InstructionSet::InstructionSet::GLSL_std_450)) {
1247 std::string DiagMsg;
1248 raw_string_ostream OS(DiagMsg);
1249 I.print(OS,
true,
false,
false,
false);
1250 DiagMsg +=
" is only supported with the GLSL extended instruction set.\n";
1253 return selectExtInst(ResVReg, ResType,
I,
1254 {{SPIRV::InstructionSet::GLSL_std_450, GLInst}});
1257bool SPIRVInstructionSelector::selectExtInst(
Register ResVReg,
1260 CL::OpenCLExtInst CLInst)
const {
1261 return selectExtInst(ResVReg, ResType,
I,
1262 {{SPIRV::InstructionSet::OpenCL_std, CLInst}});
1265bool SPIRVInstructionSelector::selectExtInst(
Register ResVReg,
1268 CL::OpenCLExtInst CLInst,
1269 GL::GLSLExtInst GLInst)
const {
1270 ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
1271 {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
1272 return selectExtInst(ResVReg, ResType,
I, ExtInsts);
1275bool SPIRVInstructionSelector::selectExtInst(
Register ResVReg,
1280 for (
const auto &Ex : Insts) {
1281 SPIRV::InstructionSet::InstructionSet
Set = Ex.first;
1282 uint32_t Opcode = Ex.second;
1285 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
1288 .
addImm(
static_cast<uint32_t
>(Set))
1291 const unsigned NumOps =
I.getNumOperands();
1294 I.getOperand(Index).getType() ==
1295 MachineOperand::MachineOperandType::MO_IntrinsicID)
1298 MIB.
add(
I.getOperand(Index));
1304bool SPIRVInstructionSelector::selectExtInstForLRound(
1306 CL::OpenCLExtInst CLInst, GL::GLSLExtInst GLInst)
const {
1307 ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
1308 {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
1309 return selectExtInstForLRound(ResVReg, ResType,
I, ExtInsts);
1312bool SPIRVInstructionSelector::selectExtInstForLRound(
1315 for (
const auto &Ex : Insts) {
1316 SPIRV::InstructionSet::InstructionSet
Set = Ex.first;
1317 uint32_t Opcode = Ex.second;
1320 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
1323 .
addImm(
static_cast<uint32_t
>(Set))
1325 const unsigned NumOps =
I.getNumOperands();
1328 I.getOperand(Index).getType() ==
1329 MachineOperand::MachineOperandType::MO_IntrinsicID)
1332 MIB.
add(
I.getOperand(Index));
1340bool SPIRVInstructionSelector::selectFrexp(
Register ResVReg,
1342 MachineInstr &
I)
const {
1343 ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CL::frexp},
1344 {SPIRV::InstructionSet::GLSL_std_450, GL::Frexp}};
1345 for (
const auto &Ex : ExtInsts) {
1346 SPIRV::InstructionSet::InstructionSet
Set = Ex.first;
1347 uint32_t Opcode = Ex.second;
1351 MachineIRBuilder MIRBuilder(
I);
1354 PointeeTy, MIRBuilder, SPIRV::StorageClass::Function);
1359 auto MIB =
BuildMI(*It->getParent(), It, It->getDebugLoc(),
1360 TII.get(SPIRV::OpVariable))
1363 .
addImm(
static_cast<uint32_t
>(SPIRV::StorageClass::Function))
1367 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
1370 .
addImm(
static_cast<uint32_t
>(Ex.first))
1372 .
add(
I.getOperand(2))
1377 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
1378 .
addDef(
I.getOperand(1).getReg())
1387bool SPIRVInstructionSelector::selectOpWithSrcs(
Register ResVReg,
1390 std::vector<Register> Srcs,
1391 unsigned Opcode)
const {
1392 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
1401bool SPIRVInstructionSelector::selectUnOp(
Register ResVReg,
1404 unsigned Opcode)
const {
1406 Register SrcReg =
I.getOperand(1).getReg();
1409 MRI->def_instr_begin(SrcReg);
1410 DefIt !=
MRI->def_instr_end(); DefIt = std::next(DefIt)) {
1411 unsigned DefOpCode = DefIt->getOpcode();
1412 if (DefOpCode == SPIRV::ASSIGN_TYPE) {
1415 if (
auto *VRD =
getVRegDef(*
MRI, DefIt->getOperand(1).getReg()))
1416 DefOpCode = VRD->getOpcode();
1418 if (DefOpCode == TargetOpcode::G_GLOBAL_VALUE ||
1419 DefOpCode == TargetOpcode::G_CONSTANT ||
1420 DefOpCode == SPIRV::OpVariable || DefOpCode == SPIRV::OpConstantI) {
1426 uint32_t SpecOpcode = 0;
1428 case SPIRV::OpConvertPtrToU:
1429 SpecOpcode =
static_cast<uint32_t
>(SPIRV::Opcode::ConvertPtrToU);
1431 case SPIRV::OpConvertUToPtr:
1432 SpecOpcode =
static_cast<uint32_t
>(SPIRV::Opcode::ConvertUToPtr);
1436 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
1437 TII.get(SPIRV::OpSpecConstantOp))
1445 return selectOpWithSrcs(ResVReg, ResType,
I, {
I.getOperand(1).getReg()},
1449bool SPIRVInstructionSelector::selectBitcast(
Register ResVReg,
1451 MachineInstr &
I)
const {
1452 Register OpReg =
I.getOperand(1).getReg();
1456 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpBitcast);
1464 if (
MemOp->isVolatile())
1465 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
1466 if (
MemOp->isNonTemporal())
1467 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
1468 if (
MemOp->getAlign().value())
1469 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);
1475 if (ST->canUseExtension(SPIRV::Extension::SPV_INTEL_memory_access_aliasing)) {
1476 if (
auto *MD =
MemOp->getAAInfo().Scope) {
1480 static_cast<uint32_t>(SPIRV::MemoryOperand::AliasScopeINTELMask);
1482 if (
auto *MD =
MemOp->getAAInfo().NoAlias) {
1486 static_cast<uint32_t>(SPIRV::MemoryOperand::NoAliasINTELMask);
1490 if (SpvMemOp !=
static_cast<uint32_t>(SPIRV::MemoryOperand::None)) {
1492 if (SpvMemOp &
static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
1504 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
1506 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
1508 if (SpvMemOp !=
static_cast<uint32_t>(SPIRV::MemoryOperand::None))
1512bool SPIRVInstructionSelector::selectLoad(
Register ResVReg,
1514 MachineInstr &
I)
const {
1516 Register Ptr =
I.getOperand(1 + OpOffset).getReg();
1521 IntPtrDef->getIntrinsicID() == Intrinsic::spv_resource_getpointer) {
1522 Register HandleReg = IntPtrDef->getOperand(2).getReg();
1524 if (HandleType->
getOpcode() == SPIRV::OpTypeImage) {
1526 MRI->createVirtualRegister(
MRI->getRegClass(HandleReg));
1528 if (!loadHandleBeforePosition(NewHandleReg, HandleType, *HandleDef,
I)) {
1532 Register IdxReg = IntPtrDef->getOperand(3).getReg();
1533 return generateImageReadOrFetch(ResVReg, ResType, NewHandleReg, IdxReg,
1534 I.getDebugLoc(),
I);
1538 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
1542 if (!
I.getNumMemOperands()) {
1543 assert(
I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
1545 TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
1548 MachineIRBuilder MIRBuilder(
I);
1554bool SPIRVInstructionSelector::selectStore(MachineInstr &
I)
const {
1556 Register StoreVal =
I.getOperand(0 + OpOffset).getReg();
1557 Register Ptr =
I.getOperand(1 + OpOffset).getReg();
1562 IntPtrDef->getIntrinsicID() == Intrinsic::spv_resource_getpointer) {
1563 Register HandleReg = IntPtrDef->getOperand(2).getReg();
1565 MRI->createVirtualRegister(
MRI->getRegClass(HandleReg));
1568 if (!loadHandleBeforePosition(NewHandleReg, HandleType, *HandleDef,
I)) {
1572 Register IdxReg = IntPtrDef->getOperand(3).getReg();
1573 if (HandleType->
getOpcode() == SPIRV::OpTypeImage) {
1574 auto BMI =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
1575 TII.get(SPIRV::OpImageWrite))
1581 if (sampledTypeIsSignedInteger(LLVMHandleType))
1584 return BMI.constrainAllUses(
TII,
TRI, RBI);
1589 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpStore))
1592 if (!
I.getNumMemOperands()) {
1593 assert(
I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
1595 TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
1598 MachineIRBuilder MIRBuilder(
I);
1604bool SPIRVInstructionSelector::selectStackSave(
Register ResVReg,
1606 MachineInstr &
I)
const {
1607 if (!STI.
canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
1609 "llvm.stacksave intrinsic: this instruction requires the following "
1610 "SPIR-V extension: SPV_INTEL_variable_length_array",
1613 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSaveMemoryINTEL))
1619bool SPIRVInstructionSelector::selectStackRestore(MachineInstr &
I)
const {
1620 if (!STI.
canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
1622 "llvm.stackrestore intrinsic: this instruction requires the following "
1623 "SPIR-V extension: SPV_INTEL_variable_length_array",
1625 if (!
I.getOperand(0).isReg())
1628 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpRestoreMemoryINTEL))
1629 .
addUse(
I.getOperand(0).getReg())
1634SPIRVInstructionSelector::getOrCreateMemSetGlobal(MachineInstr &
I)
const {
1635 MachineIRBuilder MIRBuilder(
I);
1636 assert(
I.getOperand(1).isReg() &&
I.getOperand(2).isReg());
1643 GlobalVariable *GV =
new GlobalVariable(*CurFunction.
getParent(), LLVMArrTy,
1647 Type *ValTy = Type::getInt8Ty(
I.getMF()->getFunction().getContext());
1648 Type *ArrTy = ArrayType::get(ValTy, Num);
1650 ArrTy, MIRBuilder, SPIRV::StorageClass::UniformConstant);
1653 ArrTy, MIRBuilder, SPIRV::AccessQualifier::None,
false);
1660 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVariable))
1663 .
addImm(SPIRV::StorageClass::UniformConstant)
1665 if (!MIBVar.constrainAllUses(
TII,
TRI, RBI))
1675bool SPIRVInstructionSelector::selectCopyMemory(MachineInstr &
I,
1678 Register DstReg =
I.getOperand(0).getReg();
1688 "Unable to determine pointee type size for OpCopyMemory");
1689 const DataLayout &
DL =
I.getMF()->getFunction().getDataLayout();
1690 if (CopySize !=
DL.getTypeStoreSize(
const_cast<Type *
>(LLVMPointeeTy)))
1692 "OpCopyMemory requires the size to match the pointee type size");
1693 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCopyMemory))
1696 if (
I.getNumMemOperands()) {
1697 MachineIRBuilder MIRBuilder(
I);
1703bool SPIRVInstructionSelector::selectCopyMemorySized(MachineInstr &
I,
1706 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCopyMemorySized))
1707 .
addUse(
I.getOperand(0).getReg())
1709 .
addUse(
I.getOperand(2).getReg());
1710 if (
I.getNumMemOperands()) {
1711 MachineIRBuilder MIRBuilder(
I);
1717bool SPIRVInstructionSelector::selectMemOperation(
Register ResVReg,
1718 MachineInstr &
I)
const {
1719 Register SrcReg =
I.getOperand(1).getReg();
1721 if (
I.getOpcode() == TargetOpcode::G_MEMSET) {
1722 Register VarReg = getOrCreateMemSetGlobal(
I);
1725 Type *ValTy = Type::getInt8Ty(
I.getMF()->getFunction().getContext());
1727 ValTy,
I, SPIRV::StorageClass::UniformConstant);
1729 Result &= selectOpWithSrcs(SrcReg, SourceTy,
I, {VarReg}, SPIRV::OpBitcast);
1732 Result &= selectCopyMemory(
I, SrcReg);
1734 Result &= selectCopyMemorySized(
I, SrcReg);
1736 if (ResVReg.
isValid() && ResVReg !=
I.getOperand(0).getReg())
1737 Result &= BuildCOPY(ResVReg,
I.getOperand(0).getReg(),
I);
1741bool SPIRVInstructionSelector::selectAtomicRMW(
Register ResVReg,
1745 unsigned NegateOpcode)
const {
1748 const MachineMemOperand *MemOp = *
I.memoperands_begin();
1751 auto ScopeConstant = buildI32Constant(Scope,
I);
1752 Register ScopeReg = ScopeConstant.first;
1753 Result &= ScopeConstant.second;
1755 Register Ptr =
I.getOperand(1).getReg();
1761 auto MemSemConstant = buildI32Constant(MemSem ,
I);
1762 Register MemSemReg = MemSemConstant.first;
1763 Result &= MemSemConstant.second;
1765 Register ValueReg =
I.getOperand(2).getReg();
1766 if (NegateOpcode != 0) {
1769 Result &= selectOpWithSrcs(TmpReg, ResType,
I, {ValueReg}, NegateOpcode);
1774 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(NewOpcode))
1784bool SPIRVInstructionSelector::selectUnmergeValues(MachineInstr &
I)
const {
1785 unsigned ArgI =
I.getNumOperands() - 1;
1787 I.getOperand(ArgI).isReg() ?
I.getOperand(ArgI).getReg() :
Register(0);
1790 if (!SrcType || SrcType->
getOpcode() != SPIRV::OpTypeVector)
1792 "cannot select G_UNMERGE_VALUES with a non-vector argument");
1798 unsigned CurrentIndex = 0;
1799 for (
unsigned i = 0; i <
I.getNumDefs(); ++i) {
1800 Register ResVReg =
I.getOperand(i).getReg();
1803 LLT ResLLT =
MRI->getType(ResVReg);
1809 ResType = ScalarType;
1815 if (ResType->
getOpcode() == SPIRV::OpTypeVector) {
1818 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVectorShuffle))
1824 for (
unsigned j = 0;
j < NumElements; ++
j) {
1825 MIB.
addImm(CurrentIndex + j);
1827 CurrentIndex += NumElements;
1831 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
1843bool SPIRVInstructionSelector::selectFence(MachineInstr &
I)
const {
1846 auto MemSemConstant = buildI32Constant(MemSem,
I);
1847 Register MemSemReg = MemSemConstant.first;
1848 bool Result = MemSemConstant.second;
1850 uint32_t
Scope =
static_cast<uint32_t
>(
1852 auto ScopeConstant = buildI32Constant(Scope,
I);
1853 Register ScopeReg = ScopeConstant.first;
1854 Result &= ScopeConstant.second;
1857 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpMemoryBarrier))
1863bool SPIRVInstructionSelector::selectOverflowArith(
Register ResVReg,
1866 unsigned Opcode)
const {
1867 Type *ResTy =
nullptr;
1871 "Not enough info to select the arithmetic with overflow instruction");
1874 "with overflow instruction");
1880 MachineIRBuilder MIRBuilder(
I);
1882 ResTy, MIRBuilder, SPIRV::AccessQualifier::ReadWrite,
false);
1883 assert(
I.getNumDefs() > 1 &&
"Not enought operands");
1889 Register ZeroReg = buildZerosVal(ResType,
I);
1892 MRI->setRegClass(StructVReg, &SPIRV::IDRegClass);
1894 if (ResName.
size() > 0)
1899 BuildMI(BB, MIRBuilder.getInsertPt(),
I.getDebugLoc(),
TII.get(Opcode))
1902 for (
unsigned i =
I.getNumDefs(); i <
I.getNumOperands(); ++i)
1903 MIB.
addUse(
I.getOperand(i).getReg());
1908 MRI->setRegClass(HigherVReg, &SPIRV::iIDRegClass);
1909 for (
unsigned i = 0; i <
I.getNumDefs(); ++i) {
1911 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
1912 .
addDef(i == 1 ? HigherVReg :
I.getOperand(i).getReg())
1920 .
addDef(
I.getOperand(1).getReg())
1927bool SPIRVInstructionSelector::selectAtomicCmpXchg(
Register ResVReg,
1929 MachineInstr &
I)
const {
1934 Register Ptr =
I.getOperand(2).getReg();
1937 const MachineMemOperand *MemOp = *
I.memoperands_begin();
1940 auto ScopeConstant = buildI32Constant(Scope,
I);
1941 ScopeReg = ScopeConstant.first;
1942 Result &= ScopeConstant.second;
1944 unsigned ScSem =
static_cast<uint32_t
>(
1947 unsigned MemSemEq =
static_cast<uint32_t
>(
getMemSemantics(AO)) | ScSem;
1948 auto MemSemEqConstant = buildI32Constant(MemSemEq,
I);
1949 MemSemEqReg = MemSemEqConstant.first;
1950 Result &= MemSemEqConstant.second;
1952 unsigned MemSemNeq =
static_cast<uint32_t
>(
getMemSemantics(FO)) | ScSem;
1953 if (MemSemEq == MemSemNeq)
1954 MemSemNeqReg = MemSemEqReg;
1956 auto MemSemNeqConstant = buildI32Constant(MemSemEq,
I);
1957 MemSemNeqReg = MemSemNeqConstant.first;
1958 Result &= MemSemNeqConstant.second;
1961 ScopeReg =
I.getOperand(5).getReg();
1962 MemSemEqReg =
I.getOperand(6).getReg();
1963 MemSemNeqReg =
I.getOperand(7).getReg();
1967 Register Val =
I.getOperand(4).getReg();
1972 BuildMI(*
I.getParent(),
I,
DL,
TII.get(SPIRV::OpAtomicCompareExchange))
1999 BuildMI(*
I.getParent(),
I,
DL,
TII.get(SPIRV::OpCompositeInsert))
2010 case SPIRV::StorageClass::DeviceOnlyINTEL:
2011 case SPIRV::StorageClass::HostOnlyINTEL:
2020 bool IsGRef =
false;
2021 bool IsAllowedRefs =
2022 llvm::all_of(
MRI->use_instructions(ResVReg), [&IsGRef](
auto const &It) {
2023 unsigned Opcode = It.getOpcode();
2024 if (Opcode == SPIRV::OpConstantComposite ||
2025 Opcode == SPIRV::OpVariable ||
2026 isSpvIntrinsic(It, Intrinsic::spv_init_global))
2027 return IsGRef = true;
2028 return Opcode == SPIRV::OpName;
2030 return IsAllowedRefs && IsGRef;
2033Register SPIRVInstructionSelector::getUcharPtrTypeReg(
2034 MachineInstr &
I, SPIRV::StorageClass::StorageClass SC)
const {
2036 Type::getInt8Ty(
I.getMF()->getFunction().getContext()),
I, SC));
2040SPIRVInstructionSelector::buildSpecConstantOp(MachineInstr &
I,
Register Dest,
2042 uint32_t Opcode)
const {
2043 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
2044 TII.get(SPIRV::OpSpecConstantOp))
2052SPIRVInstructionSelector::buildConstGenericPtr(MachineInstr &
I,
Register SrcPtr,
2056 Register Tmp =
MRI->createVirtualRegister(&SPIRV::pIDRegClass);
2058 SPIRV::StorageClass::Generic),
2060 MachineFunction *MF =
I.getParent()->getParent();
2062 MachineInstrBuilder MIB = buildSpecConstantOp(
2064 static_cast<uint32_t
>(SPIRV::Opcode::PtrCastToGeneric));
2074bool SPIRVInstructionSelector::selectAddrSpaceCast(
Register ResVReg,
2076 MachineInstr &
I)
const {
2080 Register SrcPtr =
I.getOperand(1).getReg();
2084 if (SrcPtrTy->
getOpcode() != SPIRV::OpTypePointer ||
2085 ResType->
getOpcode() != SPIRV::OpTypePointer)
2086 return BuildCOPY(ResVReg, SrcPtr,
I);
2096 unsigned SpecOpcode =
2098 ?
static_cast<uint32_t
>(SPIRV::Opcode::PtrCastToGeneric)
2101 ? static_cast<uint32_t>(
SPIRV::Opcode::GenericCastToPtr)
2108 return buildSpecConstantOp(
I, ResVReg, SrcPtr,
2109 getUcharPtrTypeReg(
I, DstSC), SpecOpcode)
2110 .constrainAllUses(
TII,
TRI, RBI);
2112 MachineInstrBuilder MIB = buildConstGenericPtr(
I, SrcPtr, SrcPtrTy);
2114 buildSpecConstantOp(
2116 getUcharPtrTypeReg(
I, DstSC),
2117 static_cast<uint32_t
>(SPIRV::Opcode::GenericCastToPtr))
2118 .constrainAllUses(
TII,
TRI, RBI);
2124 return BuildCOPY(ResVReg, SrcPtr,
I);
2126 if ((SrcSC == SPIRV::StorageClass::Function &&
2127 DstSC == SPIRV::StorageClass::Private) ||
2128 (DstSC == SPIRV::StorageClass::Function &&
2129 SrcSC == SPIRV::StorageClass::Private))
2130 return BuildCOPY(ResVReg, SrcPtr,
I);
2134 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpPtrCastToGeneric);
2137 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpGenericCastToPtr);
2158 return selectUnOp(ResVReg, ResType,
I,
2159 SPIRV::OpPtrCastToCrossWorkgroupINTEL);
2161 return selectUnOp(ResVReg, ResType,
I,
2162 SPIRV::OpCrossWorkgroupCastToPtrINTEL);
2164 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpPtrCastToGeneric);
2166 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpGenericCastToPtr);
2176 return SPIRV::OpFOrdEqual;
2178 return SPIRV::OpFOrdGreaterThanEqual;
2180 return SPIRV::OpFOrdGreaterThan;
2182 return SPIRV::OpFOrdLessThanEqual;
2184 return SPIRV::OpFOrdLessThan;
2186 return SPIRV::OpFOrdNotEqual;
2188 return SPIRV::OpOrdered;
2190 return SPIRV::OpFUnordEqual;
2192 return SPIRV::OpFUnordGreaterThanEqual;
2194 return SPIRV::OpFUnordGreaterThan;
2196 return SPIRV::OpFUnordLessThanEqual;
2198 return SPIRV::OpFUnordLessThan;
2200 return SPIRV::OpFUnordNotEqual;
2202 return SPIRV::OpUnordered;
2212 return SPIRV::OpIEqual;
2214 return SPIRV::OpINotEqual;
2216 return SPIRV::OpSGreaterThanEqual;
2218 return SPIRV::OpSGreaterThan;
2220 return SPIRV::OpSLessThanEqual;
2222 return SPIRV::OpSLessThan;
2224 return SPIRV::OpUGreaterThanEqual;
2226 return SPIRV::OpUGreaterThan;
2228 return SPIRV::OpULessThanEqual;
2230 return SPIRV::OpULessThan;
2239 return SPIRV::OpPtrEqual;
2241 return SPIRV::OpPtrNotEqual;
2252 return SPIRV::OpLogicalEqual;
2254 return SPIRV::OpLogicalNotEqual;
2288bool SPIRVInstructionSelector::selectAnyOrAll(
Register ResVReg,
2291 unsigned OpAnyOrAll)
const {
2292 assert(
I.getNumOperands() == 3);
2293 assert(
I.getOperand(2).isReg());
2295 Register InputRegister =
I.getOperand(2).getReg();
2302 bool IsVectorTy = InputType->
getOpcode() == SPIRV::OpTypeVector;
2303 if (IsBoolTy && !IsVectorTy) {
2304 assert(ResVReg ==
I.getOperand(0).getReg());
2305 return BuildCOPY(ResVReg, InputRegister,
I);
2309 unsigned SpirvNotEqualId =
2310 IsFloatTy ? SPIRV::OpFOrdNotEqual : SPIRV::OpINotEqual;
2317 IsBoolTy ? InputRegister
2326 IsFloatTy ? buildZerosValF(InputType,
I) : buildZerosVal(InputType,
I);
2346bool SPIRVInstructionSelector::selectAll(
Register ResVReg,
2348 MachineInstr &
I)
const {
2349 return selectAnyOrAll(ResVReg, ResType,
I, SPIRV::OpAll);
2352bool SPIRVInstructionSelector::selectAny(
Register ResVReg,
2354 MachineInstr &
I)
const {
2355 return selectAnyOrAll(ResVReg, ResType,
I, SPIRV::OpAny);
2359bool SPIRVInstructionSelector::selectFloatDot(
Register ResVReg,
2361 MachineInstr &
I)
const {
2362 assert(
I.getNumOperands() == 4);
2363 assert(
I.getOperand(2).isReg());
2364 assert(
I.getOperand(3).isReg());
2371 "dot product requires a vector of at least 2 components");
2379 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpDot))
2382 .
addUse(
I.getOperand(2).getReg())
2383 .
addUse(
I.getOperand(3).getReg())
2387bool SPIRVInstructionSelector::selectIntegerDot(
Register ResVReg,
2391 assert(
I.getNumOperands() == 4);
2392 assert(
I.getOperand(2).isReg());
2393 assert(
I.getOperand(3).isReg());
2396 auto DotOp =
Signed ? SPIRV::OpSDot : SPIRV::OpUDot;
2400 .
addUse(
I.getOperand(2).getReg())
2401 .
addUse(
I.getOperand(3).getReg())
2407bool SPIRVInstructionSelector::selectIntegerDotExpansion(
2409 assert(
I.getNumOperands() == 4);
2410 assert(
I.getOperand(2).isReg());
2411 assert(
I.getOperand(3).isReg());
2415 Register Vec0 =
I.getOperand(2).getReg();
2416 Register Vec1 =
I.getOperand(3).getReg();
2429 "dot product requires a vector of at least 2 components");
2443 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
2466bool SPIRVInstructionSelector::selectOpIsInf(
Register ResVReg,
2468 MachineInstr &
I)
const {
2470 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpIsInf))
2473 .
addUse(
I.getOperand(2).getReg())
2477bool SPIRVInstructionSelector::selectOpIsNan(
Register ResVReg,
2479 MachineInstr &
I)
const {
2481 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpIsNan))
2484 .
addUse(
I.getOperand(2).getReg())
2488template <
bool Signed>
2489bool SPIRVInstructionSelector::selectDot4AddPacked(
Register ResVReg,
2491 MachineInstr &
I)
const {
2492 assert(
I.getNumOperands() == 5);
2493 assert(
I.getOperand(2).isReg());
2494 assert(
I.getOperand(3).isReg());
2495 assert(
I.getOperand(4).isReg());
2498 Register Acc =
I.getOperand(2).getReg();
2502 auto DotOp =
Signed ? SPIRV::OpSDot : SPIRV::OpUDot;
2522template <
bool Signed>
2523bool SPIRVInstructionSelector::selectDot4AddPackedExpansion(
2525 assert(
I.getNumOperands() == 5);
2526 assert(
I.getOperand(2).isReg());
2527 assert(
I.getOperand(3).isReg());
2528 assert(
I.getOperand(4).isReg());
2533 Register Acc =
I.getOperand(2).getReg();
2539 Signed ? SPIRV::OpBitFieldSExtract : SPIRV::OpBitFieldUExtract;
2543 for (
unsigned i = 0; i < 4; i++) {
2545 Register AElt =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
2556 Register BElt =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
2576 Register MaskMul =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
2588 i < 3 ?
MRI->createVirtualRegister(&SPIRV::IDRegClass) : ResVReg;
2604bool SPIRVInstructionSelector::selectSaturate(
Register ResVReg,
2606 MachineInstr &
I)
const {
2607 assert(
I.getNumOperands() == 3);
2608 assert(
I.getOperand(2).isReg());
2610 Register VZero = buildZerosValF(ResType,
I);
2611 Register VOne = buildOnesValF(ResType,
I);
2613 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
2616 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::GLSL_std_450))
2618 .
addUse(
I.getOperand(2).getReg())
2624bool SPIRVInstructionSelector::selectSign(
Register ResVReg,
2626 MachineInstr &
I)
const {
2627 assert(
I.getNumOperands() == 3);
2628 assert(
I.getOperand(2).isReg());
2630 Register InputRegister =
I.getOperand(2).getReg();
2632 auto &
DL =
I.getDebugLoc();
2642 bool NeedsConversion = IsFloatTy || SignBitWidth != ResBitWidth;
2644 auto SignOpcode = IsFloatTy ? GL::FSign : GL::SSign;
2646 ?
MRI->createVirtualRegister(&SPIRV::IDRegClass)
2653 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::GLSL_std_450))
2658 if (NeedsConversion) {
2659 auto ConvertOpcode = IsFloatTy ? SPIRV::OpConvertFToS : SPIRV::OpSConvert;
2670bool SPIRVInstructionSelector::selectWaveOpInst(
Register ResVReg,
2673 unsigned Opcode)
const {
2677 auto BMI =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(Opcode))
2683 for (
unsigned J = 2; J <
I.getNumOperands(); J++) {
2684 BMI.addUse(
I.getOperand(J).getReg());
2690bool SPIRVInstructionSelector::selectWaveActiveCountBits(
2696 bool Result = selectWaveOpInst(BallotReg, BallotType,
I,
2697 SPIRV::OpGroupNonUniformBallot);
2701 TII.get(SPIRV::OpGroupNonUniformBallotBitCount))
2706 .
addImm(SPIRV::GroupOperation::Reduce)
2713bool SPIRVInstructionSelector::selectWaveReduceMax(
Register ResVReg,
2716 bool IsUnsigned)
const {
2717 assert(
I.getNumOperands() == 3);
2718 assert(
I.getOperand(2).isReg());
2720 Register InputRegister =
I.getOperand(2).getReg();
2729 auto IntegerOpcodeType =
2730 IsUnsigned ? SPIRV::OpGroupNonUniformUMax : SPIRV::OpGroupNonUniformSMax;
2731 auto Opcode = IsFloatTy ? SPIRV::OpGroupNonUniformFMax : IntegerOpcodeType;
2732 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(Opcode))
2737 .
addImm(SPIRV::GroupOperation::Reduce)
2738 .
addUse(
I.getOperand(2).getReg())
2742bool SPIRVInstructionSelector::selectWaveReduceMin(
Register ResVReg,
2745 bool IsUnsigned)
const {
2746 assert(
I.getNumOperands() == 3);
2747 assert(
I.getOperand(2).isReg());
2749 Register InputRegister =
I.getOperand(2).getReg();
2758 auto IntegerOpcodeType =
2759 IsUnsigned ? SPIRV::OpGroupNonUniformUMin : SPIRV::OpGroupNonUniformSMin;
2760 auto Opcode = IsFloatTy ? SPIRV::OpGroupNonUniformFMin : IntegerOpcodeType;
2761 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(Opcode))
2766 .
addImm(SPIRV::GroupOperation::Reduce)
2767 .
addUse(
I.getOperand(2).getReg())
2771bool SPIRVInstructionSelector::selectWaveReduceSum(
Register ResVReg,
2773 MachineInstr &
I)
const {
2774 assert(
I.getNumOperands() == 3);
2775 assert(
I.getOperand(2).isReg());
2777 Register InputRegister =
I.getOperand(2).getReg();
2787 IsFloatTy ? SPIRV::OpGroupNonUniformFAdd : SPIRV::OpGroupNonUniformIAdd;
2788 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(Opcode))
2793 .
addImm(SPIRV::GroupOperation::Reduce)
2794 .
addUse(
I.getOperand(2).getReg());
2797bool SPIRVInstructionSelector::selectBitreverse(
Register ResVReg,
2799 MachineInstr &
I)
const {
2801 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBitReverse))
2804 .
addUse(
I.getOperand(1).getReg())
2808bool SPIRVInstructionSelector::selectFreeze(
Register ResVReg,
2810 MachineInstr &
I)
const {
2816 if (!
I.getOperand(0).isReg() || !
I.getOperand(1).isReg())
2818 Register OpReg =
I.getOperand(1).getReg();
2819 if (MachineInstr *Def =
MRI->getVRegDef(OpReg)) {
2820 if (
Def->getOpcode() == TargetOpcode::COPY)
2821 Def =
MRI->getVRegDef(
Def->getOperand(1).getReg());
2823 switch (
Def->getOpcode()) {
2824 case SPIRV::ASSIGN_TYPE:
2825 if (MachineInstr *AssignToDef =
2826 MRI->getVRegDef(
Def->getOperand(1).getReg())) {
2827 if (AssignToDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
2828 Reg =
Def->getOperand(2).getReg();
2831 case SPIRV::OpUndef:
2832 Reg =
Def->getOperand(1).getReg();
2835 unsigned DestOpCode;
2837 DestOpCode = SPIRV::OpConstantNull;
2839 DestOpCode = TargetOpcode::COPY;
2842 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(DestOpCode))
2843 .
addDef(
I.getOperand(0).getReg())
2850bool SPIRVInstructionSelector::selectBuildVector(
Register ResVReg,
2852 MachineInstr &
I)
const {
2854 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
2856 else if (ResType->
getOpcode() == SPIRV::OpTypeArray)
2860 if (
I.getNumExplicitOperands() -
I.getNumExplicitDefs() !=
N)
2865 for (
unsigned i =
I.getNumExplicitDefs();
2866 i <
I.getNumExplicitOperands() && IsConst; ++i)
2870 if (!IsConst &&
N < 2)
2872 "There must be at least two constituent operands in a vector");
2875 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
2876 TII.get(IsConst ? SPIRV::OpConstantComposite
2877 : SPIRV::OpCompositeConstruct))
2880 for (
unsigned i =
I.getNumExplicitDefs(); i <
I.getNumExplicitOperands(); ++i)
2881 MIB.
addUse(
I.getOperand(i).getReg());
2885bool SPIRVInstructionSelector::selectSplatVector(
Register ResVReg,
2887 MachineInstr &
I)
const {
2889 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
2891 else if (ResType->
getOpcode() == SPIRV::OpTypeArray)
2897 if (!
I.getOperand(
OpIdx).isReg())
2904 if (!IsConst &&
N < 2)
2906 "There must be at least two constituent operands in a vector");
2909 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
2910 TII.get(IsConst ? SPIRV::OpConstantComposite
2911 : SPIRV::OpCompositeConstruct))
2914 for (
unsigned i = 0; i <
N; ++i)
2919bool SPIRVInstructionSelector::selectDiscard(
Register ResVReg,
2921 MachineInstr &
I)
const {
2926 SPIRV::Extension::SPV_EXT_demote_to_helper_invocation) ||
2928 Opcode = SPIRV::OpDemoteToHelperInvocation;
2930 Opcode = SPIRV::OpKill;
2932 if (MachineInstr *NextI =
I.getNextNode()) {
2934 NextI->removeFromParent();
2939 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(Opcode))
2943bool SPIRVInstructionSelector::selectCmp(
Register ResVReg,
2946 MachineInstr &
I)
const {
2947 Register Cmp0 =
I.getOperand(2).getReg();
2948 Register Cmp1 =
I.getOperand(3).getReg();
2951 "CMP operands should have the same type");
2952 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(CmpOpc))
2961bool SPIRVInstructionSelector::selectICmp(
Register ResVReg,
2963 MachineInstr &
I)
const {
2964 auto Pred =
I.getOperand(1).getPredicate();
2967 Register CmpOperand =
I.getOperand(2).getReg();
2974 return selectCmp(ResVReg, ResType, CmpOpc,
I);
2977std::pair<Register, bool>
2978SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &
I,
2984 auto ConstInt = ConstantInt::get(LLVMTy, Val);
2992 ?
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantNull))
2995 :
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantI))
2998 .
addImm(APInt(32, Val).getZExtValue());
3000 GR.
add(ConstInt,
MI);
3005bool SPIRVInstructionSelector::selectFCmp(
Register ResVReg,
3007 MachineInstr &
I)
const {
3009 return selectCmp(ResVReg, ResType, CmpOp,
I);
3013 MachineInstr &
I)
const {
3016 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
3022 MachineInstr &
I)
const {
3026 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
3032 MachineInstr &
I)
const {
3036 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
3043 MachineInstr &
I)
const {
3047 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
3052bool SPIRVInstructionSelector::selectSelect(
Register ResVReg,
3054 MachineInstr &
I)
const {
3055 Register SelectFirstArg =
I.getOperand(2).getReg();
3056 Register SelectSecondArg =
I.getOperand(3).getReg();
3065 SPIRV::OpTypeVector;
3072 Opcode = IsScalarBool ? SPIRV::OpSelectVFSCond : SPIRV::OpSelectVFVCond;
3073 }
else if (IsPtrTy) {
3074 Opcode = IsScalarBool ? SPIRV::OpSelectVPSCond : SPIRV::OpSelectVPVCond;
3076 Opcode = IsScalarBool ? SPIRV::OpSelectVISCond : SPIRV::OpSelectVIVCond;
3080 Opcode = IsScalarBool ? SPIRV::OpSelectSFSCond : SPIRV::OpSelectVFVCond;
3081 }
else if (IsPtrTy) {
3082 Opcode = IsScalarBool ? SPIRV::OpSelectSPSCond : SPIRV::OpSelectVPVCond;
3084 Opcode = IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectVIVCond;
3087 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
3090 .
addUse(
I.getOperand(1).getReg())
3096bool SPIRVInstructionSelector::selectSelectDefaultArgs(
Register ResVReg,
3099 bool IsSigned)
const {
3101 Register ZeroReg = buildZerosVal(ResType,
I);
3102 Register OneReg = buildOnesVal(IsSigned, ResType,
I);
3106 IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectVIVCond;
3107 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
3110 .
addUse(
I.getOperand(1).getReg())
3116bool SPIRVInstructionSelector::selectIToF(
Register ResVReg,
3118 MachineInstr &
I,
bool IsSigned,
3119 unsigned Opcode)
const {
3120 Register SrcReg =
I.getOperand(1).getReg();
3126 if (ResType->
getOpcode() == SPIRV::OpTypeVector) {
3131 selectSelectDefaultArgs(SrcReg, TmpType,
I,
false);
3133 return selectOpWithSrcs(ResVReg, ResType,
I, {SrcReg}, Opcode);
3136bool SPIRVInstructionSelector::selectExt(
Register ResVReg,
3138 MachineInstr &
I,
bool IsSigned)
const {
3139 Register SrcReg =
I.getOperand(1).getReg();
3141 return selectSelectDefaultArgs(ResVReg, ResType,
I, IsSigned);
3144 if (SrcType == ResType)
3145 return BuildCOPY(ResVReg, SrcReg,
I);
3147 unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
3148 return selectUnOp(ResVReg, ResType,
I, Opcode);
3151bool SPIRVInstructionSelector::selectSUCmp(
Register ResVReg,
3154 bool IsSigned)
const {
3155 MachineIRBuilder MIRBuilder(
I);
3156 MachineRegisterInfo *
MRI = MIRBuilder.getMRI();
3171 TII.get(IsSigned ? SPIRV::OpSLessThanEqual
3172 : SPIRV::OpULessThanEqual))
3175 .
addUse(
I.getOperand(1).getReg())
3176 .
addUse(
I.getOperand(2).getReg())
3182 TII.get(IsSigned ? SPIRV::OpSLessThan : SPIRV::OpULessThan))
3185 .
addUse(
I.getOperand(1).getReg())
3186 .
addUse(
I.getOperand(2).getReg())
3194 unsigned SelectOpcode =
3195 N > 1 ? SPIRV::OpSelectVIVCond : SPIRV::OpSelectSISCond;
3200 .
addUse(buildOnesVal(
true, ResType,
I))
3201 .
addUse(buildZerosVal(ResType,
I))
3208 .
addUse(buildOnesVal(
false, ResType,
I))
3212bool SPIRVInstructionSelector::selectIntToBool(
Register IntReg,
3219 bool IsVectorTy = IntTy->
getOpcode() == SPIRV::OpTypeVector;
3220 unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
3222 Register One = buildOnesVal(
false, IntTy,
I);
3238bool SPIRVInstructionSelector::selectTrunc(
Register ResVReg,
3240 MachineInstr &
I)
const {
3241 Register IntReg =
I.getOperand(1).getReg();
3244 return selectIntToBool(IntReg, ResVReg,
I, ArgType, ResType);
3245 if (ArgType == ResType)
3246 return BuildCOPY(ResVReg, IntReg,
I);
3248 unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
3249 return selectUnOp(ResVReg, ResType,
I, Opcode);
3252bool SPIRVInstructionSelector::selectConst(
Register ResVReg,
3254 MachineInstr &
I)
const {
3255 unsigned Opcode =
I.getOpcode();
3256 unsigned TpOpcode = ResType->
getOpcode();
3258 if (TpOpcode == SPIRV::OpTypePointer || TpOpcode == SPIRV::OpTypeEvent) {
3259 assert(Opcode == TargetOpcode::G_CONSTANT &&
3260 I.getOperand(1).getCImm()->isZero());
3261 MachineBasicBlock &DepMBB =
I.getMF()->front();
3264 }
else if (Opcode == TargetOpcode::G_FCONSTANT) {
3271 return Reg == ResVReg ?
true : BuildCOPY(ResVReg,
Reg,
I);
3274bool SPIRVInstructionSelector::selectOpUndef(
Register ResVReg,
3276 MachineInstr &
I)
const {
3277 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUndef))
3283bool SPIRVInstructionSelector::selectInsertVal(
Register ResVReg,
3285 MachineInstr &
I)
const {
3287 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeInsert))
3291 .
addUse(
I.getOperand(3).getReg())
3293 .
addUse(
I.getOperand(2).getReg());
3294 for (
unsigned i = 4; i <
I.getNumOperands(); i++)
3299bool SPIRVInstructionSelector::selectExtractVal(
Register ResVReg,
3301 MachineInstr &
I)
const {
3303 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
3306 .
addUse(
I.getOperand(2).getReg());
3307 for (
unsigned i = 3; i <
I.getNumOperands(); i++)
3312bool SPIRVInstructionSelector::selectInsertElt(
Register ResVReg,
3314 MachineInstr &
I)
const {
3316 return selectInsertVal(ResVReg, ResType,
I);
3318 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVectorInsertDynamic))
3321 .
addUse(
I.getOperand(2).getReg())
3322 .
addUse(
I.getOperand(3).getReg())
3323 .
addUse(
I.getOperand(4).getReg())
3327bool SPIRVInstructionSelector::selectExtractElt(
Register ResVReg,
3329 MachineInstr &
I)
const {
3331 return selectExtractVal(ResVReg, ResType,
I);
3333 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVectorExtractDynamic))
3336 .
addUse(
I.getOperand(2).getReg())
3337 .
addUse(
I.getOperand(3).getReg())
3341bool SPIRVInstructionSelector::selectGEP(
Register ResVReg,
3343 MachineInstr &
I)
const {
3344 const bool IsGEPInBounds =
I.getOperand(2).getImm();
3350 ? (IsGEPInBounds ? SPIRV::OpInBoundsAccessChain
3351 : SPIRV::OpAccessChain)
3352 : (IsGEPInBounds ?
SPIRV::OpInBoundsPtrAccessChain
3353 :
SPIRV::OpPtrAccessChain);
3355 auto Res =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
3359 .
addUse(
I.getOperand(3).getReg());
3361 (Opcode == SPIRV::OpPtrAccessChain ||
3362 Opcode == SPIRV::OpInBoundsPtrAccessChain ||
3364 "Cannot translate GEP to OpAccessChain. First index must be 0.");
3367 const unsigned StartingIndex =
3368 (Opcode == SPIRV::OpAccessChain || Opcode == SPIRV::OpInBoundsAccessChain)
3371 for (
unsigned i = StartingIndex; i <
I.getNumExplicitOperands(); ++i)
3372 Res.addUse(
I.getOperand(i).getReg());
3373 return Res.constrainAllUses(
TII,
TRI, RBI);
3377bool SPIRVInstructionSelector::wrapIntoSpecConstantOp(
3380 unsigned Lim =
I.getNumExplicitOperands();
3381 for (
unsigned i =
I.getNumExplicitDefs() + 1; i < Lim; ++i) {
3382 Register OpReg =
I.getOperand(i).getReg();
3383 MachineInstr *OpDefine =
MRI->getVRegDef(OpReg);
3385 SmallPtrSet<SPIRVType *, 4> Visited;
3386 if (!OpDefine || !OpType ||
isConstReg(
MRI, OpDefine, Visited) ||
3387 OpDefine->
getOpcode() == TargetOpcode::G_ADDRSPACE_CAST ||
3388 OpDefine->
getOpcode() == TargetOpcode::G_INTTOPTR ||
3395 MachineFunction *MF =
I.getMF();
3407 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
3408 TII.get(SPIRV::OpSpecConstantOp))
3411 .
addImm(
static_cast<uint32_t
>(SPIRV::Opcode::Bitcast))
3413 GR.
add(OpDefine, MIB);
3421bool SPIRVInstructionSelector::selectDerivativeInst(
3423 const unsigned DPdOpCode)
const {
3426 errorIfInstrOutsideShader(
I);
3431 Register SrcReg =
I.getOperand(2).getReg();
3436 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(DPdOpCode))
3439 .
addUse(
I.getOperand(2).getReg());
3441 MachineIRBuilder MIRBuilder(
I);
3444 if (componentCount != 1)
3448 const TargetRegisterClass *RegClass = GR.
getRegClass(SrcType);
3449 Register ConvertToVReg =
MRI->createVirtualRegister(RegClass);
3450 Register DpdOpVReg =
MRI->createVirtualRegister(RegClass);
3453 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpFConvert))
3464 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpFConvert))
3472bool SPIRVInstructionSelector::selectIntrinsic(
Register ResVReg,
3474 MachineInstr &
I)
const {
3478 case Intrinsic::spv_load:
3479 return selectLoad(ResVReg, ResType,
I);
3480 case Intrinsic::spv_store:
3481 return selectStore(
I);
3482 case Intrinsic::spv_extractv:
3483 return selectExtractVal(ResVReg, ResType,
I);
3484 case Intrinsic::spv_insertv:
3485 return selectInsertVal(ResVReg, ResType,
I);
3486 case Intrinsic::spv_extractelt:
3487 return selectExtractElt(ResVReg, ResType,
I);
3488 case Intrinsic::spv_insertelt:
3489 return selectInsertElt(ResVReg, ResType,
I);
3490 case Intrinsic::spv_gep:
3491 return selectGEP(ResVReg, ResType,
I);
3492 case Intrinsic::spv_bitcast: {
3493 Register OpReg =
I.getOperand(2).getReg();
3498 return selectOpWithSrcs(ResVReg, ResType,
I, {OpReg}, SPIRV::OpBitcast);
3500 case Intrinsic::spv_unref_global:
3501 case Intrinsic::spv_init_global: {
3502 MachineInstr *
MI =
MRI->getVRegDef(
I.getOperand(1).getReg());
3503 MachineInstr *Init =
I.getNumExplicitOperands() > 2
3504 ?
MRI->getVRegDef(
I.getOperand(2).getReg())
3507 Register GVarVReg =
MI->getOperand(0).getReg();
3508 bool Res = selectGlobalValue(GVarVReg, *
MI, Init);
3512 if (
MI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE) {
3514 MI->removeFromParent();
3518 case Intrinsic::spv_undef: {
3519 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUndef))
3524 case Intrinsic::spv_const_composite: {
3526 bool IsNull =
I.getNumExplicitDefs() + 1 ==
I.getNumExplicitOperands();
3532 if (!wrapIntoSpecConstantOp(
I, CompositeArgs))
3534 MachineIRBuilder MIR(
I);
3536 MIR, SPIRV::OpConstantComposite, 3,
3537 SPIRV::OpConstantCompositeContinuedINTEL, CompositeArgs, ResVReg,
3539 for (
auto *Instr : Instructions) {
3540 Instr->setDebugLoc(
I.getDebugLoc());
3546 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantNull))
3552 case Intrinsic::spv_assign_name: {
3553 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpName));
3554 MIB.
addUse(
I.getOperand(
I.getNumExplicitDefs() + 1).getReg());
3555 for (
unsigned i =
I.getNumExplicitDefs() + 2;
3556 i <
I.getNumExplicitOperands(); ++i) {
3557 MIB.
addImm(
I.getOperand(i).getImm());
3561 case Intrinsic::spv_switch: {
3562 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSwitch));
3563 for (
unsigned i = 1; i <
I.getNumExplicitOperands(); ++i) {
3564 if (
I.getOperand(i).isReg())
3565 MIB.
addReg(
I.getOperand(i).getReg());
3566 else if (
I.getOperand(i).isCImm())
3567 addNumImm(
I.getOperand(i).getCImm()->getValue(), MIB);
3568 else if (
I.getOperand(i).isMBB())
3569 MIB.
addMBB(
I.getOperand(i).getMBB());
3575 case Intrinsic::spv_loop_merge: {
3576 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoopMerge));
3577 for (
unsigned i = 1; i <
I.getNumExplicitOperands(); ++i) {
3578 if (
I.getOperand(i).isMBB())
3579 MIB.
addMBB(
I.getOperand(i).getMBB());
3585 case Intrinsic::spv_selection_merge: {
3587 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSelectionMerge));
3588 assert(
I.getOperand(1).isMBB() &&
3589 "operand 1 to spv_selection_merge must be a basic block");
3590 MIB.
addMBB(
I.getOperand(1).getMBB());
3591 MIB.
addImm(getSelectionOperandForImm(
I.getOperand(2).getImm()));
3594 case Intrinsic::spv_cmpxchg:
3595 return selectAtomicCmpXchg(ResVReg, ResType,
I);
3596 case Intrinsic::spv_unreachable:
3597 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUnreachable))
3599 case Intrinsic::spv_alloca:
3600 return selectFrameIndex(ResVReg, ResType,
I);
3601 case Intrinsic::spv_alloca_array:
3602 return selectAllocaArray(ResVReg, ResType,
I);
3603 case Intrinsic::spv_assume:
3605 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpAssumeTrueKHR))
3606 .
addUse(
I.getOperand(1).getReg())
3609 case Intrinsic::spv_expect:
3611 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExpectKHR))
3614 .
addUse(
I.getOperand(2).getReg())
3615 .
addUse(
I.getOperand(3).getReg())
3618 case Intrinsic::arithmetic_fence:
3621 TII.get(SPIRV::OpArithmeticFenceEXT))
3624 .
addUse(
I.getOperand(2).getReg())
3627 return BuildCOPY(ResVReg,
I.getOperand(2).getReg(),
I);
3629 case Intrinsic::spv_thread_id:
3635 return loadVec3BuiltinInputID(SPIRV::BuiltIn::GlobalInvocationId, ResVReg,
3637 case Intrinsic::spv_thread_id_in_group:
3643 return loadVec3BuiltinInputID(SPIRV::BuiltIn::LocalInvocationId, ResVReg,
3645 case Intrinsic::spv_group_id:
3651 return loadVec3BuiltinInputID(SPIRV::BuiltIn::WorkgroupId, ResVReg, ResType,
3653 case Intrinsic::spv_flattened_thread_id_in_group:
3660 return loadBuiltinInputID(SPIRV::BuiltIn::LocalInvocationIndex, ResVReg,
3662 case Intrinsic::spv_workgroup_size:
3663 return loadVec3BuiltinInputID(SPIRV::BuiltIn::WorkgroupSize, ResVReg,
3665 case Intrinsic::spv_global_size:
3666 return loadVec3BuiltinInputID(SPIRV::BuiltIn::GlobalSize, ResVReg, ResType,
3668 case Intrinsic::spv_global_offset:
3669 return loadVec3BuiltinInputID(SPIRV::BuiltIn::GlobalOffset, ResVReg,
3671 case Intrinsic::spv_num_workgroups:
3672 return loadVec3BuiltinInputID(SPIRV::BuiltIn::NumWorkgroups, ResVReg,
3674 case Intrinsic::spv_subgroup_size:
3675 return loadBuiltinInputID(SPIRV::BuiltIn::SubgroupSize, ResVReg, ResType,
3677 case Intrinsic::spv_num_subgroups:
3678 return loadBuiltinInputID(SPIRV::BuiltIn::NumSubgroups, ResVReg, ResType,
3680 case Intrinsic::spv_subgroup_id:
3681 return loadBuiltinInputID(SPIRV::BuiltIn::SubgroupId, ResVReg, ResType,
I);
3682 case Intrinsic::spv_subgroup_local_invocation_id:
3683 return loadBuiltinInputID(SPIRV::BuiltIn::SubgroupLocalInvocationId,
3684 ResVReg, ResType,
I);
3685 case Intrinsic::spv_subgroup_max_size:
3686 return loadBuiltinInputID(SPIRV::BuiltIn::SubgroupMaxSize, ResVReg, ResType,
3688 case Intrinsic::spv_fdot:
3689 return selectFloatDot(ResVReg, ResType,
I);
3690 case Intrinsic::spv_udot:
3691 case Intrinsic::spv_sdot:
3692 if (STI.
canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
3694 return selectIntegerDot(ResVReg, ResType,
I,
3695 IID == Intrinsic::spv_sdot);
3696 return selectIntegerDotExpansion(ResVReg, ResType,
I);
3697 case Intrinsic::spv_dot4add_i8packed:
3698 if (STI.
canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
3700 return selectDot4AddPacked<true>(ResVReg, ResType,
I);
3701 return selectDot4AddPackedExpansion<true>(ResVReg, ResType,
I);
3702 case Intrinsic::spv_dot4add_u8packed:
3703 if (STI.
canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
3705 return selectDot4AddPacked<false>(ResVReg, ResType,
I);
3706 return selectDot4AddPackedExpansion<false>(ResVReg, ResType,
I);
3707 case Intrinsic::spv_all:
3708 return selectAll(ResVReg, ResType,
I);
3709 case Intrinsic::spv_any:
3710 return selectAny(ResVReg, ResType,
I);
3711 case Intrinsic::spv_cross:
3712 return selectExtInst(ResVReg, ResType,
I, CL::cross, GL::Cross);
3713 case Intrinsic::spv_distance:
3714 return selectExtInst(ResVReg, ResType,
I, CL::distance, GL::Distance);
3715 case Intrinsic::spv_lerp:
3716 return selectExtInst(ResVReg, ResType,
I, CL::mix, GL::FMix);
3717 case Intrinsic::spv_length:
3718 return selectExtInst(ResVReg, ResType,
I, CL::length, GL::Length);
3719 case Intrinsic::spv_degrees:
3720 return selectExtInst(ResVReg, ResType,
I, CL::degrees, GL::Degrees);
3721 case Intrinsic::spv_faceforward:
3722 return selectExtInst(ResVReg, ResType,
I, GL::FaceForward);
3723 case Intrinsic::spv_frac:
3724 return selectExtInst(ResVReg, ResType,
I, CL::fract, GL::Fract);
3725 case Intrinsic::spv_isinf:
3726 return selectOpIsInf(ResVReg, ResType,
I);
3727 case Intrinsic::spv_isnan:
3728 return selectOpIsNan(ResVReg, ResType,
I);
3729 case Intrinsic::spv_normalize:
3730 return selectExtInst(ResVReg, ResType,
I, CL::normalize, GL::Normalize);
3731 case Intrinsic::spv_refract:
3732 return selectExtInst(ResVReg, ResType,
I, GL::Refract);
3733 case Intrinsic::spv_reflect:
3734 return selectExtInst(ResVReg, ResType,
I, GL::Reflect);
3735 case Intrinsic::spv_rsqrt:
3736 return selectExtInst(ResVReg, ResType,
I, CL::rsqrt, GL::InverseSqrt);
3737 case Intrinsic::spv_sign:
3738 return selectSign(ResVReg, ResType,
I);
3739 case Intrinsic::spv_smoothstep:
3740 return selectExtInst(ResVReg, ResType,
I, CL::smoothstep, GL::SmoothStep);
3741 case Intrinsic::spv_firstbituhigh:
3742 return selectFirstBitHigh(ResVReg, ResType,
I,
false);
3743 case Intrinsic::spv_firstbitshigh:
3744 return selectFirstBitHigh(ResVReg, ResType,
I,
true);
3745 case Intrinsic::spv_firstbitlow:
3746 return selectFirstBitLow(ResVReg, ResType,
I);
3747 case Intrinsic::spv_group_memory_barrier_with_group_sync: {
3749 auto MemSemConstant =
3750 buildI32Constant(SPIRV::MemorySemantics::SequentiallyConsistent,
I);
3751 Register MemSemReg = MemSemConstant.first;
3752 Result &= MemSemConstant.second;
3753 auto ScopeConstant = buildI32Constant(SPIRV::Scope::Workgroup,
I);
3754 Register ScopeReg = ScopeConstant.first;
3755 Result &= ScopeConstant.second;
3758 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpControlBarrier))
3764 case Intrinsic::spv_generic_cast_to_ptr_explicit: {
3765 Register PtrReg =
I.getOperand(
I.getNumExplicitDefs() + 1).getReg();
3766 SPIRV::StorageClass::StorageClass ResSC =
3770 "Generic storage class");
3772 TII.get(SPIRV::OpGenericCastToPtrExplicit))
3779 case Intrinsic::spv_lifetime_start:
3780 case Intrinsic::spv_lifetime_end: {
3781 unsigned Op = IID == Intrinsic::spv_lifetime_start ? SPIRV::OpLifetimeStart
3782 : SPIRV::OpLifetimeStop;
3783 int64_t
Size =
I.getOperand(
I.getNumExplicitDefs() + 1).getImm();
3784 Register PtrReg =
I.getOperand(
I.getNumExplicitDefs() + 2).getReg();
3792 case Intrinsic::spv_saturate:
3793 return selectSaturate(ResVReg, ResType,
I);
3794 case Intrinsic::spv_nclamp:
3795 return selectExtInst(ResVReg, ResType,
I, CL::fclamp, GL::NClamp);
3796 case Intrinsic::spv_uclamp:
3797 return selectExtInst(ResVReg, ResType,
I, CL::u_clamp, GL::UClamp);
3798 case Intrinsic::spv_sclamp:
3799 return selectExtInst(ResVReg, ResType,
I, CL::s_clamp, GL::SClamp);
3800 case Intrinsic::spv_wave_active_countbits:
3801 return selectWaveActiveCountBits(ResVReg, ResType,
I);
3802 case Intrinsic::spv_wave_all:
3803 return selectWaveOpInst(ResVReg, ResType,
I, SPIRV::OpGroupNonUniformAll);
3804 case Intrinsic::spv_wave_any:
3805 return selectWaveOpInst(ResVReg, ResType,
I, SPIRV::OpGroupNonUniformAny);
3806 case Intrinsic::spv_wave_is_first_lane:
3807 return selectWaveOpInst(ResVReg, ResType,
I, SPIRV::OpGroupNonUniformElect);
3808 case Intrinsic::spv_wave_reduce_umax:
3809 return selectWaveReduceMax(ResVReg, ResType,
I,
true);
3810 case Intrinsic::spv_wave_reduce_max:
3811 return selectWaveReduceMax(ResVReg, ResType,
I,
false);
3812 case Intrinsic::spv_wave_reduce_umin:
3813 return selectWaveReduceMin(ResVReg, ResType,
I,
true);
3814 case Intrinsic::spv_wave_reduce_min:
3815 return selectWaveReduceMin(ResVReg, ResType,
I,
false);
3816 case Intrinsic::spv_wave_reduce_sum:
3817 return selectWaveReduceSum(ResVReg, ResType,
I);
3818 case Intrinsic::spv_wave_readlane:
3819 return selectWaveOpInst(ResVReg, ResType,
I,
3820 SPIRV::OpGroupNonUniformShuffle);
3821 case Intrinsic::spv_step:
3822 return selectExtInst(ResVReg, ResType,
I, CL::step, GL::Step);
3823 case Intrinsic::spv_radians:
3824 return selectExtInst(ResVReg, ResType,
I, CL::radians, GL::Radians);
3828 case Intrinsic::instrprof_increment:
3829 case Intrinsic::instrprof_increment_step:
3830 case Intrinsic::instrprof_value_profile:
3833 case Intrinsic::spv_value_md:
3835 case Intrinsic::spv_resource_handlefrombinding: {
3836 return selectHandleFromBinding(ResVReg, ResType,
I);
3838 case Intrinsic::spv_resource_counterhandlefrombinding:
3839 return selectCounterHandleFromBinding(ResVReg, ResType,
I);
3840 case Intrinsic::spv_resource_updatecounter:
3841 return selectUpdateCounter(ResVReg, ResType,
I);
3842 case Intrinsic::spv_resource_store_typedbuffer: {
3843 return selectImageWriteIntrinsic(
I);
3845 case Intrinsic::spv_resource_load_typedbuffer: {
3846 return selectReadImageIntrinsic(ResVReg, ResType,
I);
3848 case Intrinsic::spv_resource_getpointer: {
3849 return selectResourceGetPointer(ResVReg, ResType,
I);
3851 case Intrinsic::spv_pushconstant_getpointer: {
3852 return selectPushConstantGetPointer(ResVReg, ResType,
I);
3854 case Intrinsic::spv_discard: {
3855 return selectDiscard(ResVReg, ResType,
I);
3857 case Intrinsic::spv_resource_nonuniformindex: {
3858 return selectResourceNonUniformIndex(ResVReg, ResType,
I);
3860 case Intrinsic::spv_unpackhalf2x16: {
3861 return selectExtInst(ResVReg, ResType,
I, GL::UnpackHalf2x16);
3863 case Intrinsic::spv_ddx:
3864 return selectDerivativeInst(ResVReg, ResType,
I, SPIRV::OpDPdx);
3865 case Intrinsic::spv_ddy:
3866 return selectDerivativeInst(ResVReg, ResType,
I, SPIRV::OpDPdy);
3867 case Intrinsic::spv_ddx_coarse:
3868 return selectDerivativeInst(ResVReg, ResType,
I, SPIRV::OpDPdxCoarse);
3869 case Intrinsic::spv_ddy_coarse:
3870 return selectDerivativeInst(ResVReg, ResType,
I, SPIRV::OpDPdyCoarse);
3871 case Intrinsic::spv_ddx_fine:
3872 return selectDerivativeInst(ResVReg, ResType,
I, SPIRV::OpDPdxFine);
3873 case Intrinsic::spv_ddy_fine:
3874 return selectDerivativeInst(ResVReg, ResType,
I, SPIRV::OpDPdyFine);
3875 case Intrinsic::spv_fwidth:
3876 return selectDerivativeInst(ResVReg, ResType,
I, SPIRV::OpFwidth);
3878 std::string DiagMsg;
3879 raw_string_ostream OS(DiagMsg);
3881 DiagMsg =
"Intrinsic selection not implemented: " + DiagMsg;
3888bool SPIRVInstructionSelector::selectHandleFromBinding(
Register &ResVReg,
3890 MachineInstr &
I)
const {
3893 if (ResType->
getOpcode() == SPIRV::OpTypeImage)
3900bool SPIRVInstructionSelector::selectCounterHandleFromBinding(
3903 assert(Intr.getIntrinsicID() ==
3904 Intrinsic::spv_resource_counterhandlefrombinding);
3907 Register MainHandleReg = Intr.getOperand(2).getReg();
3909 assert(MainHandleDef->getIntrinsicID() ==
3910 Intrinsic::spv_resource_handlefrombinding);
3914 uint32_t ArraySize =
getIConstVal(MainHandleDef->getOperand(4).getReg(),
MRI);
3915 Register IndexReg = MainHandleDef->getOperand(5).getReg();
3916 std::string CounterName =
3921 MachineIRBuilder MIRBuilder(
I);
3922 Register CounterVarReg = buildPointerToResource(
3924 Binding, ArraySize, IndexReg, CounterName, MIRBuilder);
3926 return BuildCOPY(ResVReg, CounterVarReg,
I);
3929bool SPIRVInstructionSelector::selectUpdateCounter(
Register &ResVReg,
3931 MachineInstr &
I)
const {
3933 assert(Intr.getIntrinsicID() == Intrinsic::spv_resource_updatecounter);
3935 Register CounterHandleReg = Intr.getOperand(2).getReg();
3936 Register IncrReg = Intr.getOperand(3).getReg();
3944 assert(CounterVarPointeeType &&
3945 CounterVarPointeeType->
getOpcode() == SPIRV::OpTypeStruct &&
3946 "Counter variable must be a struct");
3948 SPIRV::StorageClass::StorageBuffer &&
3949 "Counter variable must be in the storage buffer storage class");
3951 "Counter variable must have exactly 1 member in the struct");
3955 "Counter variable struct must have a single i32 member");
3959 MachineIRBuilder MIRBuilder(
I);
3961 Type::getInt32Ty(
I.getMF()->getFunction().getContext());
3964 LLVMIntType, MIRBuilder, SPIRV::StorageClass::StorageBuffer);
3966 auto Zero = buildI32Constant(0,
I);
3972 if (!
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
3973 TII.get(SPIRV::OpAccessChain))
3976 .
addUse(CounterHandleReg)
3984 auto Scope = buildI32Constant(SPIRV::Scope::Device,
I);
3987 auto Semantics = buildI32Constant(SPIRV::MemorySemantics::None,
I);
3988 if (!Semantics.second)
3992 auto Incr = buildI32Constant(
static_cast<uint32_t
>(IncrVal),
I);
3997 if (!
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpAtomicIAdd))
4008 return BuildCOPY(ResVReg, AtomicRes,
I);
4016 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpIAddS))
4023bool SPIRVInstructionSelector::selectReadImageIntrinsic(
4032 Register ImageReg =
I.getOperand(2).getReg();
4034 Register NewImageReg =
MRI->createVirtualRegister(
MRI->getRegClass(ImageReg));
4040 Register IdxReg =
I.getOperand(3).getReg();
4042 MachineInstr &Pos =
I;
4044 return generateImageReadOrFetch(ResVReg, ResType, NewImageReg, IdxReg, Loc,
4048bool SPIRVInstructionSelector::generateImageReadOrFetch(
4053 "ImageReg is not an image type.");
4055 bool IsSignedInteger =
4060 bool IsFetch = (SampledOp.getImm() == 1);
4063 if (ResultSize == 4) {
4066 TII.get(IsFetch ? SPIRV::OpImageFetch : SPIRV::OpImageRead))
4072 if (IsSignedInteger)
4077 SPIRVType *ReadType = widenTypeToVec4(ResType, Pos);
4081 TII.get(IsFetch ? SPIRV::OpImageFetch : SPIRV::OpImageRead))
4086 if (IsSignedInteger)
4092 if (ResultSize == 1) {
4094 TII.get(SPIRV::OpCompositeExtract))
4101 return extractSubvector(ResVReg, ResType, ReadReg, Pos);
4104bool SPIRVInstructionSelector::selectResourceGetPointer(
4106 Register ResourcePtr =
I.getOperand(2).getReg();
4108 if (RegType->
getOpcode() == SPIRV::OpTypeImage) {
4117 MachineIRBuilder MIRBuilder(
I);
4119 Register IndexReg =
I.getOperand(3).getReg();
4122 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
4123 TII.get(SPIRV::OpAccessChain))
4132bool SPIRVInstructionSelector::selectPushConstantGetPointer(
4134 MRI->replaceRegWith(ResVReg,
I.getOperand(2).getReg());
4138bool SPIRVInstructionSelector::selectResourceNonUniformIndex(
4140 Register ObjReg =
I.getOperand(2).getReg();
4141 if (!BuildCOPY(ResVReg, ObjReg,
I))
4151 decorateUsesAsNonUniform(ResVReg);
4155void SPIRVInstructionSelector::decorateUsesAsNonUniform(
4158 while (WorkList.
size() > 0) {
4162 bool IsDecorated =
false;
4163 for (MachineInstr &Use :
MRI->use_instructions(CurrentReg)) {
4164 if (
Use.getOpcode() == SPIRV::OpDecorate &&
4165 Use.getOperand(1).getImm() == SPIRV::Decoration::NonUniformEXT) {
4171 if (
Use.getOperand(0).isReg() &&
Use.getOperand(0).isDef()) {
4173 if (ResultReg == CurrentReg)
4181 SPIRV::Decoration::NonUniformEXT, {});
4186bool SPIRVInstructionSelector::extractSubvector(
4188 MachineInstr &InsertionPoint)
const {
4190 [[maybe_unused]] uint64_t InputSize =
4193 assert(InputSize > 1 &&
"The input must be a vector.");
4194 assert(ResultSize > 1 &&
"The result must be a vector.");
4195 assert(ResultSize < InputSize &&
4196 "Cannot extract more element than there are in the input.");
4199 const TargetRegisterClass *ScalarRegClass = GR.
getRegClass(ScalarType);
4200 for (uint64_t
I = 0;
I < ResultSize;
I++) {
4201 Register ComponentReg =
MRI->createVirtualRegister(ScalarRegClass);
4204 TII.get(SPIRV::OpCompositeExtract))
4215 MachineInstrBuilder MIB =
BuildMI(*InsertionPoint.
getParent(), InsertionPoint,
4217 TII.get(SPIRV::OpCompositeConstruct))
4221 for (
Register ComponentReg : ComponentRegisters)
4222 MIB.
addUse(ComponentReg);
4226bool SPIRVInstructionSelector::selectImageWriteIntrinsic(
4227 MachineInstr &
I)
const {
4234 Register ImageReg =
I.getOperand(1).getReg();
4236 Register NewImageReg =
MRI->createVirtualRegister(
MRI->getRegClass(ImageReg));
4242 Register CoordinateReg =
I.getOperand(2).getReg();
4243 Register DataReg =
I.getOperand(3).getReg();
4246 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
4247 TII.get(SPIRV::OpImageWrite))
4254Register SPIRVInstructionSelector::buildPointerToResource(
4255 const SPIRVType *SpirvResType, SPIRV::StorageClass::StorageClass SC,
4256 uint32_t Set, uint32_t
Binding, uint32_t ArraySize,
Register IndexReg,
4257 StringRef Name, MachineIRBuilder MIRBuilder)
const {
4259 if (ArraySize == 1) {
4263 "SpirvResType did not have an explicit layout.");
4268 const Type *VarType = ArrayType::get(
const_cast<Type *
>(ResType), ArraySize);
4272 VarPointerType, Set,
Binding, Name, MIRBuilder);
4287bool SPIRVInstructionSelector::selectFirstBitSet16(
4289 unsigned ExtendOpcode,
unsigned BitSetOpcode)
const {
4291 bool Result = selectOpWithSrcs(ExtReg, ResType,
I, {
I.getOperand(2).getReg()},
4295 selectFirstBitSet32(ResVReg, ResType,
I, ExtReg, BitSetOpcode);
4298bool SPIRVInstructionSelector::selectFirstBitSet32(
4300 Register SrcReg,
unsigned BitSetOpcode)
const {
4301 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
4304 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::GLSL_std_450))
4310bool SPIRVInstructionSelector::selectFirstBitSet64Overflow(
4312 Register SrcReg,
unsigned BitSetOpcode,
bool SwapPrimarySide)
const {
4319 assert(ComponentCount < 5 &&
"Vec 5+ will generate invalid SPIR-V ops");
4321 MachineIRBuilder MIRBuilder(
I);
4329 std::vector<Register> PartialRegs;
4332 unsigned CurrentComponent = 0;
4333 for (; CurrentComponent + 1 < ComponentCount; CurrentComponent += 2) {
4339 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
4340 TII.get(SPIRV::OpVectorShuffle))
4345 .
addImm(CurrentComponent)
4346 .
addImm(CurrentComponent + 1);
4354 if (!selectFirstBitSet64(SubVecBitSetReg, Vec2ResType,
I, BitSetResult,
4355 BitSetOpcode, SwapPrimarySide))
4358 PartialRegs.push_back(SubVecBitSetReg);
4362 if (CurrentComponent != ComponentCount) {
4368 if (!selectOpWithSrcs(FinalElemReg, I64Type,
I, {SrcReg, ConstIntLastIdx},
4369 SPIRV::OpVectorExtractDynamic))
4375 if (!selectFirstBitSet64(FinalElemBitSetReg,
BaseType,
I, FinalElemReg,
4376 BitSetOpcode, SwapPrimarySide))
4379 PartialRegs.push_back(FinalElemBitSetReg);
4384 return selectOpWithSrcs(ResVReg, ResType,
I, std::move(PartialRegs),
4385 SPIRV::OpCompositeConstruct);
4388bool SPIRVInstructionSelector::selectFirstBitSet64(
4390 Register SrcReg,
unsigned BitSetOpcode,
bool SwapPrimarySide)
const {
4403 if (ComponentCount > 2) {
4404 return selectFirstBitSet64Overflow(ResVReg, ResType,
I, SrcReg,
4405 BitSetOpcode, SwapPrimarySide);
4409 MachineIRBuilder MIRBuilder(
I);
4411 BaseType, 2 * ComponentCount, MIRBuilder,
false);
4415 if (!selectOpWithSrcs(BitcastReg, PostCastType,
I, {SrcReg},
4421 if (!selectFirstBitSet32(FBSReg, PostCastType,
I, BitcastReg, BitSetOpcode))
4428 bool IsScalarRes = ResType->
getOpcode() != SPIRV::OpTypeVector;
4431 if (!selectOpWithSrcs(HighReg, ResType,
I, {FBSReg, ConstIntZero},
4432 SPIRV::OpVectorExtractDynamic))
4434 if (!selectOpWithSrcs(LowReg, ResType,
I, {FBSReg, ConstIntOne},
4435 SPIRV::OpVectorExtractDynamic))
4439 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
4440 TII.get(SPIRV::OpVectorShuffle))
4448 for (
unsigned J = 0; J < ComponentCount * 2; J += 2) {
4455 MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
4456 TII.get(SPIRV::OpVectorShuffle))
4464 for (
unsigned J = 1; J < ComponentCount * 2; J += 2) {
4485 SelectOp = SPIRV::OpSelectSISCond;
4486 AddOp = SPIRV::OpIAddS;
4494 SelectOp = SPIRV::OpSelectVIVCond;
4495 AddOp = SPIRV::OpIAddV;
4505 if (SwapPrimarySide) {
4506 PrimaryReg = LowReg;
4507 SecondaryReg = HighReg;
4508 PrimaryShiftReg = Reg0;
4509 SecondaryShiftReg = Reg32;
4514 if (!selectOpWithSrcs(BReg, BoolType,
I, {PrimaryReg, NegOneReg},
4520 if (!selectOpWithSrcs(TmpReg, ResType,
I, {
BReg, SecondaryReg, PrimaryReg},
4526 if (!selectOpWithSrcs(ValReg, ResType,
I,
4527 {
BReg, SecondaryShiftReg, PrimaryShiftReg}, SelectOp))
4530 return selectOpWithSrcs(ResVReg, ResType,
I, {ValReg, TmpReg}, AddOp);
4533bool SPIRVInstructionSelector::selectFirstBitHigh(
Register ResVReg,
4536 bool IsSigned)
const {
4538 Register OpReg =
I.getOperand(2).getReg();
4541 unsigned ExtendOpcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
4542 unsigned BitSetOpcode = IsSigned ? GL::FindSMsb : GL::FindUMsb;
4546 return selectFirstBitSet16(ResVReg, ResType,
I, ExtendOpcode, BitSetOpcode);
4548 return selectFirstBitSet32(ResVReg, ResType,
I, OpReg, BitSetOpcode);
4550 return selectFirstBitSet64(ResVReg, ResType,
I, OpReg, BitSetOpcode,
4554 "spv_firstbituhigh and spv_firstbitshigh only support 16,32,64 bits.");
4558bool SPIRVInstructionSelector::selectFirstBitLow(
Register ResVReg,
4560 MachineInstr &
I)
const {
4562 Register OpReg =
I.getOperand(2).getReg();
4567 unsigned ExtendOpcode = SPIRV::OpUConvert;
4568 unsigned BitSetOpcode = GL::FindILsb;
4572 return selectFirstBitSet16(ResVReg, ResType,
I, ExtendOpcode, BitSetOpcode);
4574 return selectFirstBitSet32(ResVReg, ResType,
I, OpReg, BitSetOpcode);
4576 return selectFirstBitSet64(ResVReg, ResType,
I, OpReg, BitSetOpcode,
4583bool SPIRVInstructionSelector::selectAllocaArray(
Register ResVReg,
4585 MachineInstr &
I)
const {
4589 bool Res =
BuildMI(BB,
I,
I.getDebugLoc(),
4590 TII.get(SPIRV::OpVariableLengthArrayINTEL))
4593 .
addUse(
I.getOperand(2).getReg())
4596 unsigned Alignment =
I.getOperand(3).getImm();
4602bool SPIRVInstructionSelector::selectFrameIndex(
Register ResVReg,
4604 MachineInstr &
I)
const {
4608 bool Res =
BuildMI(*It->getParent(), It, It->getDebugLoc(),
4609 TII.get(SPIRV::OpVariable))
4612 .
addImm(
static_cast<uint32_t
>(SPIRV::StorageClass::Function))
4615 unsigned Alignment =
I.getOperand(2).getImm();
4622bool SPIRVInstructionSelector::selectBranch(MachineInstr &
I)
const {
4627 const MachineInstr *PrevI =
I.getPrevNode();
4629 if (PrevI !=
nullptr && PrevI->
getOpcode() == TargetOpcode::G_BRCOND) {
4630 return BuildMI(
MBB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBranchConditional))
4633 .
addMBB(
I.getOperand(0).getMBB())
4637 .
addMBB(
I.getOperand(0).getMBB())
4641bool SPIRVInstructionSelector::selectBranchCond(MachineInstr &
I)
const {
4652 const MachineInstr *NextI =
I.getNextNode();
4654 if (NextI !=
nullptr && NextI->
getOpcode() == SPIRV::OpBranchConditional)
4660 MachineBasicBlock *NextMBB =
I.getMF()->getBlockNumbered(NextMBBNum);
4661 return BuildMI(
MBB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBranchConditional))
4662 .
addUse(
I.getOperand(0).getReg())
4663 .
addMBB(
I.getOperand(1).getMBB())
4668bool SPIRVInstructionSelector::selectPhi(
Register ResVReg,
4670 MachineInstr &
I)
const {
4671 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpPhi))
4674 const unsigned NumOps =
I.getNumOperands();
4675 for (
unsigned i = 1; i <
NumOps; i += 2) {
4676 MIB.
addUse(
I.getOperand(i + 0).getReg());
4677 MIB.
addMBB(
I.getOperand(i + 1).getMBB());
4685bool SPIRVInstructionSelector::selectGlobalValue(
4686 Register ResVReg, MachineInstr &
I,
const MachineInstr *Init)
const {
4688 MachineIRBuilder MIRBuilder(
I);
4689 const GlobalValue *GV =
I.
getOperand(1).getGlobal();
4692 std::string GlobalIdent;
4694 unsigned &
ID = UnnamedGlobalIDs[GV];
4696 ID = UnnamedGlobalIDs.size();
4697 GlobalIdent =
"__unnamed_" + Twine(
ID).str();
4724 GVFun ? SPIRV::StorageClass::CodeSectionINTEL
4731 MachineRegisterInfo *
MRI = MIRBuilder.
getMRI();
4734 MRI->setRegClass(FuncVReg, &SPIRV::pIDRegClass);
4735 MachineInstrBuilder MIB1 =
4736 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUndef))
4739 MachineInstrBuilder MIB2 =
4741 TII.get(SPIRV::OpConstantFunctionPointerINTEL))
4745 GR.
add(ConstVal, MIB2);
4751 MachineInstrBuilder MIB3 =
4752 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantNull))
4755 GR.
add(ConstVal, MIB3);
4758 assert(NewReg != ResVReg);
4759 return BuildCOPY(ResVReg, NewReg,
I);
4769 const std::optional<SPIRV::LinkageType::LinkageType> LnkType =
4778 GlobalVar->isConstant(), LnkType, MIRBuilder,
true);
4782bool SPIRVInstructionSelector::selectLog10(
Register ResVReg,
4784 MachineInstr &
I)
const {
4786 return selectExtInst(ResVReg, ResType,
I, CL::log10);
4794 MachineIRBuilder MIRBuilder(
I);
4800 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
4803 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::GLSL_std_450))
4805 .
add(
I.getOperand(1))
4810 ResType->
getOpcode() == SPIRV::OpTypeFloat);
4813 ResType->
getOpcode() == SPIRV::OpTypeVector
4820 auto Opcode = ResType->
getOpcode() == SPIRV::OpTypeVector
4821 ? SPIRV::OpVectorTimesScalar
4831bool SPIRVInstructionSelector::selectModf(
Register ResVReg,
4833 MachineInstr &
I)
const {
4849 MachineIRBuilder MIRBuilder(
I);
4852 ResType, MIRBuilder, SPIRV::StorageClass::Function);
4864 MachineBasicBlock &EntryBB =
I.getMF()->front();
4868 BuildMI(EntryBB, VarPos,
I.getDebugLoc(),
TII.get(SPIRV::OpVariable))
4871 .
addImm(
static_cast<uint32_t
>(SPIRV::StorageClass::Function));
4877 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
4880 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::OpenCL_std))
4883 .
add(
I.getOperand(
I.getNumExplicitDefs()))
4887 Register IntegralPartReg =
I.getOperand(1).getReg();
4888 if (IntegralPartReg.
isValid()) {
4890 auto LoadMIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
4899 assert(
false &&
"GLSL::Modf is deprecated.");
4910bool SPIRVInstructionSelector::loadVec3BuiltinInputID(
4911 SPIRV::BuiltIn::BuiltIn BuiltInValue,
Register ResVReg,
4912 const SPIRVType *ResType, MachineInstr &
I)
const {
4913 MachineIRBuilder MIRBuilder(
I);
4917 Vec3Ty, MIRBuilder, SPIRV::StorageClass::Input);
4929 SPIRV::StorageClass::Input,
nullptr,
true, std::nullopt, MIRBuilder,
4933 MachineRegisterInfo *
MRI = MIRBuilder.
getMRI();
4934 Register LoadedRegister =
MRI->createVirtualRegister(&SPIRV::iIDRegClass);
4940 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
4947 assert(
I.getOperand(2).isReg());
4948 const uint32_t ThreadId =
foldImm(
I.getOperand(2),
MRI);
4952 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
4962bool SPIRVInstructionSelector::loadBuiltinInputID(
4963 SPIRV::BuiltIn::BuiltIn BuiltInValue,
Register ResVReg,
4964 const SPIRVType *ResType, MachineInstr &
I)
const {
4965 MachineIRBuilder MIRBuilder(
I);
4967 ResType, MIRBuilder, SPIRV::StorageClass::Input);
4982 SPIRV::StorageClass::Input,
nullptr,
true, std::nullopt, MIRBuilder,
4986 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
4995 MachineInstr &
I)
const {
4996 MachineIRBuilder MIRBuilder(
I);
4997 if (
Type->getOpcode() != SPIRV::OpTypeVector)
5001 if (VectorSize == 4)
5009bool SPIRVInstructionSelector::loadHandleBeforePosition(
5011 MachineInstr &Pos)
const {
5014 Intrinsic::spv_resource_handlefrombinding);
5022 bool IsStructuredBuffer = ResType->
getOpcode() == SPIRV::OpTypePointer;
5023 MachineIRBuilder MIRBuilder(HandleDef);
5025 SPIRV::StorageClass::StorageClass SC = SPIRV::StorageClass::UniformConstant;
5027 if (IsStructuredBuffer) {
5032 Register VarReg = buildPointerToResource(VarType, SC, Set,
Binding, ArraySize,
5033 IndexReg, Name, MIRBuilder);
5037 uint32_t LoadOpcode =
5038 IsStructuredBuffer ? SPIRV::OpCopyObject : SPIRV::OpLoad;
5041 TII.get(LoadOpcode))
5048void SPIRVInstructionSelector::errorIfInstrOutsideShader(
5049 MachineInstr &
I)
const {
5051 std::string DiagMsg;
5052 raw_string_ostream OS(DiagMsg);
5053 I.print(OS,
true,
false,
false,
false);
5054 DiagMsg +=
" is only supported in shaders.\n";
5060InstructionSelector *
5064 return new SPIRVInstructionSelector(TM, Subtarget, RBI);
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder & UseMI
#define GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file declares a class to represent arbitrary precision floating point values and provide a varie...
static bool selectUnmergeValues(MachineInstrBuilder &MIB, const ARMBaseInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
DXIL Resource Implicit Binding
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const HexagonInstrInfo * TII
LLVMTypeRef LLVMIntType(unsigned NumBits)
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
Register const TargetRegisterInfo * TRI
Promote Memory to Register
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
static StringRef getName(Value *V)
static unsigned getFCmpOpcode(CmpInst::Predicate Pred, unsigned Size)
static APFloat getOneFP(const Type *LLVMFloatTy)
static bool isUSMStorageClass(SPIRV::StorageClass::StorageClass SC)
static bool isASCastInGVar(MachineRegisterInfo *MRI, Register ResVReg)
static bool mayApplyGenericSelection(unsigned Opcode)
static APFloat getZeroFP(const Type *LLVMFloatTy)
std::vector< std::pair< SPIRV::InstructionSet::InstructionSet, uint32_t > > ExtInstList
static bool intrinsicHasSideEffects(Intrinsic::ID ID)
static unsigned getBoolCmpOpcode(unsigned PredNum)
static unsigned getICmpOpcode(unsigned PredNum)
static bool isOpcodeWithNoSideEffects(unsigned Opcode)
static void addMemoryOperands(MachineMemOperand *MemOp, MachineInstrBuilder &MIB, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry &GR)
static bool isConstReg(MachineRegisterInfo *MRI, MachineInstr *OpDef, SmallPtrSet< SPIRVType *, 4 > &Visited)
static unsigned getPtrCmpOpcode(unsigned Pred)
bool isDead(const MachineInstr &MI, const MachineRegisterInfo &MRI)
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static const fltSemantics & IEEEsingle()
static const fltSemantics & IEEEdouble()
static const fltSemantics & IEEEhalf()
static APFloat getOne(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative One.
static APFloat getZero(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Zero.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
uint64_t getZExtValue() const
Get zero extended value.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ FCMP_ULT
1 1 0 0 True if unordered or less than
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
@ ICMP_ULT
unsigned less than
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
@ ICMP_SGE
signed greater or equal
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
@ ICMP_ULE
unsigned less or equal
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Represents a call to an intrinsic.
Intrinsic::ID getIntrinsicID() const
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
@ InternalLinkage
Rename collisions when linking (static functions).
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
constexpr bool isScalar() const
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr bool isValid() const
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
constexpr bool isVector() const
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
constexpr bool isPointer() const
static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
int getNumber() const
MachineBasicBlocks are uniquely numbered at the function level, unless they're not in a MachineFuncti...
LLVM_ABI iterator getFirstNonPHI()
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
MachineInstrBundleIterator< MachineInstr > iterator
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Helper class to build MachineInstr.
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineFunction & getMF()
Getter for the function we currently build.
MachineRegisterInfo * getMRI()
Getter for MRI.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
bool constrainAllUses(const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
const MachineBasicBlock * getParent() const
unsigned getNumOperands() const
Returns the total number of operands.
LLVM_ABI void setDesc(const MCInstrDesc &TID)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one.
LLVM_ABI unsigned getNumExplicitDefs() const
Returns the number of non-implicit definitions.
LLVM_ABI const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
LLVM_ABI void removeOperand(unsigned OpNo)
Erase an operand from an instruction, leaving it with one fewer operand than it started with.
const MachineOperand & getOperand(unsigned i) const
A description of a memory reference used in the backend.
@ MOVolatile
The memory access is volatile.
@ MONonTemporal
The memory access is non-temporal.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
defusechain_instr_iterator< true, false, false, true > use_instr_iterator
use_instr_iterator/use_instr_begin/use_instr_end - Walk all uses of the specified register,...
defusechain_instr_iterator< false, true, false, true > def_instr_iterator
def_instr_iterator/def_instr_begin/def_instr_end - Walk all defs of the specified register,...
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
LLVM_ABI void setType(Register VReg, LLT Ty)
Set the low-level type of VReg to Ty.
Analysis providing profile information.
Holds all the information related to register banks.
Wrapper class representing virtual and physical registers.
constexpr bool isValid() const
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
SPIRVType * getSPIRVTypeForVReg(Register VReg, const MachineFunction *MF=nullptr) const
Register getOrCreateConstInt(uint64_t Val, MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII, bool ZeroAsNull=true)
SPIRVType * getResultType(Register VReg, MachineFunction *MF=nullptr)
SPIRVType * getOrCreateSPIRVBoolType(MachineIRBuilder &MIRBuilder, bool EmitIR)
MachineInstr * getOrAddMemAliasingINTELInst(MachineIRBuilder &MIRBuilder, const MDNode *AliasingListMD)
void assignSPIRVTypeToVReg(SPIRVType *Type, Register VReg, const MachineFunction &MF)
Register getOrCreateUndef(MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII)
Register buildGlobalVariable(Register Reg, SPIRVType *BaseType, StringRef Name, const GlobalValue *GV, SPIRV::StorageClass::StorageClass Storage, const MachineInstr *Init, bool IsConst, const std::optional< SPIRV::LinkageType::LinkageType > &LinkageType, MachineIRBuilder &MIRBuilder, bool IsInstSelector)
SPIRVType * changePointerStorageClass(SPIRVType *PtrType, SPIRV::StorageClass::StorageClass SC, MachineInstr &I)
const Type * getTypeForSPIRVType(const SPIRVType *Ty) const
bool isBitcastCompatible(const SPIRVType *Type1, const SPIRVType *Type2) const
unsigned getScalarOrVectorComponentCount(Register VReg) const
SPIRVType * getOrCreateSPIRVFloatType(unsigned BitWidth, MachineInstr &I, const SPIRVInstrInfo &TII)
bool isScalarOrVectorSigned(const SPIRVType *Type) const
Register getOrCreateGlobalVariableWithBinding(const SPIRVType *VarType, uint32_t Set, uint32_t Binding, StringRef Name, MachineIRBuilder &MIRBuilder)
SPIRVType * getOrCreateSPIRVType(const Type *Type, MachineInstr &I, SPIRV::AccessQualifier::AccessQualifier AQ, bool EmitIR)
unsigned getPointerSize() const
SPIRVType * getOrCreateSPIRVPointerType(const Type *BaseType, MachineIRBuilder &MIRBuilder, SPIRV::StorageClass::StorageClass SC)
Register buildConstantFP(APFloat Val, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType=nullptr)
SPIRVType * getPointeeType(SPIRVType *PtrType)
void invalidateMachineInstr(MachineInstr *MI)
Register getSPIRVTypeID(const SPIRVType *SpirvType) const
bool isScalarOfType(Register VReg, unsigned TypeOpcode) const
bool findValueAttrs(const MachineInstr *Key, Type *&Ty, StringRef &Name)
void addGlobalObject(const Value *V, const MachineFunction *MF, Register R)
SPIRVType * getScalarOrVectorComponentType(Register VReg) const
void recordFunctionPointer(const MachineOperand *MO, const Function *F)
bool isAggregateType(SPIRVType *Type) const
const TargetRegisterClass * getRegClass(SPIRVType *SpvType) const
SPIRVType * getOrCreateSPIRVVectorType(SPIRVType *BaseType, unsigned NumElements, MachineIRBuilder &MIRBuilder, bool EmitIR)
bool isScalarOrVectorOfType(Register VReg, unsigned TypeOpcode) const
Register getOrCreateConstIntArray(uint64_t Val, size_t Num, MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII)
MachineFunction * setCurrentFunc(MachineFunction &MF)
Register getOrCreateConstVector(uint64_t Val, MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII, bool ZeroAsNull=true)
SPIRVType * getOrCreateSPIRVIntegerType(unsigned BitWidth, MachineIRBuilder &MIRBuilder)
Type * getDeducedGlobalValueType(const GlobalValue *Global)
LLT getRegType(SPIRVType *SpvType) const
SPIRV::StorageClass::StorageClass getPointerStorageClass(Register VReg) const
Register getOrCreateConstFP(APFloat Val, MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII, bool ZeroAsNull=true)
Register getOrCreateConstNullPtr(MachineIRBuilder &MIRBuilder, SPIRVType *SpvType)
unsigned getScalarOrVectorBitWidth(const SPIRVType *Type) const
const SPIRVType * retrieveScalarOrVectorIntType(const SPIRVType *Type) const
bool erase(const MachineInstr *MI)
bool add(SPIRV::IRHandle Handle, const MachineInstr *MI)
Register find(SPIRV::IRHandle Handle, const MachineFunction *MF)
bool isPhysicalSPIRV() const
bool isAtLeastSPIRVVer(VersionTuple VerToCompareTo) const
bool canUseExtInstSet(SPIRV::InstructionSet::InstructionSet E) const
bool isLogicalSPIRV() const
bool canUseExtension(SPIRV::Extension::Extension E) const
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
constexpr size_t size() const
size - Get the string size.
static LLVM_ABI StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
The instances of the Type class are immutable: once they are created, they are never changed.
@ HalfTyID
16-bit floating point type
@ FloatTyID
32-bit floating point type
@ DoubleTyID
64-bit floating point type
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
bool isStructTy() const
True if this is an instance of StructType.
TypeID getTypeID() const
Return the type id for the type.
Value * getOperand(unsigned i) const
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char IsConst[]
Key for Kernel::Arg::Metadata::mIsConst.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Scope
Defines the scope in which this symbol should be visible: Default – Visible in the public interface o...
NodeAddr< DefNode * > Def
NodeAddr< InstrNode * > Instr
NodeAddr< UseNode * > Use
This is an optimization pass for GlobalISel generic memory operations.
void buildOpName(Register Target, const StringRef &Name, MachineIRBuilder &MIRBuilder)
int64_t getIConstValSext(Register ConstReg, const MachineRegisterInfo *MRI)
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
bool isTypeFoldingSupported(unsigned Opcode)
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
void addNumImm(const APInt &Imm, MachineInstrBuilder &MIB)
LLVM_ABI void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
LLVM_ABI bool constrainSelectedInstRegOperands(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Mutate the newly-selected instruction I to constrain its (possibly generic) virtual register operands...
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
unsigned getArrayComponentCount(const MachineRegisterInfo *MRI, const MachineInstr *ResType)
constexpr bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
uint64_t getIConstVal(Register ConstReg, const MachineRegisterInfo *MRI)
SmallVector< MachineInstr *, 4 > createContinuedInstructions(MachineIRBuilder &MIRBuilder, unsigned Opcode, unsigned MinWC, unsigned ContinuedOpcode, ArrayRef< Register > Args, Register ReturnRegister, Register TypeID)
SPIRV::MemorySemantics::MemorySemantics getMemSemanticsForStorageClass(SPIRV::StorageClass::StorageClass SC)
constexpr unsigned storageClassToAddressSpace(SPIRV::StorageClass::StorageClass SC)
MachineBasicBlock::iterator getFirstValidInstructionInsertPoint(MachineBasicBlock &BB)
void buildOpDecorate(Register Reg, MachineIRBuilder &MIRBuilder, SPIRV::Decoration::Decoration Dec, const std::vector< uint32_t > &DecArgs, StringRef StrImm)
MachineBasicBlock::iterator getOpVariableMBBIt(MachineInstr &I)
Register createVirtualRegister(SPIRVType *SpvType, SPIRVGlobalRegistry *GR, MachineRegisterInfo *MRI, const MachineFunction &MF)
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
Type * toTypedPointer(Type *Ty)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
const MachineInstr SPIRVType
constexpr bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC)
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
MachineInstr * passCopy(MachineInstr *Def, const MachineRegisterInfo *MRI)
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
std::optional< SPIRV::LinkageType::LinkageType > getSpirvLinkageTypeFor(const SPIRVSubtarget &ST, const GlobalValue &GV)
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
SPIRV::StorageClass::StorageClass addressSpaceToStorageClass(unsigned AddrSpace, const SPIRVSubtarget &STI)
AtomicOrdering
Atomic ordering for LLVM's memory model.
SPIRV::Scope::Scope getMemScope(LLVMContext &Ctx, SyncScope::ID Id)
InstructionSelector * createSPIRVInstructionSelector(const SPIRVTargetMachine &TM, const SPIRVSubtarget &Subtarget, const RegisterBankInfo &RBI)
std::string getStringValueFromReg(Register Reg, MachineRegisterInfo &MRI)
int64_t foldImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
DWARFExpression::Operation Op
constexpr unsigned BitWidth
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
bool hasInitializer(const GlobalVariable *GV)
MachineInstr * getVRegDef(MachineRegisterInfo &MRI, Register Reg)
SPIRV::MemorySemantics::MemorySemantics getMemSemantics(AtomicOrdering Ord)
std::string getLinkStringForBuiltIn(SPIRV::BuiltIn::BuiltIn BuiltInValue)
LLVM_ABI bool isTriviallyDead(const MachineInstr &MI, const MachineRegisterInfo &MRI)
Check whether an instruction MI is dead: it only defines dead virtual registers, and doesn't have oth...