31#include "llvm/IR/IntrinsicsSPIRV.h"
56#define DEBUG_TYPE "spirv-isel"
59namespace CL = SPIRV::OpenCLExtInst;
60namespace GL = SPIRV::GLSLExtInst;
63 std::vector<std::pair<SPIRV::InstructionSet::InstructionSet, uint32_t>>;
67#define GET_GLOBALISEL_PREDICATE_BITSET
68#include "SPIRVGenGlobalISel.inc"
69#undef GET_GLOBALISEL_PREDICATE_BITSET
95#define GET_GLOBALISEL_PREDICATES_DECL
96#include "SPIRVGenGlobalISel.inc"
97#undef GET_GLOBALISEL_PREDICATES_DECL
99#define GET_GLOBALISEL_TEMPORARIES_DECL
100#include "SPIRVGenGlobalISel.inc"
101#undef GET_GLOBALISEL_TEMPORARIES_DECL
118 unsigned Opcode)
const;
120 unsigned Opcode)
const;
137 unsigned NegateOpcode = 0)
const;
172 bool IsSigned)
const;
174 bool IsSigned,
unsigned Opcode)
const;
176 bool IsSigned)
const;
216 GL::GLSLExtInst GLInst)
const;
229 const SPIRVType *ResType =
nullptr)
const;
241#define GET_GLOBALISEL_IMPL
242#include "SPIRVGenGlobalISel.inc"
243#undef GET_GLOBALISEL_IMPL
249 TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()),
251#include
"SPIRVGenGlobalISel.inc"
254#include
"SPIRVGenGlobalISel.inc"
265 GR.setCurrentFunc(MF);
266 InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
275 assert(
I.getParent() &&
"Instruction should be in a basic block!");
276 assert(
I.getParent()->getParent() &&
"Instruction should be in a function!");
281 if (Opcode == SPIRV::ASSIGN_TYPE) {
282 auto *
Def =
MRI->getVRegDef(
I.getOperand(1).getReg());
284 bool Res = selectImpl(
I, *CoverageInfo);
285 assert(Res ||
Def->getOpcode() == TargetOpcode::G_CONSTANT);
289 MRI->replaceRegWith(
I.getOperand(1).getReg(),
I.getOperand(0).getReg());
290 I.removeFromParent();
292 }
else if (
I.getNumDefs() == 1) {
299 if (
I.getNumOperands() !=
I.getNumExplicitOperands()) {
300 LLVM_DEBUG(
errs() <<
"Generic instr has unexpected implicit operands\n");
306 bool HasDefs =
I.getNumDefs() > 0;
308 SPIRVType *ResType = HasDefs ? GR.getSPIRVTypeForVReg(ResVReg) :
nullptr;
309 assert(!HasDefs || ResType ||
I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
310 if (spvSelect(ResVReg, ResType,
I)) {
312 for (
unsigned i = 0; i <
I.getNumDefs(); ++i)
314 I.removeFromParent();
320bool SPIRVInstructionSelector::spvSelect(
Register ResVReg,
323 const unsigned Opcode =
I.getOpcode();
325 return selectImpl(
I, *CoverageInfo);
327 case TargetOpcode::G_CONSTANT:
328 return selectConst(ResVReg, ResType,
I.getOperand(1).getCImm()->getValue(),
330 case TargetOpcode::G_GLOBAL_VALUE:
331 return selectGlobalValue(ResVReg,
I);
332 case TargetOpcode::G_IMPLICIT_DEF:
333 return selectOpUndef(ResVReg, ResType,
I);
334 case TargetOpcode::G_FREEZE:
335 return selectFreeze(ResVReg, ResType,
I);
337 case TargetOpcode::G_INTRINSIC:
338 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
339 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
340 return selectIntrinsic(ResVReg, ResType,
I);
341 case TargetOpcode::G_BITREVERSE:
342 return selectBitreverse(ResVReg, ResType,
I);
344 case TargetOpcode::G_BUILD_VECTOR:
345 return selectConstVector(ResVReg, ResType,
I);
346 case TargetOpcode::G_SPLAT_VECTOR:
347 return selectSplatVector(ResVReg, ResType,
I);
349 case TargetOpcode::G_SHUFFLE_VECTOR: {
351 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVectorShuffle))
353 .
addUse(GR.getSPIRVTypeID(ResType))
354 .
addUse(
I.getOperand(1).getReg())
355 .
addUse(
I.getOperand(2).getReg());
356 for (
auto V :
I.getOperand(3).getShuffleMask())
360 case TargetOpcode::G_MEMMOVE:
361 case TargetOpcode::G_MEMCPY:
362 case TargetOpcode::G_MEMSET:
363 return selectMemOperation(ResVReg,
I);
365 case TargetOpcode::G_ICMP:
366 return selectICmp(ResVReg, ResType,
I);
367 case TargetOpcode::G_FCMP:
368 return selectFCmp(ResVReg, ResType,
I);
370 case TargetOpcode::G_FRAME_INDEX:
371 return selectFrameIndex(ResVReg, ResType,
I);
373 case TargetOpcode::G_LOAD:
374 return selectLoad(ResVReg, ResType,
I);
375 case TargetOpcode::G_STORE:
376 return selectStore(
I);
378 case TargetOpcode::G_BR:
379 return selectBranch(
I);
380 case TargetOpcode::G_BRCOND:
381 return selectBranchCond(
I);
383 case TargetOpcode::G_PHI:
384 return selectPhi(ResVReg, ResType,
I);
386 case TargetOpcode::G_FPTOSI:
387 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertFToS);
388 case TargetOpcode::G_FPTOUI:
389 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertFToU);
391 case TargetOpcode::G_SITOFP:
392 return selectIToF(ResVReg, ResType,
I,
true, SPIRV::OpConvertSToF);
393 case TargetOpcode::G_UITOFP:
394 return selectIToF(ResVReg, ResType,
I,
false, SPIRV::OpConvertUToF);
396 case TargetOpcode::G_CTPOP:
397 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpBitCount);
398 case TargetOpcode::G_SMIN:
399 return selectExtInst(ResVReg, ResType,
I, CL::s_min, GL::SMin);
400 case TargetOpcode::G_UMIN:
401 return selectExtInst(ResVReg, ResType,
I, CL::u_min, GL::UMin);
403 case TargetOpcode::G_SMAX:
404 return selectExtInst(ResVReg, ResType,
I, CL::s_max, GL::SMax);
405 case TargetOpcode::G_UMAX:
406 return selectExtInst(ResVReg, ResType,
I, CL::u_max, GL::UMax);
408 case TargetOpcode::G_FMA:
409 return selectExtInst(ResVReg, ResType,
I, CL::fma, GL::Fma);
411 case TargetOpcode::G_FPOW:
412 return selectExtInst(ResVReg, ResType,
I, CL::pow, GL::Pow);
413 case TargetOpcode::G_FPOWI:
414 return selectExtInst(ResVReg, ResType,
I, CL::pown);
416 case TargetOpcode::G_FEXP:
417 return selectExtInst(ResVReg, ResType,
I, CL::exp, GL::Exp);
418 case TargetOpcode::G_FEXP2:
419 return selectExtInst(ResVReg, ResType,
I, CL::exp2, GL::Exp2);
421 case TargetOpcode::G_FLOG:
422 return selectExtInst(ResVReg, ResType,
I, CL::log, GL::Log);
423 case TargetOpcode::G_FLOG2:
424 return selectExtInst(ResVReg, ResType,
I, CL::log2, GL::Log2);
425 case TargetOpcode::G_FLOG10:
426 return selectLog10(ResVReg, ResType,
I);
428 case TargetOpcode::G_FABS:
429 return selectExtInst(ResVReg, ResType,
I, CL::fabs, GL::FAbs);
430 case TargetOpcode::G_ABS:
431 return selectExtInst(ResVReg, ResType,
I, CL::s_abs, GL::SAbs);
433 case TargetOpcode::G_FMINNUM:
434 case TargetOpcode::G_FMINIMUM:
435 return selectExtInst(ResVReg, ResType,
I, CL::fmin, GL::FMin);
436 case TargetOpcode::G_FMAXNUM:
437 case TargetOpcode::G_FMAXIMUM:
438 return selectExtInst(ResVReg, ResType,
I, CL::fmax, GL::FMax);
440 case TargetOpcode::G_FCOPYSIGN:
441 return selectExtInst(ResVReg, ResType,
I, CL::copysign);
443 case TargetOpcode::G_FCEIL:
444 return selectExtInst(ResVReg, ResType,
I, CL::ceil, GL::Ceil);
445 case TargetOpcode::G_FFLOOR:
446 return selectExtInst(ResVReg, ResType,
I, CL::floor, GL::Floor);
448 case TargetOpcode::G_FCOS:
449 return selectExtInst(ResVReg, ResType,
I, CL::cos, GL::Cos);
450 case TargetOpcode::G_FSIN:
451 return selectExtInst(ResVReg, ResType,
I, CL::sin, GL::Sin);
453 case TargetOpcode::G_FSQRT:
454 return selectExtInst(ResVReg, ResType,
I, CL::sqrt, GL::Sqrt);
456 case TargetOpcode::G_CTTZ:
457 case TargetOpcode::G_CTTZ_ZERO_UNDEF:
458 return selectExtInst(ResVReg, ResType,
I, CL::ctz);
459 case TargetOpcode::G_CTLZ:
460 case TargetOpcode::G_CTLZ_ZERO_UNDEF:
461 return selectExtInst(ResVReg, ResType,
I, CL::clz);
463 case TargetOpcode::G_INTRINSIC_ROUND:
464 return selectExtInst(ResVReg, ResType,
I, CL::round, GL::Round);
465 case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
466 return selectExtInst(ResVReg, ResType,
I, CL::rint, GL::RoundEven);
467 case TargetOpcode::G_INTRINSIC_TRUNC:
468 return selectExtInst(ResVReg, ResType,
I, CL::trunc, GL::Trunc);
469 case TargetOpcode::G_FRINT:
470 case TargetOpcode::G_FNEARBYINT:
471 return selectExtInst(ResVReg, ResType,
I, CL::rint, GL::RoundEven);
473 case TargetOpcode::G_SMULH:
474 return selectExtInst(ResVReg, ResType,
I, CL::s_mul_hi);
475 case TargetOpcode::G_UMULH:
476 return selectExtInst(ResVReg, ResType,
I, CL::u_mul_hi);
478 case TargetOpcode::G_SEXT:
479 return selectExt(ResVReg, ResType,
I,
true);
480 case TargetOpcode::G_ANYEXT:
481 case TargetOpcode::G_ZEXT:
482 return selectExt(ResVReg, ResType,
I,
false);
483 case TargetOpcode::G_TRUNC:
484 return selectTrunc(ResVReg, ResType,
I);
485 case TargetOpcode::G_FPTRUNC:
486 case TargetOpcode::G_FPEXT:
487 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpFConvert);
489 case TargetOpcode::G_PTRTOINT:
490 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertPtrToU);
491 case TargetOpcode::G_INTTOPTR:
492 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertUToPtr);
493 case TargetOpcode::G_BITCAST:
494 return selectBitcast(ResVReg, ResType,
I);
495 case TargetOpcode::G_ADDRSPACE_CAST:
496 return selectAddrSpaceCast(ResVReg, ResType,
I);
497 case TargetOpcode::G_PTR_ADD: {
502 assert(
I.getOperand(1).isReg() &&
I.getOperand(2).isReg());
506 assert(((*II).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
507 (*II).getOpcode() == TargetOpcode::COPY ||
508 (*II).getOpcode() == SPIRV::OpVariable) &&
510 Register Idx = buildZerosVal(GR.getOrCreateSPIRVIntegerType(32,
I,
TII),
I);
512 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSpecConstantOp))
514 .
addUse(GR.getSPIRVTypeID(ResType))
516 SPIRV::Opcode::InBoundsPtrAccessChain))
519 .
addUse(
I.getOperand(2).getReg());
523 case TargetOpcode::G_ATOMICRMW_OR:
524 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicOr);
525 case TargetOpcode::G_ATOMICRMW_ADD:
526 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicIAdd);
527 case TargetOpcode::G_ATOMICRMW_AND:
528 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicAnd);
529 case TargetOpcode::G_ATOMICRMW_MAX:
530 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicSMax);
531 case TargetOpcode::G_ATOMICRMW_MIN:
532 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicSMin);
533 case TargetOpcode::G_ATOMICRMW_SUB:
534 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicISub);
535 case TargetOpcode::G_ATOMICRMW_XOR:
536 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicXor);
537 case TargetOpcode::G_ATOMICRMW_UMAX:
538 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicUMax);
539 case TargetOpcode::G_ATOMICRMW_UMIN:
540 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicUMin);
541 case TargetOpcode::G_ATOMICRMW_XCHG:
542 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicExchange);
543 case TargetOpcode::G_ATOMIC_CMPXCHG:
544 return selectAtomicCmpXchg(ResVReg, ResType,
I);
546 case TargetOpcode::G_ATOMICRMW_FADD:
547 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFAddEXT);
548 case TargetOpcode::G_ATOMICRMW_FSUB:
550 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFAddEXT,
552 case TargetOpcode::G_ATOMICRMW_FMIN:
553 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFMinEXT);
554 case TargetOpcode::G_ATOMICRMW_FMAX:
555 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFMaxEXT);
557 case TargetOpcode::G_FENCE:
558 return selectFence(
I);
560 case TargetOpcode::G_STACKSAVE:
561 return selectStackSave(ResVReg, ResType,
I);
562 case TargetOpcode::G_STACKRESTORE:
563 return selectStackRestore(
I);
565 case TargetOpcode::G_UNMERGE_VALUES:
// Convenience overload: selects an extended instruction available only in
// the OpenCL.std set; wraps CLInst in a one-element list and defers to the
// list-based overload. (Extract is elided: param lines/brace missing.)
573bool SPIRVInstructionSelector::selectExtInst(
Register ResVReg,
576 CL::OpenCLExtInst CLInst)
const {
577 return selectExtInst(ResVReg, ResType,
I,
578 {{SPIRV::InstructionSet::OpenCL_std, CLInst}});
// Convenience overload: offers both an OpenCL.std and a GLSL.std.450
// candidate; the list-based overload picks whichever set the subtarget
// supports. (Extract is elided: param lines/brace missing.)
581bool SPIRVInstructionSelector::selectExtInst(
Register ResVReg,
584 CL::OpenCLExtInst CLInst,
585 GL::GLSLExtInst GLInst)
const {
586 ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
587 {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
588 return selectExtInst(ResVReg, ResType,
I, ExtInsts);
// Core ext-inst selector: walks the candidate (instruction-set, opcode)
// pairs and emits OpExtInst with the first set the subtarget can use;
// remaining operands of I (from index 1) are forwarded verbatim.
// (Extract is elided: some lines between fragments are missing.)
591bool SPIRVInstructionSelector::selectExtInst(
Register ResVReg,
596 for (
const auto &Ex : Insts) {
597 SPIRV::InstructionSet::InstructionSet Set = Ex.first;
599 if (STI.canUseExtInstSet(Set)) {
601 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
603 .
addUse(GR.getSPIRVTypeID(ResType))
// Forward the generic instruction's source operands onto the OpExtInst.
606 const unsigned NumOps =
I.getNumOperands();
607 for (
unsigned i = 1; i < NumOps; ++i)
608 MIB.add(
I.getOperand(i));
609 return MIB.constrainAllUses(
TII,
TRI, RBI);
// Emits a single-operand SPIR-V instruction (given by Opcode) with an
// explicitly supplied source register. (Extract elided: result-def and
// source-use lines are missing from view.)
615bool SPIRVInstructionSelector::selectUnOpWithSrc(
Register ResVReg,
619 unsigned Opcode)
const {
620 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
622 .
addUse(GR.getSPIRVTypeID(ResType))
// Emits a single-operand SPIR-V instruction taking its source from
// operand 1 of the generic instruction; thin wrapper over
// selectUnOpWithSrc. (Extract elided: trailing arguments missing.)
627bool SPIRVInstructionSelector::selectUnOp(
Register ResVReg,
630 unsigned Opcode)
const {
631 return selectUnOpWithSrc(ResVReg, ResType,
I,
I.getOperand(1).getReg(),
// Selects G_BITCAST as OpBitcast after checking the source and result
// SPIR-V types are bitcast-compatible (the failure branch is elided from
// this extract).
635bool SPIRVInstructionSelector::selectBitcast(
Register ResVReg,
638 Register OpReg =
I.getOperand(1).getReg();
639 SPIRVType *OpType = OpReg.
isValid() ? GR.getSPIRVTypeForVReg(OpReg) :
nullptr;
640 if (!GR.isBitcastCompatible(ResType, OpType))
642 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpBitcast);
648 return SPIRV::Scope::Invocation;
650 return SPIRV::Scope::Device;
652 return SPIRV::Scope::Workgroup;
654 return SPIRV::Scope::CrossDevice;
656 return SPIRV::Scope::Subgroup;
665 return SPIRV::Scope::Device;
671 if (
MemOp->isVolatile())
672 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
673 if (
MemOp->isNonTemporal())
674 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
675 if (
MemOp->getAlign().value())
676 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);
678 if (SpvMemOp !=
static_cast<uint32_t>(SPIRV::MemoryOperand::None)) {
680 if (SpvMemOp &
static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
687 if (Flags & MachineMemOperand::Flags::MOVolatile)
688 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
689 if (Flags & MachineMemOperand::Flags::MONonTemporal)
690 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
692 if (SpvMemOp !=
static_cast<uint32_t>(SPIRV::MemoryOperand::None))
// Emits OpLoad. OpOffset skips the intrinsic-ID operand when the load was
// expressed as a G_INTRINSIC (spv_load) rather than a plain G_LOAD.
// When no MachineMemOperand is attached, only the intrinsic forms are
// legal (asserted); otherwise memory-operand flags are presumably encoded
// in the elided portion — TODO confirm against the full source.
696bool SPIRVInstructionSelector::selectLoad(
Register ResVReg,
699 unsigned OpOffset = isa<GIntrinsic>(
I) ? 1 : 0;
701 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
703 .
addUse(GR.getSPIRVTypeID(ResType))
705 if (!
I.getNumMemOperands()) {
706 assert(
I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
708 TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
713 return MIB.constrainAllUses(
TII,
TRI, RBI);
// Emits OpStore. OpOffset skips the intrinsic-ID operand for the
// spv_store intrinsic form; StoreVal is the value operand. As in
// selectLoad, a missing MachineMemOperand is only legal for the
// intrinsic forms (asserted).
716bool SPIRVInstructionSelector::selectStore(
MachineInstr &
I)
const {
717 unsigned OpOffset = isa<GIntrinsic>(
I) ? 1 : 0;
718 Register StoreVal =
I.getOperand(0 + OpOffset).getReg();
721 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpStore))
724 if (!
I.getNumMemOperands()) {
725 assert(
I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
727 TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
732 return MIB.constrainAllUses(
TII,
TRI, RBI);
// llvm.stacksave -> OpSaveMemoryINTEL; requires the
// SPV_INTEL_variable_length_array extension (the error-reporting call the
// message strings belong to is elided from this extract).
735bool SPIRVInstructionSelector::selectStackSave(
Register ResVReg,
738 if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
740 "llvm.stacksave intrinsic: this instruction requires the following "
741 "SPIR-V extension: SPV_INTEL_variable_length_array",
744 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSaveMemoryINTEL))
746 .
addUse(GR.getSPIRVTypeID(ResType))
// llvm.stackrestore -> OpRestoreMemoryINTEL; same extension requirement
// as selectStackSave. Bails out (elided branch) when operand 0 is not a
// register.
750bool SPIRVInstructionSelector::selectStackRestore(
MachineInstr &
I)
const {
751 if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
753 "llvm.stackrestore intrinsic: this instruction requires the following "
754 "SPIR-V extension: SPV_INTEL_variable_length_array",
756 if (!
I.getOperand(0).isReg())
759 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpRestoreMemoryINTEL))
760 .
addUse(
I.getOperand(0).getReg())
// Lowers G_MEMCPY/G_MEMMOVE/G_MEMSET to OpCopyMemorySized. For G_MEMSET a
// UniformConstant global array of i8 filled with the memset value is
// synthesized (OpVariable) and bitcast to an i8* source pointer.
// (Extract is elided: several statements between fragments are missing.)
764bool SPIRVInstructionSelector::selectMemOperation(
Register ResVReg,
767 Register SrcReg =
I.getOperand(1).getReg();
768 if (
I.getOpcode() == TargetOpcode::G_MEMSET) {
769 assert(
I.getOperand(1).isReg() &&
I.getOperand(2).isReg());
// Build [Num x i8] and a UniformConstant pointer type for the fill data.
772 SPIRVType *ValTy = GR.getOrCreateSPIRVIntegerType(8,
I,
TII);
773 SPIRVType *ArrTy = GR.getOrCreateSPIRVArrayType(ValTy, Num,
I,
TII);
775 SPIRVType *VarTy = GR.getOrCreateSPIRVPointerType(
776 ArrTy,
I,
TII, SPIRV::StorageClass::UniformConstant);
786 GR.add(GV, GR.CurMF, VarReg);
789 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVariable))
791 .
addUse(GR.getSPIRVTypeID(VarTy))
792 .
addImm(SPIRV::StorageClass::UniformConstant)
// Re-point SrcReg at the synthesized array through an i8* bitcast.
795 SPIRVType *SourceTy = GR.getOrCreateSPIRVPointerType(
796 ValTy,
I,
TII, SPIRV::StorageClass::UniformConstant);
798 selectUnOpWithSrc(SrcReg, SourceTy,
I, VarReg, SPIRV::OpBitcast);
800 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCopyMemorySized))
801 .
addUse(
I.getOperand(0).getReg())
803 .
addUse(
I.getOperand(2).getReg());
804 if (
I.getNumMemOperands())
// Propagate the result register via COPY when it differs from the
// destination operand of the emitted copy.
807 if (ResVReg.
isValid() && ResVReg != MIB->getOperand(0).getReg())
808 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(TargetOpcode::COPY), ResVReg)
809 .
addUse(MIB->getOperand(0).getReg());
// Generic atomic-RMW selector: builds i32 constants for the scope and
// memory-semantics operands and emits the given OpAtomic* opcode. If
// NegateOpcode is nonzero, the value operand is first negated into a
// temporary (used to express FSUB as FAdd of a negation).
// (Extract is elided: the final BuildMI and its operands are partial.)
813bool SPIRVInstructionSelector::selectAtomicRMW(
Register ResVReg,
817 unsigned NegateOpcode)
const {
822 Register ScopeReg = buildI32Constant(Scope,
I);
830 Register MemSemReg = buildI32Constant(MemSem ,
I);
833 Register ValueReg =
I.getOperand(2).getReg();
834 if (NegateOpcode != 0) {
// Translate G_ATOMICRMW_FSUB to OpAtomicFAddEXT with negated value.
836 Register TmpReg =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
837 Result |= selectUnOpWithSrc(TmpReg, ResType,
I, ValueReg, NegateOpcode);
843 .
addUse(GR.getSPIRVTypeID(ResType))
// G_UNMERGE_VALUES on a vector source: emits one OpCompositeExtract per
// def, pulling element i out of the source vector. Requires the argument
// to have an OpTypeVector SPIR-V type (error path elided). Each result
// vreg that lacks a SPIR-V type is retyped to the vector's scalar type.
852bool SPIRVInstructionSelector::selectUnmergeValues(
MachineInstr &
I)
const {
// The source register is the last operand of the instruction.
853 unsigned ArgI =
I.getNumOperands() - 1;
855 I.getOperand(ArgI).isReg() ?
I.getOperand(ArgI).getReg() :
Register(0);
857 SrcReg.
isValid() ? GR.getSPIRVTypeForVReg(SrcReg) :
nullptr;
858 if (!DefType || DefType->
getOpcode() != SPIRV::OpTypeVector)
860 "cannot select G_UNMERGE_VALUES with a non-vector argument");
866 for (
unsigned i = 0; i <
I.getNumDefs(); ++i) {
867 Register ResVReg =
I.getOperand(i).getReg();
868 SPIRVType *ResType = GR.getSPIRVTypeForVReg(ResVReg);
// No type assigned yet: use the vector's element (scalar) type.
871 ResType = ScalarType;
872 MRI->setRegClass(ResVReg, &SPIRV::IDRegClass);
873 MRI->setType(ResVReg,
LLT::scalar(GR.getScalarOrVectorBitWidth(ResType)));
874 GR.assignSPIRVTypeToVReg(ResType, ResVReg, *GR.CurMF);
877 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
879 .
addUse(GR.getSPIRVTypeID(ResType))
881 .
addImm(
static_cast<int64_t
>(i));
882 Res |= MIB.constrainAllUses(
TII,
TRI, RBI);
// G_FENCE -> OpMemoryBarrier with i32-constant scope and memory-semantics
// operands (the computation of MemSem/Scope from the fence's ordering is
// elided from this extract).
887bool SPIRVInstructionSelector::selectFence(
MachineInstr &
I)
const {
890 Register MemSemReg = buildI32Constant(MemSem,
I);
893 Register ScopeReg = buildI32Constant(Scope,
I);
895 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpMemoryBarrier))
// G_ATOMIC_CMPXCHG -> OpAtomicCompareExchange, followed (per the visible
// fragments) by a bool comparison of the returned value (OpIEqual-style
// into CmpSuccReg with BoolTy) and composite construction into the result
// — the exact trailing opcodes are elided; confirm against full source.
// For the plain G_ATOMIC_CMPXCHG form the scope/semantics constants are
// synthesized; for the intrinsic form they come in as operands 5-7.
901bool SPIRVInstructionSelector::selectAtomicCmpXchg(
Register ResVReg,
908 if (!isa<GIntrinsic>(
I)) {
913 ScopeReg = buildI32Constant(Scope,
I);
915 unsigned ScSem =
static_cast<uint32_t>(
919 MemSemEqReg = buildI32Constant(MemSemEq,
I);
// Reuse the "equal" semantics constant when both orderings agree.
923 MemSemEq == MemSemNeq ? MemSemEqReg : buildI32Constant(MemSemNeq,
I);
925 ScopeReg =
I.getOperand(5).getReg();
926 MemSemEqReg =
I.getOperand(6).getReg();
927 MemSemNeqReg =
I.getOperand(7).getReg();
932 SPIRVType *SpvValTy = GR.getSPIRVTypeForVReg(Val);
933 Register ACmpRes =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
936 BuildMI(*
I.getParent(),
I,
DL,
TII.get(SPIRV::OpAtomicCompareExchange))
938 .
addUse(GR.getSPIRVTypeID(SpvValTy))
946 Register CmpSuccReg =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
950 .
addUse(GR.getSPIRVTypeID(BoolTy))
954 Register TmpReg =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
957 .
addUse(GR.getSPIRVTypeID(ResType))
964 .
addUse(GR.getSPIRVTypeID(ResType))
974 case SPIRV::StorageClass::Workgroup:
975 case SPIRV::StorageClass::CrossWorkgroup:
976 case SPIRV::StorageClass::Function:
985 case SPIRV::StorageClass::DeviceOnlyINTEL:
986 case SPIRV::StorageClass::HostOnlyINTEL:
// G_ADDRSPACE_CAST lowering. Special case first: if the single user of
// the result is a constant-composite/OpVariable-style instruction, the
// cast is folded into an OpSpecConstantOp producing a Generic pointer.
// Otherwise dispatch on (source storage class, dest storage class):
// to-Generic -> OpPtrCastToGeneric, from-Generic -> OpGenericCastToPtr,
// otherwise a two-step cast through a Generic temporary; CrossWorkgroup
// INTEL casts and a plain OpBitcast fallback close the list.
// (Extract is elided: the branch conditions between fragments are missing.)
998bool SPIRVInstructionSelector::selectAddrSpaceCast(
Register ResVReg,
// "Exactly one use" test: non-empty and the second iterator equals end.
1003 auto UIs =
MRI->use_instructions(ResVReg);
1004 if (!UIs.empty() && ++UIs.begin() == UIs.end() &&
1005 (UIs.begin()->getOpcode() == SPIRV::OpConstantComposite ||
1006 UIs.begin()->getOpcode() == SPIRV::OpVariable ||
1008 Register NewReg =
I.getOperand(1).getReg();
1010 SPIRVType *SpvBaseTy = GR.getOrCreateSPIRVIntegerType(8,
I,
TII);
1011 ResType = GR.getOrCreateSPIRVPointerType(SpvBaseTy,
I,
TII,
1012 SPIRV::StorageClass::Generic);
1014 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSpecConstantOp))
1016 .
addUse(GR.getSPIRVTypeID(ResType))
1022 Register SrcPtr =
I.getOperand(1).getReg();
1023 SPIRVType *SrcPtrTy = GR.getSPIRVTypeForVReg(SrcPtr);
1024 SPIRV::StorageClass::StorageClass SrcSC = GR.getPointerStorageClass(SrcPtr);
1025 SPIRV::StorageClass::StorageClass DstSC = GR.getPointerStorageClass(ResVReg);
1033 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpPtrCastToGeneric);
1036 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpGenericCastToPtr);
// Neither side Generic: cast source to a Generic pointer first, then
// from Generic to the destination class.
1039 Register Tmp =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
1040 SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
1041 SrcPtrTy,
I,
TII, SPIRV::StorageClass::Generic);
1046 .
addUse(GR.getSPIRVTypeID(GenericPtrTy))
1051 .
addUse(GR.getSPIRVTypeID(ResType))
1059 return selectUnOp(ResVReg, ResType,
I,
1060 SPIRV::OpPtrCastToCrossWorkgroupINTEL);
1062 return selectUnOp(ResVReg, ResType,
I,
1063 SPIRV::OpCrossWorkgroupCastToPtrINTEL);
1067 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpBitcast);
1074 return SPIRV::OpFOrdEqual;
1076 return SPIRV::OpFOrdGreaterThanEqual;
1078 return SPIRV::OpFOrdGreaterThan;
1080 return SPIRV::OpFOrdLessThanEqual;
1082 return SPIRV::OpFOrdLessThan;
1084 return SPIRV::OpFOrdNotEqual;
1086 return SPIRV::OpOrdered;
1088 return SPIRV::OpFUnordEqual;
1090 return SPIRV::OpFUnordGreaterThanEqual;
1092 return SPIRV::OpFUnordGreaterThan;
1094 return SPIRV::OpFUnordLessThanEqual;
1096 return SPIRV::OpFUnordLessThan;
1098 return SPIRV::OpFUnordNotEqual;
1100 return SPIRV::OpUnordered;
1110 return SPIRV::OpIEqual;
1112 return SPIRV::OpINotEqual;
1114 return SPIRV::OpSGreaterThanEqual;
1116 return SPIRV::OpSGreaterThan;
1118 return SPIRV::OpSLessThanEqual;
1120 return SPIRV::OpSLessThan;
1122 return SPIRV::OpUGreaterThanEqual;
1124 return SPIRV::OpUGreaterThan;
1126 return SPIRV::OpULessThanEqual;
1128 return SPIRV::OpULessThan;
1137 return SPIRV::OpPtrEqual;
1139 return SPIRV::OpPtrNotEqual;
1150 return SPIRV::OpLogicalEqual;
1152 return SPIRV::OpLogicalNotEqual;
// G_BITREVERSE -> OpBitReverse on operand 1.
1158bool SPIRVInstructionSelector::selectBitreverse(
Register ResVReg,
1162 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBitReverse))
1164 .
addUse(GR.getSPIRVTypeID(ResType))
1165 .
addUse(
I.getOperand(1).getReg())
// G_FREEZE lowering. Inspects the defining instruction of the frozen
// operand: if it is (via ASSIGN_TYPE) a G_IMPLICIT_DEF or an OpUndef,
// `Reg` is redirected accordingly and the result is materialized as
// OpConstantNull (a fixed value replacing the undef); otherwise a plain
// COPY of the operand is emitted. (Extract elided between fragments.)
1169bool SPIRVInstructionSelector::selectFreeze(
Register ResVReg,
1177 if (!
I.getOperand(0).isReg() || !
I.getOperand(1).isReg())
1179 Register OpReg =
I.getOperand(1).getReg();
1182 switch (
Def->getOpcode()) {
1183 case SPIRV::ASSIGN_TYPE:
1185 MRI->getVRegDef(
Def->getOperand(1).getReg())) {
1186 if (AssignToDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
1187 Reg =
Def->getOperand(2).getReg();
1190 case SPIRV::OpUndef:
1191 Reg =
Def->getOperand(1).getReg();
1194 unsigned DestOpCode;
1195 if (
Reg.isValid()) {
// Undef source: freeze to a concrete null constant.
1196 DestOpCode = SPIRV::OpConstantNull;
1198 DestOpCode = TargetOpcode::COPY;
1201 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(DestOpCode))
1202 .
addDef(
I.getOperand(0).getReg())
// G_BUILD_VECTOR of constants -> OpConstantComposite. The visible lambda
// body checks each operand resolves (through ASSIGN_TYPE) to a
// G_CONSTANT/G_FCONSTANT def; all explicit source operands are appended
// as constituents. (Extract elided between fragments.)
1209bool SPIRVInstructionSelector::selectConstVector(
Register ResVReg,
1219 SPIRVType *ConstTy = this->MRI->getVRegDef(MO.getReg());
1220 assert(ConstTy && ConstTy->getOpcode() == SPIRV::ASSIGN_TYPE &&
1221 ConstTy->getOperand(1).isReg());
1222 Register ConstReg = ConstTy->getOperand(1).getReg();
1223 const MachineInstr *Const = this->MRI->getVRegDef(ConstReg);
1225 return (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
1226 Const->getOpcode() == TargetOpcode::G_FCONSTANT);
1229 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
1230 TII.get(SPIRV::OpConstantComposite))
1232 .
addUse(GR.getSPIRVTypeID(ResType));
1233 for (
unsigned i =
I.getNumExplicitDefs(); i <
I.getNumExplicitOperands(); ++i)
1234 MIB.
addUse(
I.getOperand(i).getReg());
1244 if (OpDef->
getOpcode() == SPIRV::ASSIGN_TYPE &&
1249 unsigned N = OpDef->
getOpcode() == TargetOpcode::G_CONSTANT
1257 if (OpDef->
getOpcode() == SPIRV::ASSIGN_TYPE &&
1262 return OpDef->
getOpcode() == TargetOpcode::G_CONSTANT ||
1263 OpDef->
getOpcode() == TargetOpcode::G_FCONSTANT;
// G_SPLAT_VECTOR -> OpConstantComposite (constant splat value) or
// OpCompositeConstruct (non-constant), repeating the single source
// operand N times, where N is the vector/array element count.
// A non-constant splat needs at least two constituents (error elided).
1273bool SPIRVInstructionSelector::selectSplatVector(
Register ResVReg,
1277 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
1278 N = GR.getScalarOrVectorComponentCount(ResType);
1279 else if (ResType->
getOpcode() == SPIRV::OpTypeArray)
1284 unsigned OpIdx =
I.getNumExplicitDefs();
1285 if (!
I.getOperand(OpIdx).isReg())
1289 Register OpReg =
I.getOperand(OpIdx).getReg();
1292 if (!IsConst &&
N < 2)
1294 "There must be at least two constituent operands in a vector");
1296 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
1297 TII.get(IsConst ? SPIRV::OpConstantComposite
1298 : SPIRV::OpCompositeConstruct))
1300 .
addUse(GR.getSPIRVTypeID(ResType));
1301 for (
unsigned i = 0; i <
N; ++i)
// Emits the already-chosen comparison opcode (CmpOpc) on operands 2 and
// 3 of the compare instruction; both operand types must match (asserted).
1306bool SPIRVInstructionSelector::selectCmp(
Register ResVReg,
1310 Register Cmp0 =
I.getOperand(2).getReg();
1311 Register Cmp1 =
I.getOperand(3).getReg();
1312 assert(GR.getSPIRVTypeForVReg(Cmp0)->getOpcode() ==
1313 GR.getSPIRVTypeForVReg(Cmp1)->getOpcode() &&
1314 "CMP operands should have the same type");
1315 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(CmpOpc))
1317 .
addUse(GR.getSPIRVTypeID(ResType))
// G_ICMP: chooses pointer, boolean, or integer comparison opcode based on
// the type of the first compared operand (the mapping calls themselves
// are elided), then defers to selectCmp.
1323bool SPIRVInstructionSelector::selectICmp(
Register ResVReg,
1326 auto Pred =
I.getOperand(1).getPredicate();
1329 Register CmpOperand =
I.getOperand(2).getReg();
1330 if (GR.isScalarOfType(CmpOperand, SPIRV::OpTypePointer))
1332 else if (GR.isScalarOrVectorOfType(CmpOperand, SPIRV::OpTypeBool))
1336 return selectCmp(ResVReg, ResType, CmpOpc,
I);
1342 assert(
I.getOpcode() == TargetOpcode::G_FCONSTANT && OpIdx == -1 &&
1343 "Expected G_FCONSTANT");
1344 const ConstantFP *FPImm =
I.getOperand(1).getFPImm();
1351 assert(
I.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1352 "Expected G_CONSTANT");
1353 addNumImm(
I.getOperand(1).getCImm()->getValue(), MIB);
1361 ResType ? ResType : GR.getOrCreateSPIRVIntegerType(32,
I,
TII);
1363 auto ConstInt = ConstantInt::get(LLVMTy, Val);
1364 Register NewReg = GR.find(ConstInt, GR.CurMF);
1367 GR.add(ConstInt, GR.CurMF, NewReg);
1371 MI =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantNull))
1373 .
addUse(GR.getSPIRVTypeID(SpvI32Ty));
1375 MI =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantI))
1377 .
addUse(GR.getSPIRVTypeID(SpvI32Ty))
// G_FCMP: maps the FP predicate to a SPIR-V opcode (mapping elided) and
// defers to selectCmp.
1385bool SPIRVInstructionSelector::selectFCmp(
Register ResVReg,
1389 return selectCmp(ResVReg, ResType, CmpOp,
I);
1394 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
1395 return GR.getOrCreateConsIntVector(0,
I, ResType,
TII);
1396 return GR.getOrCreateConstInt(0,
I, ResType,
TII);
1402 unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
1405 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
// Converts a boolean operand into 0/1 (or 0/-1 when IsSigned) values of
// ResType via OpSelect: scalar condition uses OpSelectSISCond, vector
// condition OpSelectSIVCond. Used to lower bool->int extensions.
1410bool SPIRVInstructionSelector::selectSelect(
Register ResVReg,
1413 bool IsSigned)
const {
1415 Register ZeroReg = buildZerosVal(ResType,
I);
1416 Register OneReg = buildOnesVal(IsSigned, ResType,
I);
1418 GR.isScalarOfType(
I.getOperand(1).getReg(), SPIRV::OpTypeBool);
1420 IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectSIVCond;
1421 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
1423 .
addUse(GR.getSPIRVTypeID(ResType))
1424 .
addUse(
I.getOperand(1).getReg())
// G_SITOFP/G_UITOFP: if the source is boolean, first materialize it as an
// integer of the result's bit width (vector-shaped when the result is a
// vector) via selectSelect, then emit the given convert opcode
// (OpConvertSToF / OpConvertUToF).
1430bool SPIRVInstructionSelector::selectIToF(
Register ResVReg,
1433 unsigned Opcode)
const {
1434 Register SrcReg =
I.getOperand(1).getReg();
1437 if (GR.isScalarOrVectorOfType(
I.getOperand(1).getReg(), SPIRV::OpTypeBool)) {
1438 unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
1440 if (ResType->
getOpcode() == SPIRV::OpTypeVector) {
1442 TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts,
I,
TII);
1444 SrcReg =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
1445 selectSelect(SrcReg, TmpType,
I,
false);
1447 return selectUnOpWithSrc(ResVReg, ResType,
I, SrcReg, Opcode);
// G_SEXT/G_ZEXT/G_ANYEXT: boolean sources are extended through
// selectSelect (0/1 or 0/-1); other sources use OpSConvert/OpUConvert.
1450bool SPIRVInstructionSelector::selectExt(
Register ResVReg,
1453 if (GR.isScalarOrVectorOfType(
I.getOperand(1).getReg(), SPIRV::OpTypeBool))
1454 return selectSelect(ResVReg, ResType,
I, IsSigned);
1455 unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
1456 return selectUnOp(ResVReg, ResType,
I, Opcode);
// Converts an integer (scalar or vector) to bool: masks the low bit via
// OpBitwiseAnd{S,V} with an all-ones(=1) constant, then compares the
// result against zero-ish (OpINotEqual; second operand elided).
1459bool SPIRVInstructionSelector::selectIntToBool(
Register IntReg,
1465 Register BitIntReg =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
1466 bool IsVectorTy = IntTy->
getOpcode() == SPIRV::OpTypeVector;
1467 unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
1469 Register One = buildOnesVal(
false, IntTy,
I);
1473 .
addUse(GR.getSPIRVTypeID(IntTy))
1477 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpINotEqual))
1479 .
addUse(GR.getSPIRVTypeID(BoolTy))
// G_TRUNC: truncation to bool goes through selectIntToBool; otherwise the
// signedness of the result type picks OpSConvert vs OpUConvert.
1485bool SPIRVInstructionSelector::selectTrunc(
Register ResVReg,
1488 if (GR.isScalarOrVectorOfType(ResVReg, SPIRV::OpTypeBool)) {
1489 Register IntReg =
I.getOperand(1).getReg();
1490 const SPIRVType *ArgType = GR.getSPIRVTypeForVReg(IntReg);
1491 return selectIntToBool(IntReg, ResVReg,
I, ArgType, ResType);
1493 bool IsSigned = GR.isScalarOrVectorSigned(ResType);
1494 unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
1495 return selectUnOp(ResVReg, ResType,
I, Opcode);
// G_CONSTANT materialization. Zero pointers/events become OpConstantNull;
// integer constants become OpConstantI (or a COPY of an already-existing
// constant in the elided branch around line 1516); the immediate payload
// lines are elided from this extract.
1498bool SPIRVInstructionSelector::selectConst(
Register ResVReg,
1502 unsigned TyOpcode = ResType->
getOpcode();
// Non-zero pointer constants are not representable here.
1503 assert(TyOpcode != SPIRV::OpTypePointer ||
Imm.isZero());
1505 if ((TyOpcode == SPIRV::OpTypePointer || TyOpcode == SPIRV::OpTypeEvent) &&
1507 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantNull))
1509 .
addUse(GR.getSPIRVTypeID(ResType))
1511 if (TyOpcode == SPIRV::OpTypeInt) {
1512 assert(
Imm.getBitWidth() <= 64 &&
"Unsupported integer width!");
1516 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(TargetOpcode::COPY))
1521 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantI))
1523 .
addUse(GR.getSPIRVTypeID(ResType));
// G_IMPLICIT_DEF -> OpUndef of the result type.
1530bool SPIRVInstructionSelector::selectOpUndef(
Register ResVReg,
1533 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUndef))
1535 .
addUse(GR.getSPIRVTypeID(ResType))
1542 if (TypeInst->
getOpcode() != SPIRV::ASSIGN_TYPE)
1546 return ImmInst->
getOpcode() == TargetOpcode::G_CONSTANT;
// spv_insertv -> OpCompositeInsert: operand 3 is the object to insert,
// operand 2 the composite, and operands from index 4 onward are the
// static index chain (appended in the elided loop body).
1556bool SPIRVInstructionSelector::selectInsertVal(
Register ResVReg,
1560 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeInsert))
1562 .
addUse(GR.getSPIRVTypeID(ResType))
1564 .
addUse(
I.getOperand(3).getReg())
1566 .
addUse(
I.getOperand(2).getReg());
1567 for (
unsigned i = 4; i <
I.getNumOperands(); i++)
// spv_extractv -> OpCompositeExtract: operand 2 is the composite, and
// operands from index 3 onward are the static index chain (appended in
// the elided loop body).
1572bool SPIRVInstructionSelector::selectExtractVal(
Register ResVReg,
1576 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
1578 .
addUse(GR.getSPIRVTypeID(ResType))
1579 .
addUse(
I.getOperand(2).getReg());
1580 for (
unsigned i = 3; i <
I.getNumOperands(); i++)
// spv_insertelt: a constant index (elided check) is handled as a
// composite insert; a dynamic index uses OpVectorInsertDynamic with
// vector (op 2), component (op 3), and index (op 4).
1585bool SPIRVInstructionSelector::selectInsertElt(
Register ResVReg,
1589 return selectInsertVal(ResVReg, ResType,
I);
1591 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVectorInsertDynamic))
1593 .
addUse(GR.getSPIRVTypeID(ResType))
1594 .
addUse(
I.getOperand(2).getReg())
1595 .
addUse(
I.getOperand(3).getReg())
1596 .
addUse(
I.getOperand(4).getReg())
// spv_extractelt: a constant index (elided check) is handled as a
// composite extract; a dynamic index uses OpVectorExtractDynamic with
// vector (op 2) and index (op 3).
1600bool SPIRVInstructionSelector::selectExtractElt(
Register ResVReg,
1604 return selectExtractVal(ResVReg, ResType,
I);
1606 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVectorExtractDynamic))
1608 .
addUse(GR.getSPIRVTypeID(ResType))
1609 .
addUse(
I.getOperand(2).getReg())
1610 .
addUse(
I.getOperand(3).getReg())
// spv_gep -> access chain. Vulkan environments use the (InBounds)
// AccessChain forms; otherwise the (InBounds)PtrAccessChain forms, which
// carry an extra leading "element" index — hence StartingIndex differs
// per opcode family. Operand 2 is the in-bounds flag, operand 3 the base
// pointer, and the remaining explicit operands are the indices.
1614bool SPIRVInstructionSelector::selectGEP(
Register ResVReg,
1617 const bool IsGEPInBounds =
I.getOperand(2).getImm();
1622 const unsigned Opcode = STI.isVulkanEnv()
1623 ? (IsGEPInBounds ? SPIRV::OpInBoundsAccessChain
1624 : SPIRV::OpAccessChain)
1625 : (IsGEPInBounds ? SPIRV::OpInBoundsPtrAccessChain
1626 : SPIRV::OpPtrAccessChain);
1628 auto Res =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
1630 .
addUse(GR.getSPIRVTypeID(ResType))
1632 .
addUse(
I.getOperand(3).getReg());
1634 const unsigned StartingIndex =
1635 (Opcode == SPIRV::OpAccessChain || Opcode == SPIRV::OpInBoundsAccessChain)
1638 for (
unsigned i = StartingIndex; i <
I.getNumExplicitOperands(); ++i)
1639 Res.addUse(
I.getOperand(i).getReg());
1640 return Res.constrainAllUses(
TII,
TRI, RBI);
1644bool SPIRVInstructionSelector::wrapIntoSpecConstantOp(
1647 unsigned Lim =
I.getNumExplicitOperands();
1648 for (
unsigned i =
I.getNumExplicitDefs() + 1; i < Lim; ++i) {
1649 Register OpReg =
I.getOperand(i).getReg();
1651 SPIRVType *OpType = GR.getSPIRVTypeForVReg(OpReg);
1653 OpDefine->
getOpcode() == TargetOpcode::G_ADDRSPACE_CAST) {
1660 Register WrapReg = GR.find(OpDefine, MF);
1666 WrapReg =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
1667 GR.add(OpDefine, MF, WrapReg);
1671 GR.assignSPIRVTypeToVReg(OpType, WrapReg, *MF);
1675 .
addUse(GR.getSPIRVTypeID(OpType))
1685bool SPIRVInstructionSelector::selectIntrinsic(
Register ResVReg,
1691 case Intrinsic::spv_load:
1692 return selectLoad(ResVReg, ResType,
I);
1693 case Intrinsic::spv_store:
1694 return selectStore(
I);
1695 case Intrinsic::spv_extractv:
1696 return selectExtractVal(ResVReg, ResType,
I);
1697 case Intrinsic::spv_insertv:
1698 return selectInsertVal(ResVReg, ResType,
I);
1699 case Intrinsic::spv_extractelt:
1700 return selectExtractElt(ResVReg, ResType,
I);
1701 case Intrinsic::spv_insertelt:
1702 return selectInsertElt(ResVReg, ResType,
I);
1703 case Intrinsic::spv_gep:
1704 return selectGEP(ResVReg, ResType,
I);
1705 case Intrinsic::spv_unref_global:
1706 case Intrinsic::spv_init_global: {
1709 ?
MRI->getVRegDef(
I.getOperand(2).getReg())
1712 return selectGlobalValue(
MI->getOperand(0).getReg(), *
MI,
Init);
1714 case Intrinsic::spv_undef: {
1715 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUndef))
1717 .
addUse(GR.getSPIRVTypeID(ResType));
1720 case Intrinsic::spv_const_composite: {
1722 bool IsNull =
I.getNumExplicitDefs() + 1 ==
I.getNumExplicitOperands();
1724 unsigned Opcode = SPIRV::OpConstantNull;
1727 Opcode = SPIRV::OpConstantComposite;
1728 if (!wrapIntoSpecConstantOp(
I, CompositeArgs))
1731 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(Opcode))
1733 .
addUse(GR.getSPIRVTypeID(ResType));
1736 for (
Register OpReg : CompositeArgs)
1741 case Intrinsic::spv_assign_name: {
1742 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpName));
1743 MIB.
addUse(
I.getOperand(
I.getNumExplicitDefs() + 1).getReg());
1744 for (
unsigned i =
I.getNumExplicitDefs() + 2;
1745 i <
I.getNumExplicitOperands(); ++i) {
1746 MIB.
addImm(
I.getOperand(i).getImm());
1750 case Intrinsic::spv_switch: {
1751 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSwitch));
1752 for (
unsigned i = 1; i <
I.getNumExplicitOperands(); ++i) {
1753 if (
I.getOperand(i).isReg())
1754 MIB.
addReg(
I.getOperand(i).getReg());
1755 else if (
I.getOperand(i).isCImm())
1756 addNumImm(
I.getOperand(i).getCImm()->getValue(), MIB);
1757 else if (
I.getOperand(i).isMBB())
1758 MIB.
addMBB(
I.getOperand(i).getMBB());
1764 case Intrinsic::spv_cmpxchg:
1765 return selectAtomicCmpXchg(ResVReg, ResType,
I);
1766 case Intrinsic::spv_unreachable:
1767 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUnreachable));
1769 case Intrinsic::spv_alloca:
1770 return selectFrameIndex(ResVReg, ResType,
I);
1771 case Intrinsic::spv_alloca_array:
1772 return selectAllocaArray(ResVReg, ResType,
I);
1773 case Intrinsic::spv_assume:
1774 if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
1775 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpAssumeTrueKHR))
1776 .
addUse(
I.getOperand(1).getReg());
1778 case Intrinsic::spv_expect:
1779 if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
1780 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExpectKHR))
1782 .
addUse(GR.getSPIRVTypeID(ResType))
1783 .
addUse(
I.getOperand(2).getReg())
1784 .
addUse(
I.getOperand(3).getReg());
1786 case Intrinsic::spv_thread_id:
1787 return selectSpvThreadId(ResVReg, ResType,
I);
1788 case Intrinsic::spv_lifetime_start:
1789 case Intrinsic::spv_lifetime_end: {
1790 unsigned Op = IID == Intrinsic::spv_lifetime_start ? SPIRV::OpLifetimeStart
1791 : SPIRV::OpLifetimeStop;
1792 int64_t
Size =
I.getOperand(
I.getNumExplicitDefs() + 1).getImm();
1793 Register PtrReg =
I.getOperand(
I.getNumExplicitDefs() + 2).getReg();
1794 unsigned PonteeOpType = GR.getPointeeTypeOp(PtrReg);
1795 bool IsNonvoidPtr = PonteeOpType != 0 && PonteeOpType != SPIRV::OpTypeVoid;
1796 if (
Size == -1 || IsNonvoidPtr)
1801 std::string DiagMsg;
1804 DiagMsg =
"Intrinsic selection not implemented: " + DiagMsg;
1811bool SPIRVInstructionSelector::selectAllocaArray(
Register ResVReg,
1818 TII.get(SPIRV::OpVariableLengthArrayINTEL))
1820 .
addUse(GR.getSPIRVTypeID(ResType))
1821 .
addUse(
I.getOperand(2).getReg())
1825bool SPIRVInstructionSelector::selectFrameIndex(
Register ResVReg,
1828 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVariable))
1830 .
addUse(GR.getSPIRVTypeID(ResType))
1835bool SPIRVInstructionSelector::selectBranch(
MachineInstr &
I)
const {
1842 if (PrevI !=
nullptr && PrevI->
getOpcode() == TargetOpcode::G_BRCOND) {
1843 return BuildMI(
MBB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBranchConditional))
1846 .
addMBB(
I.getOperand(0).getMBB())
1850 .
addMBB(
I.getOperand(0).getMBB())
1854bool SPIRVInstructionSelector::selectBranchCond(
MachineInstr &
I)
const {
1867 if (NextI !=
nullptr && NextI->
getOpcode() == SPIRV::OpBranchConditional)
1874 return BuildMI(
MBB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBranchConditional))
1875 .
addUse(
I.getOperand(0).getReg())
1876 .
addMBB(
I.getOperand(1).getMBB())
1881bool SPIRVInstructionSelector::selectPhi(
Register ResVReg,
1884 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpPhi))
1886 .
addUse(GR.getSPIRVTypeID(ResType));
1887 const unsigned NumOps =
I.getNumOperands();
1888 for (
unsigned i = 1; i < NumOps; i += 2) {
1889 MIB.
addUse(
I.getOperand(i + 0).getReg());
1890 MIB.
addMBB(
I.getOperand(i + 1).getMBB());
1895bool SPIRVInstructionSelector::selectGlobalValue(
1905 SPIRV::AccessQualifier::ReadWrite,
false);
1906 PointerBaseType = GR.getOrCreateSPIRVArrayType(
1909 PointerBaseType = GR.getOrCreateSPIRVType(
1910 GVType, MIRBuilder, SPIRV::AccessQualifier::ReadWrite,
false);
1912 SPIRVType *ResType = GR.getOrCreateSPIRVPointerType(
1913 PointerBaseType,
I,
TII,
1916 std::string GlobalIdent;
1918 unsigned &
ID = UnnamedGlobalIDs[GV];
1920 ID = UnnamedGlobalIDs.size();
1921 GlobalIdent =
"__unnamed_" +
Twine(
ID).
str();
1936 if (isa<Function>(GV)) {
1939 Register NewReg = GR.find(ConstVal, GR.CurMF);
1942 GR.add(ConstVal, GR.CurMF, NewReg);
1944 STI.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers)
1945 ? dyn_cast<Function>(GV)
1953 MRI->setRegClass(FuncVReg, &SPIRV::IDRegClass);
1956 TII.get(SPIRV::OpConstantFunctionPointerINTEL))
1958 .
addUse(GR.getSPIRVTypeID(ResType))
1964 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantNull))
1966 .
addUse(GR.getSPIRVTypeID(ResType))
1969 assert(NewReg != ResVReg);
1970 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(TargetOpcode::COPY))
1975 auto GlobalVar = cast<GlobalVariable>(GV);
1978 bool HasInit =
GlobalVar->hasInitializer() &&
1979 !isa<UndefValue>(
GlobalVar->getInitializer());
1982 if (HasInit && !
Init)
1986 SPIRV::StorageClass::StorageClass Storage =
1989 Storage != SPIRV::StorageClass::Function;
1990 SPIRV::LinkageType::LinkageType LnkType =
1992 ? SPIRV::LinkageType::Import
1994 STI.canUseExtension(SPIRV::Extension::SPV_KHR_linkonce_odr)
1995 ? SPIRV::LinkageType::LinkOnceODR
1996 : SPIRV::LinkageType::Export);
1998 Register Reg = GR.buildGlobalVariable(ResVReg, ResType, GlobalIdent, GV,
2000 HasLnkTy, LnkType, MIRBuilder,
true);
2001 return Reg.isValid();
2004bool SPIRVInstructionSelector::selectLog10(
Register ResVReg,
2007 if (STI.canUseExtInstSet(SPIRV::InstructionSet::OpenCL_std)) {
2008 return selectExtInst(ResVReg, ResType,
I, CL::log10);
2020 Register VarReg =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
2022 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
2024 .
addUse(GR.getSPIRVTypeID(ResType))
2025 .
addImm(
static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
2027 .
add(
I.getOperand(1))
2032 ResType->
getOpcode() == SPIRV::OpTypeFloat);
2035 ResType->
getOpcode() == SPIRV::OpTypeVector
2039 GR.buildConstantFP(
APFloat(0.30103f), MIRBuilder, SpirvScalarType);
2042 auto Opcode = ResType->
getOpcode() == SPIRV::OpTypeVector
2043 ? SPIRV::OpVectorTimesScalar
2047 .
addUse(GR.getSPIRVTypeID(ResType))
2055bool SPIRVInstructionSelector::selectSpvThreadId(
Register ResVReg,
2063 const SPIRVType *U32Type = GR.getOrCreateSPIRVIntegerType(32, MIRBuilder);
2065 GR.getOrCreateSPIRVVectorType(U32Type, 3, MIRBuilder);
2066 const SPIRVType *PtrType = GR.getOrCreateSPIRVPointerType(
2067 Vec3Ty, MIRBuilder, SPIRV::StorageClass::Input);
2071 MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::IDRegClass);
2072 MIRBuilder.getMRI()->setType(NewRegister,
LLT::pointer(0, 32));
2073 GR.assignSPIRVTypeToVReg(PtrType, NewRegister, MIRBuilder.getMF());
2076 Register Variable = GR.buildGlobalVariable(
2077 NewRegister, PtrType,
2079 SPIRV::StorageClass::Input,
nullptr,
true,
true,
2080 SPIRV::LinkageType::Import, MIRBuilder,
false);
2084 Register LoadedRegister =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
2085 MIRBuilder.getMRI()->setType(LoadedRegister,
LLT::pointer(0, 32));
2086 GR.assignSPIRVTypeToVReg(Vec3Ty, LoadedRegister, MIRBuilder.getMF());
2089 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
2091 .
addUse(GR.getSPIRVTypeID(Vec3Ty))
2096 assert(
I.getOperand(2).isReg());
2097 Register ThreadIdReg =
I.getOperand(2).getReg();
2103 assert(Const &&
Const->getOpcode() == TargetOpcode::G_CONSTANT);
2109 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
2111 .
addUse(GR.getSPIRVTypeID(ResType))
2122 return new SPIRVInstructionSelector(
TM, Subtarget, RBI);
unsigned const MachineRegisterInfo * MRI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file declares a class to represent arbitrary precision floating point values and provide a varie...
static bool selectUnmergeValues(MachineInstrBuilder &MIB, const ARMBaseInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const HexagonInstrInfo * TII
unsigned const TargetRegisterInfo * TRI
const char LLVMTargetMachineRef TM
static StringRef getName(Value *V)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
std::vector< std::pair< SPIRV::InstructionSet::InstructionSet, uint32_t > > ExtInstList
#define GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
static bool isUSMStorageClass(SPIRV::StorageClass::StorageClass SC)
static void addMemoryOperands(MachineMemOperand *MemOp, MachineInstrBuilder &MIB)
static unsigned getFCmpOpcode(unsigned PredNum)
bool isTypeFoldingSupported(unsigned Opcode)
static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI)
static SPIRV::Scope::Scope getScope(SyncScope::ID Ord, SPIRVMachineModuleInfo *MMI)
static unsigned getBoolCmpOpcode(unsigned PredNum)
static unsigned getICmpOpcode(unsigned PredNum)
static bool isConstReg(MachineRegisterInfo *MRI, SPIRVType *OpDef)
static int64_t foldImm(const MachineOperand &MO, MachineRegisterInfo *MRI)
static bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC)
static unsigned getPtrCmpOpcode(unsigned Pred)
static unsigned getArrayComponentCount(MachineRegisterInfo *MRI, const SPIRVType *ResType)
APInt bitcastToAPInt() const
Class for arbitrary precision integers.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
uint64_t getZExtValue() const
Get zero extended value.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ FCMP_ULT
1 1 0 0 True if unordered or less than
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
@ ICMP_ULT
unsigned less than
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
@ ICMP_SGE
signed greater or equal
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
@ ICMP_ULE
unsigned less or equal
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
ConstantFP - Floating Point Values [float, double].
const APFloat & getValueAPF() const
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
const APInt & getValue() const
Return the constant as an APInt value reference.
This is an important base class in LLVM.
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
This class represents an Operation in the Expression.
const Function & getFunction() const
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
LinkageTypes getLinkage() const
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
static std::string getGlobalIdentifier(StringRef Name, GlobalValue::LinkageTypes Linkage, StringRef FileName)
Return the modified name for a global value suitable to be used as the key for a global lookup (e....
bool hasAvailableExternallyLinkage() const
@ InternalLinkage
Rename collisions when linking (static functions).
@ LinkOnceODRLinkage
Same, but only replaced by something equivalent.
Type * getValueType() const
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
This is an important class for using LLVM in a threaded context.
SyncScope::ID getOrInsertSyncScopeID(StringRef SSN)
getOrInsertSyncScopeID - Maps synchronization scope name to synchronization scope ID.
int getNumber() const
MachineBasicBlocks are uniquely numbered at the function level, unless they're not in a MachineFuncti...
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
MachineModuleInfo & getMMI() const
Helper class to build MachineInstr.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
bool constrainAllUses(const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
const MachineOperand & getOperand(unsigned i) const
A description of a memory reference used in the backend.
This class can be derived from and used by targets to hold private target-specific information for ea...
This class contains meta information specific to a module.
const Module * getModule() const
Ty & getObjFileInfo()
Keep track of various per-module pieces of information for backends that would like to do so.
MachineOperand class - Representation of each machine instruction operand.
const ConstantInt * getCImm() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
Register getReg() const
getReg - Returns the register number.
defusechain_iterator - This class provides iterator support for machine operands in the function that...
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
LLVMContext & getContext() const
Get the global data context.
Analysis providing profile information.
Holds all the information related to register banks.
Wrapper class representing virtual and physical registers.
constexpr bool isValid() const
SyncScope::ID SubGroupSSID
SPIRVMachineModuleInfo(const MachineModuleInfo &MMI)
SyncScope::ID AllSVMDevicesSSID
SyncScope::ID Work_ItemSSID
SyncScope::ID WorkGroupSSID
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
std::string str() const
Return the twine contents as a std::string.
The instances of the Type class are immutable: once they are created, they are never changed.
bool isArrayTy() const
True if this is an instance of ArrayType.
Type * getArrayElementType() const
uint64_t getArrayNumElements() const
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char IsConst[]
Key for Kernel::Arg::Metadata::mIsConst.
@ SingleThread
Synchronized with respect to signal handlers executing in the same thread.
@ System
Synchronized with respect to all concurrently executing threads.
Reg
All possible values of the reg field in the ModR/M byte.
Scope
Defines the scope in which this symbol should be visible: Default – Visible in the public interface o...
NodeAddr< DefNode * > Def
This is an optimization pass for GlobalISel generic memory operations.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
void addNumImm(const APInt &Imm, MachineInstrBuilder &MIB)
bool constrainSelectedInstRegOperands(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Mutate the newly-selected instruction I to constrain its (possibly generic) virtual register operands...
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
uint64_t getIConstVal(Register ConstReg, const MachineRegisterInfo *MRI)
SPIRV::MemorySemantics::MemorySemantics getMemSemanticsForStorageClass(SPIRV::StorageClass::StorageClass SC)
void buildOpDecorate(Register Reg, MachineIRBuilder &MIRBuilder, SPIRV::Decoration::Decoration Dec, const std::vector< uint32_t > &DecArgs, StringRef StrImm)
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
SPIRV::StorageClass::StorageClass addressSpaceToStorageClass(unsigned AddrSpace, const SPIRVSubtarget &STI)
AtomicOrdering
Atomic ordering for LLVM's memory model.
InstructionSelector * createSPIRVInstructionSelector(const SPIRVTargetMachine &TM, const SPIRVSubtarget &Subtarget, const RegisterBankInfo &RBI)
constexpr unsigned BitWidth
bool isSpvIntrinsic(const MachineInstr &MI, Intrinsic::ID IntrinsicID)
SPIRV::MemorySemantics::MemorySemantics getMemSemantics(AtomicOrdering Ord)
std::string getLinkStringForBuiltIn(SPIRV::BuiltIn::BuiltIn BuiltInValue)