32#include "llvm/IR/IntrinsicsSPIRV.h"
57#define DEBUG_TYPE "spirv-isel"
60namespace CL = SPIRV::OpenCLExtInst;
61namespace GL = SPIRV::GLSLExtInst;
64 std::vector<std::pair<SPIRV::InstructionSet::InstructionSet, uint32_t>>;
68#define GET_GLOBALISEL_PREDICATE_BITSET
69#include "SPIRVGenGlobalISel.inc"
70#undef GET_GLOBALISEL_PREDICATE_BITSET
96#define GET_GLOBALISEL_PREDICATES_DECL
97#include "SPIRVGenGlobalISel.inc"
98#undef GET_GLOBALISEL_PREDICATES_DECL
100#define GET_GLOBALISEL_TEMPORARIES_DECL
101#include "SPIRVGenGlobalISel.inc"
102#undef GET_GLOBALISEL_TEMPORARIES_DECL
119 unsigned Opcode)
const;
121 unsigned Opcode)
const;
138 unsigned NegateOpcode = 0)
const;
185 bool IsSigned)
const;
187 bool IsSigned,
unsigned Opcode)
const;
189 bool IsSigned)
const;
229 GL::GLSLExtInst GLInst)
const;
242 const SPIRVType *ResType =
nullptr)
const;
255#define GET_GLOBALISEL_IMPL
256#include "SPIRVGenGlobalISel.inc"
257#undef GET_GLOBALISEL_IMPL
263 TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()),
265#include
"SPIRVGenGlobalISel.inc"
268#include
"SPIRVGenGlobalISel.inc"
279 GR.setCurrentFunc(MF);
280 InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
289 assert(
I.getParent() &&
"Instruction should be in a basic block!");
290 assert(
I.getParent()->getParent() &&
"Instruction should be in a function!");
295 if (Opcode == SPIRV::ASSIGN_TYPE) {
296 Register DstReg =
I.getOperand(0).getReg();
297 Register SrcReg =
I.getOperand(1).getReg();
298 auto *
Def =
MRI->getVRegDef(SrcReg);
300 if (
MRI->getType(DstReg).isPointer())
302 bool Res = selectImpl(
I, *CoverageInfo);
303 assert(Res ||
Def->getOpcode() == TargetOpcode::G_CONSTANT);
307 MRI->replaceRegWith(SrcReg, DstReg);
308 I.removeFromParent();
310 }
else if (
I.getNumDefs() == 1) {
317 if (
I.getNumOperands() !=
I.getNumExplicitOperands()) {
318 LLVM_DEBUG(
errs() <<
"Generic instr has unexpected implicit operands\n");
324 bool HasDefs =
I.getNumDefs() > 0;
326 SPIRVType *ResType = HasDefs ? GR.getSPIRVTypeForVReg(ResVReg) :
nullptr;
327 assert(!HasDefs || ResType ||
I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
328 if (spvSelect(ResVReg, ResType,
I)) {
330 for (
unsigned i = 0; i <
I.getNumDefs(); ++i)
332 I.removeFromParent();
// Central dispatch: routes a generic MachineInstr to the matching SPIR-V
// selection routine keyed on its generic opcode (constants, conversions,
// extended-instruction math, atomics, control flow, ...).
// NOTE(review): this source is a garbled extraction — statements are split
// across lines, original line numbers are fused into the text, and interior
// lines are missing (the fused numbering jumps). Code is kept byte-identical;
// verify against upstream LLVM before making functional edits.
338bool SPIRVInstructionSelector::spvSelect(
Register ResVReg,
 341 const unsigned Opcode =
I.getOpcode();
 343 return selectImpl(
I, *CoverageInfo);
 345 case TargetOpcode::G_CONSTANT:
 346 return selectConst(ResVReg, ResType,
I.getOperand(1).getCImm()->getValue(),
 348 case TargetOpcode::G_GLOBAL_VALUE:
 349 return selectGlobalValue(ResVReg,
I);
 350 case TargetOpcode::G_IMPLICIT_DEF:
 351 return selectOpUndef(ResVReg, ResType,
I);
 352 case TargetOpcode::G_FREEZE:
 353 return selectFreeze(ResVReg, ResType,
I);
 355 case TargetOpcode::G_INTRINSIC:
 356 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
 357 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
 358 return selectIntrinsic(ResVReg, ResType,
I);
 359 case TargetOpcode::G_BITREVERSE:
 360 return selectBitreverse(ResVReg, ResType,
I);
 362 case TargetOpcode::G_BUILD_VECTOR:
 363 return selectConstVector(ResVReg, ResType,
I);
 364 case TargetOpcode::G_SPLAT_VECTOR:
 365 return selectSplatVector(ResVReg, ResType,
I);
// G_SHUFFLE_VECTOR is lowered inline to OpVectorShuffle; the loop below
// appends the shuffle-mask literals (body truncated in this extraction).
 367 case TargetOpcode::G_SHUFFLE_VECTOR: {
 369 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVectorShuffle))
 371 .
addUse(GR.getSPIRVTypeID(ResType))
 372 .
addUse(
I.getOperand(1).getReg())
 373 .
addUse(
I.getOperand(2).getReg());
 374 for (
auto V :
I.getOperand(3).getShuffleMask())
 378 case TargetOpcode::G_MEMMOVE:
 379 case TargetOpcode::G_MEMCPY:
 380 case TargetOpcode::G_MEMSET:
 381 return selectMemOperation(ResVReg,
I);
 383 case TargetOpcode::G_ICMP:
 384 return selectICmp(ResVReg, ResType,
I);
 385 case TargetOpcode::G_FCMP:
 386 return selectFCmp(ResVReg, ResType,
I);
 388 case TargetOpcode::G_FRAME_INDEX:
 389 return selectFrameIndex(ResVReg, ResType,
I);
 391 case TargetOpcode::G_LOAD:
 392 return selectLoad(ResVReg, ResType,
I);
 393 case TargetOpcode::G_STORE:
 394 return selectStore(
I);
 396 case TargetOpcode::G_BR:
 397 return selectBranch(
I);
 398 case TargetOpcode::G_BRCOND:
 399 return selectBranchCond(
I);
 401 case TargetOpcode::G_PHI:
 402 return selectPhi(ResVReg, ResType,
I);
// Scalar/vector conversions map 1:1 onto SPIR-V conversion opcodes.
 404 case TargetOpcode::G_FPTOSI:
 405 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertFToS);
 406 case TargetOpcode::G_FPTOUI:
 407 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertFToU);
 409 case TargetOpcode::G_SITOFP:
 410 return selectIToF(ResVReg, ResType,
I,
true, SPIRV::OpConvertSToF);
 411 case TargetOpcode::G_UITOFP:
 412 return selectIToF(ResVReg, ResType,
I,
false, SPIRV::OpConvertUToF);
 414 case TargetOpcode::G_CTPOP:
 415 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpBitCount);
// Math ops go through extended instruction sets: an OpenCL.std entry and,
// where one exists, the GLSL.std.450 equivalent.
 416 case TargetOpcode::G_SMIN:
 417 return selectExtInst(ResVReg, ResType,
I, CL::s_min, GL::SMin);
 418 case TargetOpcode::G_UMIN:
 419 return selectExtInst(ResVReg, ResType,
I, CL::u_min, GL::UMin);
 421 case TargetOpcode::G_SMAX:
 422 return selectExtInst(ResVReg, ResType,
I, CL::s_max, GL::SMax);
 423 case TargetOpcode::G_UMAX:
 424 return selectExtInst(ResVReg, ResType,
I, CL::u_max, GL::UMax);
 426 case TargetOpcode::G_FMA:
 427 return selectExtInst(ResVReg, ResType,
I, CL::fma, GL::Fma);
 429 case TargetOpcode::G_FPOW:
 430 return selectExtInst(ResVReg, ResType,
I, CL::pow, GL::Pow);
 431 case TargetOpcode::G_FPOWI:
 432 return selectExtInst(ResVReg, ResType,
I, CL::pown);
 434 case TargetOpcode::G_FEXP:
 435 return selectExtInst(ResVReg, ResType,
I, CL::exp, GL::Exp);
 436 case TargetOpcode::G_FEXP2:
 437 return selectExtInst(ResVReg, ResType,
I, CL::exp2, GL::Exp2);
 439 case TargetOpcode::G_FLOG:
 440 return selectExtInst(ResVReg, ResType,
I, CL::log, GL::Log);
 441 case TargetOpcode::G_FLOG2:
 442 return selectExtInst(ResVReg, ResType,
I, CL::log2, GL::Log2);
 443 case TargetOpcode::G_FLOG10:
 444 return selectLog10(ResVReg, ResType,
I);
 446 case TargetOpcode::G_FABS:
 447 return selectExtInst(ResVReg, ResType,
I, CL::fabs, GL::FAbs);
 448 case TargetOpcode::G_ABS:
 449 return selectExtInst(ResVReg, ResType,
I, CL::s_abs, GL::SAbs);
 451 case TargetOpcode::G_FMINNUM:
 452 case TargetOpcode::G_FMINIMUM:
 453 return selectExtInst(ResVReg, ResType,
I, CL::fmin, GL::NMin);
 454 case TargetOpcode::G_FMAXNUM:
 455 case TargetOpcode::G_FMAXIMUM:
 456 return selectExtInst(ResVReg, ResType,
I, CL::fmax, GL::NMax);
 458 case TargetOpcode::G_FCOPYSIGN:
 459 return selectExtInst(ResVReg, ResType,
I, CL::copysign);
 461 case TargetOpcode::G_FCEIL:
 462 return selectExtInst(ResVReg, ResType,
I, CL::ceil, GL::Ceil);
 463 case TargetOpcode::G_FFLOOR:
 464 return selectExtInst(ResVReg, ResType,
I, CL::floor, GL::Floor);
 466 case TargetOpcode::G_FCOS:
 467 return selectExtInst(ResVReg, ResType,
I, CL::cos, GL::Cos);
 468 case TargetOpcode::G_FSIN:
 469 return selectExtInst(ResVReg, ResType,
I, CL::sin, GL::Sin);
 470 case TargetOpcode::G_FTAN:
 471 return selectExtInst(ResVReg, ResType,
I, CL::tan, GL::Tan);
 473 case TargetOpcode::G_FSQRT:
 474 return selectExtInst(ResVReg, ResType,
I, CL::sqrt, GL::Sqrt);
 476 case TargetOpcode::G_CTTZ:
 477 case TargetOpcode::G_CTTZ_ZERO_UNDEF:
 478 return selectExtInst(ResVReg, ResType,
I, CL::ctz);
 479 case TargetOpcode::G_CTLZ:
 480 case TargetOpcode::G_CTLZ_ZERO_UNDEF:
 481 return selectExtInst(ResVReg, ResType,
I, CL::clz);
 483 case TargetOpcode::G_INTRINSIC_ROUND:
 484 return selectExtInst(ResVReg, ResType,
I, CL::round, GL::Round);
 485 case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
 486 return selectExtInst(ResVReg, ResType,
I, CL::rint, GL::RoundEven);
 487 case TargetOpcode::G_INTRINSIC_TRUNC:
 488 return selectExtInst(ResVReg, ResType,
I, CL::trunc, GL::Trunc);
 489 case TargetOpcode::G_FRINT:
 490 case TargetOpcode::G_FNEARBYINT:
 491 return selectExtInst(ResVReg, ResType,
I, CL::rint, GL::RoundEven);
// The following have OpenCL.std entries only (no GLSL.std.450 equivalent
// is passed here).
 493 case TargetOpcode::G_SMULH:
 494 return selectExtInst(ResVReg, ResType,
I, CL::s_mul_hi);
 495 case TargetOpcode::G_UMULH:
 496 return selectExtInst(ResVReg, ResType,
I, CL::u_mul_hi);
 498 case TargetOpcode::G_SADDSAT:
 499 return selectExtInst(ResVReg, ResType,
I, CL::s_add_sat);
 500 case TargetOpcode::G_UADDSAT:
 501 return selectExtInst(ResVReg, ResType,
I, CL::u_add_sat);
 502 case TargetOpcode::G_SSUBSAT:
 503 return selectExtInst(ResVReg, ResType,
I, CL::s_sub_sat);
 504 case TargetOpcode::G_USUBSAT:
 505 return selectExtInst(ResVReg, ResType,
I, CL::u_sub_sat);
 507 case TargetOpcode::G_SEXT:
 508 return selectExt(ResVReg, ResType,
I,
true);
 509 case TargetOpcode::G_ANYEXT:
 510 case TargetOpcode::G_ZEXT:
 511 return selectExt(ResVReg, ResType,
I,
false);
 512 case TargetOpcode::G_TRUNC:
 513 return selectTrunc(ResVReg, ResType,
I);
 514 case TargetOpcode::G_FPTRUNC:
 515 case TargetOpcode::G_FPEXT:
 516 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpFConvert);
 518 case TargetOpcode::G_PTRTOINT:
 519 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertPtrToU);
 520 case TargetOpcode::G_INTTOPTR:
 521 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertUToPtr);
 522 case TargetOpcode::G_BITCAST:
 523 return selectBitcast(ResVReg, ResType,
I);
 524 case TargetOpcode::G_ADDRSPACE_CAST:
 525 return selectAddrSpaceCast(ResVReg, ResType,
I);
// G_PTR_ADD: emitted as OpSpecConstantOp with InBoundsPtrAccessChain;
// several lines of this case are missing from the extraction.
 526 case TargetOpcode::G_PTR_ADD: {
 531 assert(
I.getOperand(1).isReg() &&
I.getOperand(2).isReg());
 535 assert(((*II).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
 536 (*II).getOpcode() == TargetOpcode::COPY ||
 537 (*II).getOpcode() == SPIRV::OpVariable) &&
 539 Register Idx = buildZerosVal(GR.getOrCreateSPIRVIntegerType(32,
I,
TII),
I);
 541 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSpecConstantOp))
 543 .
addUse(GR.getSPIRVTypeID(ResType))
 545 SPIRV::Opcode::InBoundsPtrAccessChain))
 548 .
addUse(
I.getOperand(2).getReg());
// Atomic read-modify-write ops map onto the corresponding OpAtomic* opcodes.
 552 case TargetOpcode::G_ATOMICRMW_OR:
 553 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicOr);
 554 case TargetOpcode::G_ATOMICRMW_ADD:
 555 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicIAdd);
 556 case TargetOpcode::G_ATOMICRMW_AND:
 557 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicAnd);
 558 case TargetOpcode::G_ATOMICRMW_MAX:
 559 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicSMax);
 560 case TargetOpcode::G_ATOMICRMW_MIN:
 561 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicSMin);
 562 case TargetOpcode::G_ATOMICRMW_SUB:
 563 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicISub);
 564 case TargetOpcode::G_ATOMICRMW_XOR:
 565 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicXor);
 566 case TargetOpcode::G_ATOMICRMW_UMAX:
 567 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicUMax);
 568 case TargetOpcode::G_ATOMICRMW_UMIN:
 569 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicUMin);
 570 case TargetOpcode::G_ATOMICRMW_XCHG:
 571 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicExchange);
 572 case TargetOpcode::G_ATOMIC_CMPXCHG:
 573 return selectAtomicCmpXchg(ResVReg, ResType,
I);
// NOTE(review): G_ATOMICRMW_FSUB reuses OpAtomicFAddEXT — presumably with a
// negation argument (the trailing argument line is cut off here); confirm
// against upstream.
 575 case TargetOpcode::G_ATOMICRMW_FADD:
 576 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFAddEXT);
 577 case TargetOpcode::G_ATOMICRMW_FSUB:
 579 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFAddEXT,
 581 case TargetOpcode::G_ATOMICRMW_FMIN:
 582 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFMinEXT);
 583 case TargetOpcode::G_ATOMICRMW_FMAX:
 584 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFMaxEXT);
 586 case TargetOpcode::G_FENCE:
 587 return selectFence(
I);
 589 case TargetOpcode::G_STACKSAVE:
 590 return selectStackSave(ResVReg, ResType,
I);
 591 case TargetOpcode::G_STACKRESTORE:
 592 return selectStackRestore(
I);
 594 case TargetOpcode::G_UNMERGE_VALUES:
// Convenience overload: select an extended instruction available only in
// the OpenCL.std set; forwards to the list-taking overload.
602bool SPIRVInstructionSelector::selectExtInst(
Register ResVReg,
 605 CL::OpenCLExtInst CLInst)
const {
 606 return selectExtInst(ResVReg, ResType,
I,
 607 {{SPIRV::InstructionSet::OpenCL_std, CLInst}});
// Convenience overload: select an extended instruction with both an
// OpenCL.std and a GLSL.std.450 variant; the list overload picks whichever
// set the subtarget supports.
610bool SPIRVInstructionSelector::selectExtInst(
Register ResVReg,
 613 CL::OpenCLExtInst CLInst,
 614 GL::GLSLExtInst GLInst)
const {
 615 ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
 616 {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
 617 return selectExtInst(ResVReg, ResType,
I, ExtInsts);
// Core ext-inst selector: walks the candidate (instruction-set, opcode)
// pairs and emits OpExtInst for the first set usable on this subtarget,
// forwarding the remaining source operands. Lines are missing from this
// extraction (e.g. the fallthrough when no set matches).
620bool SPIRVInstructionSelector::selectExtInst(
Register ResVReg,
 625 for (
const auto &Ex : Insts) {
 626 SPIRV::InstructionSet::InstructionSet Set = Ex.first;
 628 if (STI.canUseExtInstSet(Set)) {
 630 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
 632 .
addUse(GR.getSPIRVTypeID(ResType))
// Forward all explicit source operands of the generic instruction.
 635 const unsigned NumOps =
I.getNumOperands();
 636 for (
unsigned i = 1; i < NumOps; ++i)
 637 MIB.add(
I.getOperand(i));
 638 return MIB.constrainAllUses(
TII,
TRI, RBI);
// Emits a single-operand SPIR-V instruction `Opcode` producing ResVReg from
// an explicitly supplied source register (tail of the builder chain is
// missing in this extraction).
644bool SPIRVInstructionSelector::selectUnOpWithSrc(
Register ResVReg,
 648 unsigned Opcode)
const {
 649 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
 651 .
addUse(GR.getSPIRVTypeID(ResType))
// Selects a unary op. In the OpenCL environment, if the source is defined by
// G_GLOBAL_VALUE, pointer<->integer conversions are emitted as
// OpSpecConstantOp (spec-constant form) instead of a plain instruction;
// otherwise falls through to selectUnOpWithSrc.
656bool SPIRVInstructionSelector::selectUnOp(
Register ResVReg,
 659 unsigned Opcode)
const {
 660 if (STI.isOpenCLEnv() &&
I.getOperand(1).isReg()) {
 661 Register SrcReg =
I.getOperand(1).getReg();
// Scan all defs of the source register for a global-value definition.
 664 MRI->def_instr_begin(SrcReg);
 665 DefIt !=
MRI->def_instr_end(); DefIt = std::next(DefIt)) {
 666 if ((*DefIt).getOpcode() == TargetOpcode::G_GLOBAL_VALUE) {
 674 case SPIRV::OpConvertPtrToU:
 675 SpecOpcode =
static_cast<uint32_t>(SPIRV::Opcode::ConvertPtrToU);
 677 case SPIRV::OpConvertUToPtr:
 678 SpecOpcode =
static_cast<uint32_t>(SPIRV::Opcode::ConvertUToPtr);
 682 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
 683 TII.get(SPIRV::OpSpecConstantOp))
 685 .
addUse(GR.getSPIRVTypeID(ResType))
// Default path: plain unary instruction with operand 1 as the source.
 691 return selectUnOpWithSrc(ResVReg, ResType,
I,
I.getOperand(1).getReg(),
// Selects G_BITCAST as OpBitcast after checking the registry considers the
// two SPIR-V types bitcast-compatible (failure branch truncated here).
695bool SPIRVInstructionSelector::selectBitcast(
Register ResVReg,
 698 Register OpReg =
I.getOperand(1).getReg();
 699 SPIRVType *OpType = OpReg.
isValid() ? GR.getSPIRVTypeForVReg(OpReg) :
nullptr;
 700 if (!GR.isBitcastCompatible(ResType, OpType))
 702 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpBitcast);
708 return SPIRV::Scope::Invocation;
710 return SPIRV::Scope::Device;
712 return SPIRV::Scope::Workgroup;
714 return SPIRV::Scope::CrossDevice;
716 return SPIRV::Scope::Subgroup;
725 return SPIRV::Scope::Device;
731 if (
MemOp->isVolatile())
732 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
733 if (
MemOp->isNonTemporal())
734 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
735 if (
MemOp->getAlign().value())
736 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);
738 if (SpvMemOp !=
static_cast<uint32_t>(SPIRV::MemoryOperand::None)) {
740 if (SpvMemOp &
static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
747 if (Flags & MachineMemOperand::Flags::MOVolatile)
748 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
749 if (Flags & MachineMemOperand::Flags::MONonTemporal)
750 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
752 if (SpvMemOp !=
static_cast<uint32_t>(SPIRV::MemoryOperand::None))
// Selects G_LOAD (or the load-like intrinsic form, hence OpOffset) as
// OpLoad; memory operands, when present, are appended elsewhere in the
// missing lines.
756bool SPIRVInstructionSelector::selectLoad(
Register ResVReg,
 759 unsigned OpOffset = isa<GIntrinsic>(
I) ? 1 : 0;
 761 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
 763 .
addUse(GR.getSPIRVTypeID(ResType))
// Loads without machine memory operands are only expected from intrinsics.
 765 if (!
I.getNumMemOperands()) {
 766 assert(
I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
 768 TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
 773 return MIB.constrainAllUses(
TII,
TRI, RBI);
// Selects G_STORE (or the store-like intrinsic form) as OpStore; mirrors
// selectLoad's handling of missing memory operands.
776bool SPIRVInstructionSelector::selectStore(
MachineInstr &
I)
const {
 777 unsigned OpOffset = isa<GIntrinsic>(
I) ? 1 : 0;
 778 Register StoreVal =
I.getOperand(0 + OpOffset).getReg();
 781 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpStore))
 784 if (!
I.getNumMemOperands()) {
 785 assert(
I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
 787 TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
 792 return MIB.constrainAllUses(
TII,
TRI, RBI);
// Selects G_STACKSAVE as OpSaveMemoryINTEL; requires the
// SPV_INTEL_variable_length_array extension (error path partially cut).
795bool SPIRVInstructionSelector::selectStackSave(
Register ResVReg,
 798 if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
 800 "llvm.stacksave intrinsic: this instruction requires the following "
 801 "SPIR-V extension: SPV_INTEL_variable_length_array",
 804 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSaveMemoryINTEL))
 806 .
addUse(GR.getSPIRVTypeID(ResType))
// Selects G_STACKRESTORE as OpRestoreMemoryINTEL; same extension
// requirement as selectStackSave.
810bool SPIRVInstructionSelector::selectStackRestore(
MachineInstr &
I)
const {
 811 if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
 813 "llvm.stackrestore intrinsic: this instruction requires the following "
 814 "SPIR-V extension: SPV_INTEL_variable_length_array",
 816 if (!
I.getOperand(0).isReg())
 819 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpRestoreMemoryINTEL))
 820 .
addUse(
I.getOperand(0).getReg())
// Selects G_MEMCPY / G_MEMMOVE / G_MEMSET as OpCopyMemorySized. For memset,
// first materializes a UniformConstant global array of the fill byte
// (OpVariable) and bitcasts it to serve as the copy source. Several lines
// (constant-array initializer, size operand wiring) are missing from this
// extraction.
824bool SPIRVInstructionSelector::selectMemOperation(
Register ResVReg,
 827 Register SrcReg =
I.getOperand(1).getReg();
 828 if (
I.getOpcode() == TargetOpcode::G_MEMSET) {
 829 assert(
I.getOperand(1).isReg() &&
I.getOperand(2).isReg());
// Build i8, [Num x i8], and pointer-to-array types for the fill buffer.
 832 SPIRVType *ValTy = GR.getOrCreateSPIRVIntegerType(8,
I,
TII);
 833 SPIRVType *ArrTy = GR.getOrCreateSPIRVArrayType(ValTy, Num,
I,
TII);
 835 SPIRVType *VarTy = GR.getOrCreateSPIRVPointerType(
 836 ArrTy,
I,
TII, SPIRV::StorageClass::UniformConstant);
 846 GR.add(GV, GR.CurMF, VarReg);
 849 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVariable))
 851 .
addUse(GR.getSPIRVTypeID(VarTy))
 852 .
addImm(SPIRV::StorageClass::UniformConstant)
// Reinterpret the array variable as a pointer to its element type so it can
// be used as the OpCopyMemorySized source.
 855 SPIRVType *SourceTy = GR.getOrCreateSPIRVPointerType(
 856 ValTy,
I,
TII, SPIRV::StorageClass::UniformConstant);
 858 selectUnOpWithSrc(SrcReg, SourceTy,
I, VarReg, SPIRV::OpBitcast);
 860 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCopyMemorySized))
 861 .
addUse(
I.getOperand(0).getReg())
 863 .
addUse(
I.getOperand(2).getReg());
 864 if (
I.getNumMemOperands())
// Copy the result register only when the caller expects a distinct def.
 867 if (ResVReg.
isValid() && ResVReg != MIB->getOperand(0).getReg())
 868 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(TargetOpcode::COPY), ResVReg)
 869 .
addUse(MIB->getOperand(0).getReg());
// Selects a G_ATOMICRMW_* as the given OpAtomic* opcode, materializing
// scope and memory-semantics i32 constants. A non-zero NegateOpcode first
// negates the value operand (used to express FSUB via FAdd).
873bool SPIRVInstructionSelector::selectAtomicRMW(
Register ResVReg,
 877 unsigned NegateOpcode)
const {
 882 Register ScopeReg = buildI32Constant(Scope,
I);
 890 Register MemSemReg = buildI32Constant(MemSem ,
I);
 893 Register ValueReg =
I.getOperand(2).getReg();
 894 if (NegateOpcode != 0) {
// Route the value through a negation instruction into a temp register.
 896 Register TmpReg =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
 897 Result |= selectUnOpWithSrc(TmpReg, ResType,
I, ValueReg, NegateOpcode);
 903 .
addUse(GR.getSPIRVTypeID(ResType))
// Selects G_UNMERGE_VALUES on a vector source: each def becomes an
// OpCompositeExtract of one element, retyped to the vector's scalar type.
912bool SPIRVInstructionSelector::selectUnmergeValues(
MachineInstr &
I)
const {
// The vector source is the last operand of the instruction.
 913 unsigned ArgI =
I.getNumOperands() - 1;
 915 I.getOperand(ArgI).isReg() ?
I.getOperand(ArgI).getReg() :
Register(0);
 917 SrcReg.
isValid() ? GR.getSPIRVTypeForVReg(SrcReg) :
nullptr;
 918 if (!DefType || DefType->
getOpcode() != SPIRV::OpTypeVector)
 920 "cannot select G_UNMERGE_VALUES with a non-vector argument");
 926 for (
unsigned i = 0; i <
I.getNumDefs(); ++i) {
 927 Register ResVReg =
I.getOperand(i).getReg();
 928 SPIRVType *ResType = GR.getSPIRVTypeForVReg(ResVReg);
// Force each result register to the scalar element type and class.
 931 ResType = ScalarType;
 932 MRI->setRegClass(ResVReg, &SPIRV::IDRegClass);
 933 MRI->setType(ResVReg,
LLT::scalar(GR.getScalarOrVectorBitWidth(ResType)));
 934 GR.assignSPIRVTypeToVReg(ResType, ResVReg, *GR.CurMF);
 937 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
 939 .
addUse(GR.getSPIRVTypeID(ResType))
 941 .
addImm(
static_cast<int64_t
>(i));
 942 Res |= MIB.constrainAllUses(
TII,
TRI, RBI);
// Selects G_FENCE as OpMemoryBarrier with scope and memory-semantics
// constants (operand derivation lines are missing in this extraction).
947bool SPIRVInstructionSelector::selectFence(
MachineInstr &
I)
const {
 950 Register MemSemReg = buildI32Constant(MemSem,
I);
 953 Register ScopeReg = buildI32Constant(Scope,
I);
 955 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpMemoryBarrier))
// Selects G_ATOMIC_CMPXCHG (or its intrinsic form) as
// OpAtomicCompareExchange. The generic form synthesizes scope / equal /
// unequal memory-semantics constants; the intrinsic form takes them as
// operands 5-7. Then builds the {value, success} composite result
// (comparison + OpCompositeInsert tail, partially missing here).
961bool SPIRVInstructionSelector::selectAtomicCmpXchg(
Register ResVReg,
 968 if (!isa<GIntrinsic>(
I)) {
 973 ScopeReg = buildI32Constant(Scope,
I);
 975 unsigned ScSem =
static_cast<uint32_t>(
 979 MemSemEqReg = buildI32Constant(MemSemEq,
I);
// Reuse the "equal" semantics register when both orderings agree.
 983 MemSemEq == MemSemNeq ? MemSemEqReg : buildI32Constant(MemSemNeq,
I);
 985 ScopeReg =
I.getOperand(5).getReg();
 986 MemSemEqReg =
I.getOperand(6).getReg();
 987 MemSemNeqReg =
I.getOperand(7).getReg();
 992 SPIRVType *SpvValTy = GR.getSPIRVTypeForVReg(Val);
 993 Register ACmpRes =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
 996 BuildMI(*
I.getParent(),
I,
DL,
TII.get(SPIRV::OpAtomicCompareExchange))
 998 .
addUse(GR.getSPIRVTypeID(SpvValTy))
// Compare the returned value against the expected one to compute success.
 1006 Register CmpSuccReg =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
 1010 .
addUse(GR.getSPIRVTypeID(BoolTy))
 1014 Register TmpReg =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
 1017 .
addUse(GR.getSPIRVTypeID(ResType))
 1019 .
addUse(GR.getOrCreateUndef(
I, ResType,
TII))
 1024 .
addUse(GR.getSPIRVTypeID(ResType))
1034 case SPIRV::StorageClass::Workgroup:
1035 case SPIRV::StorageClass::CrossWorkgroup:
1036 case SPIRV::StorageClass::Function:
1045 case SPIRV::StorageClass::DeviceOnlyINTEL:
1046 case SPIRV::StorageClass::HostOnlyINTEL:
// Selects G_ADDRSPACE_CAST. Special case: a cast whose single user is a
// constant-composite/variable is rebuilt as an OpSpecConstantOp producing a
// Generic pointer. Otherwise emits OpPtrCastToGeneric /
// OpGenericCastToPtr (possibly chained through a Generic temp), the INTEL
// cross-workgroup casts, or a plain OpBitcast as a fallback, depending on
// the source/destination storage classes (some branch conditions are
// missing from this extraction).
1058bool SPIRVInstructionSelector::selectAddrSpaceCast(
Register ResVReg,
 1063 auto UIs =
MRI->use_instructions(ResVReg);
// "exactly one use" check: non-empty and the second iterator is the end.
 1064 if (!UIs.empty() && ++UIs.begin() == UIs.end() &&
 1065 (UIs.begin()->getOpcode() == SPIRV::OpConstantComposite ||
 1066 UIs.begin()->getOpcode() == SPIRV::OpVariable ||
 1068 Register NewReg =
I.getOperand(1).getReg();
 1070 SPIRVType *SpvBaseTy = GR.getOrCreateSPIRVIntegerType(8,
I,
TII);
 1071 ResType = GR.getOrCreateSPIRVPointerType(SpvBaseTy,
I,
TII,
 1072 SPIRV::StorageClass::Generic);
 1074 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSpecConstantOp))
 1076 .
addUse(GR.getSPIRVTypeID(ResType))
 1082 Register SrcPtr =
I.getOperand(1).getReg();
 1083 SPIRVType *SrcPtrTy = GR.getSPIRVTypeForVReg(SrcPtr);
 1084 SPIRV::StorageClass::StorageClass SrcSC = GR.getPointerStorageClass(SrcPtr);
 1085 SPIRV::StorageClass::StorageClass DstSC = GR.getPointerStorageClass(ResVReg);
 1093 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpPtrCastToGeneric);
 1096 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpGenericCastToPtr);
// Neither side Generic: go through a temporary Generic pointer.
 1099 Register Tmp =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
 1100 SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
 1101 SrcPtrTy,
I,
TII, SPIRV::StorageClass::Generic);
 1106 .
addUse(GR.getSPIRVTypeID(GenericPtrTy))
 1111 .
addUse(GR.getSPIRVTypeID(ResType))
 1119 return selectUnOp(ResVReg, ResType,
I,
 1120 SPIRV::OpPtrCastToCrossWorkgroupINTEL);
 1122 return selectUnOp(ResVReg, ResType,
I,
 1123 SPIRV::OpCrossWorkgroupCastToPtrINTEL);
 1127 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpBitcast);
1134 return SPIRV::OpFOrdEqual;
1136 return SPIRV::OpFOrdGreaterThanEqual;
1138 return SPIRV::OpFOrdGreaterThan;
1140 return SPIRV::OpFOrdLessThanEqual;
1142 return SPIRV::OpFOrdLessThan;
1144 return SPIRV::OpFOrdNotEqual;
1146 return SPIRV::OpOrdered;
1148 return SPIRV::OpFUnordEqual;
1150 return SPIRV::OpFUnordGreaterThanEqual;
1152 return SPIRV::OpFUnordGreaterThan;
1154 return SPIRV::OpFUnordLessThanEqual;
1156 return SPIRV::OpFUnordLessThan;
1158 return SPIRV::OpFUnordNotEqual;
1160 return SPIRV::OpUnordered;
1170 return SPIRV::OpIEqual;
1172 return SPIRV::OpINotEqual;
1174 return SPIRV::OpSGreaterThanEqual;
1176 return SPIRV::OpSGreaterThan;
1178 return SPIRV::OpSLessThanEqual;
1180 return SPIRV::OpSLessThan;
1182 return SPIRV::OpUGreaterThanEqual;
1184 return SPIRV::OpUGreaterThan;
1186 return SPIRV::OpULessThanEqual;
1188 return SPIRV::OpULessThan;
1197 return SPIRV::OpPtrEqual;
1199 return SPIRV::OpPtrNotEqual;
1210 return SPIRV::OpLogicalEqual;
1212 return SPIRV::OpLogicalNotEqual;
// Shared implementation for the any()/all() intrinsics. A scalar bool input
// is just copied; otherwise the input is converted to a bool vector by
// comparing against zero (OpFOrdNotEqual for floats, OpINotEqual for ints)
// and reduced with OpAny/OpAll.
1218bool SPIRVInstructionSelector::selectAnyOrAll(
Register ResVReg,
 1221 unsigned OpAnyOrAll)
const {
 1222 assert(
I.getNumOperands() == 3);
 1223 assert(
I.getOperand(2).isReg());
 1225 Register InputRegister =
I.getOperand(2).getReg();
 1226 SPIRVType *InputType = GR.getSPIRVTypeForVReg(InputRegister);
 1231 bool IsBoolTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeBool);
 1232 bool IsVectorTy = InputType->
getOpcode() == SPIRV::OpTypeVector;
// Scalar bool: any()/all() of a single bool is the bool itself.
 1233 if (IsBoolTy && !IsVectorTy) {
 1234 assert(ResVReg ==
I.getOperand(0).getReg());
 1235 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
 1236 TII.get(TargetOpcode::COPY))
 1242 bool IsFloatTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeFloat);
 1243 unsigned SpirvNotEqualId =
 1244 IsFloatTy ? SPIRV::OpFOrdNotEqual : SPIRV::OpINotEqual;
 1245 SPIRVType *SpvBoolScalarTy = GR.getOrCreateSPIRVBoolType(
I,
TII);
 1250 NotEqualReg = IsBoolTy ? InputRegister
 1251 :
MRI->createVirtualRegister(&SPIRV::IDRegClass);
 1253 SpvBoolTy = GR.getOrCreateSPIRVVectorType(SpvBoolTy, NumElts,
I,
TII);
// Zero constant of the input's type (float vs int flavor).
 1258 IsFloatTy ? buildZerosValF(InputType,
I) : buildZerosVal(InputType,
I);
 1260 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SpirvNotEqualId))
 1262 .
addUse(GR.getSPIRVTypeID(SpvBoolTy))
 1271 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(OpAnyOrAll))
 1273 .
addUse(GR.getSPIRVTypeID(SpvBoolScalarTy))
// all(): reduce a bool vector with OpAll via the shared helper.
1278bool SPIRVInstructionSelector::selectAll(
Register ResVReg,
 1281 return selectAnyOrAll(ResVReg, ResType,
I, SPIRV::OpAll);
// any(): reduce a bool vector with OpAny via the shared helper.
1284bool SPIRVInstructionSelector::selectAny(
Register ResVReg,
 1287 return selectAnyOrAll(ResVReg, ResType,
I, SPIRV::OpAny);
// Selects the fmix intrinsic as OpExtInst GLSL.std.450 FMix with the three
// register operands (x, y, a).
1290bool SPIRVInstructionSelector::selectFmix(
Register ResVReg,
 1294 assert(
I.getNumOperands() == 5);
 1295 assert(
I.getOperand(2).isReg());
 1296 assert(
I.getOperand(3).isReg());
 1297 assert(
I.getOperand(4).isReg());
 1300 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
 1302 .
addUse(GR.getSPIRVTypeID(ResType))
 1303 .
addImm(
static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
 1305 .
addUse(
I.getOperand(2).getReg())
 1306 .
addUse(
I.getOperand(3).getReg())
 1307 .
addUse(
I.getOperand(4).getReg())
// Selects G_BITREVERSE directly as OpBitReverse.
1311bool SPIRVInstructionSelector::selectBitreverse(
Register ResVReg,
 1315 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBitReverse))
 1317 .
addUse(GR.getSPIRVTypeID(ResType))
 1318 .
addUse(
I.getOperand(1).getReg())
// Selects G_FREEZE: if the operand traces back to an undef definition
// (directly, or via ASSIGN_TYPE of a G_IMPLICIT_DEF), the frozen value is
// materialized as OpConstantNull; otherwise it is a plain COPY of the
// operand.
1322bool SPIRVInstructionSelector::selectFreeze(
Register ResVReg,
 1330 if (!
I.getOperand(0).isReg() || !
I.getOperand(1).isReg())
 1332 Register OpReg =
I.getOperand(1).getReg();
 1335 switch (
Def->getOpcode()) {
 1336 case SPIRV::ASSIGN_TYPE:
 1338 MRI->getVRegDef(
Def->getOperand(1).getReg())) {
 1339 if (AssignToDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
 1340 Reg =
Def->getOperand(2).getReg();
 1343 case SPIRV::OpUndef:
 1344 Reg =
Def->getOperand(1).getReg();
 1347 unsigned DestOpCode;
 1348 if (
Reg.isValid()) {
 1349 DestOpCode = SPIRV::OpConstantNull;
 1351 DestOpCode = TargetOpcode::COPY;
 1354 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(DestOpCode))
 1355 .
addDef(
I.getOperand(0).getReg())
// Selects G_BUILD_VECTOR of constants as OpConstantComposite; each operand
// must be an ASSIGN_TYPE wrapping a G_CONSTANT / G_FCONSTANT (checked via
// the predicate below, whose enclosing all_of call is cut from this
// extraction).
1362bool SPIRVInstructionSelector::selectConstVector(
Register ResVReg,
 1372 SPIRVType *ConstTy = this->MRI->getVRegDef(MO.getReg());
 1373 assert(ConstTy && ConstTy->getOpcode() == SPIRV::ASSIGN_TYPE &&
 1374 ConstTy->getOperand(1).isReg());
 1375 Register ConstReg = ConstTy->getOperand(1).getReg();
 1376 const MachineInstr *Const = this->MRI->getVRegDef(ConstReg);
 1378 return (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
 1379 Const->getOpcode() == TargetOpcode::G_FCONSTANT);
 1382 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
 1383 TII.get(SPIRV::OpConstantComposite))
 1385 .
addUse(GR.getSPIRVTypeID(ResType));
// Append every explicit source operand as a composite constituent.
 1386 for (
unsigned i =
I.getNumExplicitDefs(); i <
I.getNumExplicitOperands(); ++i)
 1387 MIB.
addUse(
I.getOperand(i).getReg());
1397 if (OpDef->
getOpcode() == SPIRV::ASSIGN_TYPE &&
1402 unsigned N = OpDef->
getOpcode() == TargetOpcode::G_CONSTANT
1410 if (OpDef->
getOpcode() == SPIRV::ASSIGN_TYPE &&
1415 return OpDef->
getOpcode() == TargetOpcode::G_CONSTANT ||
1416 OpDef->
getOpcode() == TargetOpcode::G_FCONSTANT;
// Selects G_SPLAT_VECTOR: repeats the scalar operand N times into an
// OpConstantComposite (constant splat) or OpCompositeConstruct (dynamic
// splat), where N is the vector/array component count.
1426bool SPIRVInstructionSelector::selectSplatVector(
Register ResVReg,
 1430 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
 1431 N = GR.getScalarOrVectorComponentCount(ResType);
 1432 else if (ResType->
getOpcode() == SPIRV::OpTypeArray)
 1437 unsigned OpIdx =
I.getNumExplicitDefs();
 1438 if (!
I.getOperand(OpIdx).isReg())
 1442 Register OpReg =
I.getOperand(OpIdx).getReg();
// A non-constant composite needs at least two constituents to be valid.
 1445 if (!IsConst &&
N < 2)
 1447 "There must be at least two constituent operands in a vector");
 1449 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
 1450 TII.get(IsConst ? SPIRV::OpConstantComposite
 1451 : SPIRV::OpCompositeConstruct))
 1453 .
addUse(GR.getSPIRVTypeID(ResType));
 1454 for (
unsigned i = 0; i <
N; ++i)
// Emits the already-chosen SPIR-V comparison opcode `CmpOpc` over the two
// compare operands (which must share a SPIR-V type).
1459bool SPIRVInstructionSelector::selectCmp(
Register ResVReg,
 1463 Register Cmp0 =
I.getOperand(2).getReg();
 1464 Register Cmp1 =
I.getOperand(3).getReg();
 1465 assert(GR.getSPIRVTypeForVReg(Cmp0)->getOpcode() ==
 1466 GR.getSPIRVTypeForVReg(Cmp1)->getOpcode() &&
 1467 "CMP operands should have the same type");
 1468 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(CmpOpc))
 1470 .
addUse(GR.getSPIRVTypeID(ResType))
// Selects G_ICMP: picks a pointer-, bool-, or integer-comparison opcode
// based on the operand's SPIR-V type, then defers to selectCmp.
1476bool SPIRVInstructionSelector::selectICmp(
Register ResVReg,
 1479 auto Pred =
I.getOperand(1).getPredicate();
 1482 Register CmpOperand =
I.getOperand(2).getReg();
 1483 if (GR.isScalarOfType(CmpOperand, SPIRV::OpTypePointer))
 1485 else if (GR.isScalarOrVectorOfType(CmpOperand, SPIRV::OpTypeBool))
 1489 return selectCmp(ResVReg, ResType, CmpOpc,
I);
1495 assert(
I.getOpcode() == TargetOpcode::G_FCONSTANT && OpIdx == -1 &&
1496 "Expected G_FCONSTANT");
1497 const ConstantFP *FPImm =
I.getOperand(1).getFPImm();
1504 assert(
I.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1505 "Expected G_CONSTANT");
1506 addNumImm(
I.getOperand(1).getCImm()->getValue(), MIB);
1514 ResType ? ResType : GR.getOrCreateSPIRVIntegerType(32,
I,
TII);
1516 auto ConstInt = ConstantInt::get(LLVMTy, Val);
1517 Register NewReg = GR.find(ConstInt, GR.CurMF);
1520 GR.add(ConstInt, GR.CurMF, NewReg);
1524 MI =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantNull))
1526 .
addUse(GR.getSPIRVTypeID(SpvI32Ty));
1528 MI =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantI))
1530 .
addUse(GR.getSPIRVTypeID(SpvI32Ty))
// Selects G_FCMP by mapping the FP predicate to a SPIR-V opcode (mapping
// computed in lines missing here) and deferring to selectCmp.
1538bool SPIRVInstructionSelector::selectFCmp(
Register ResVReg,
 1542 return selectCmp(ResVReg, ResType, CmpOp,
I);
1548 bool ZeroAsNull = STI.isOpenCLEnv();
1549 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
1550 return GR.getOrCreateConstVector(0UL,
I, ResType,
TII, ZeroAsNull);
1551 return GR.getOrCreateConstInt(0,
I, ResType,
TII, ZeroAsNull);
1571 bool ZeroAsNull = STI.isOpenCLEnv();
1573 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
1574 return GR.getOrCreateConstVector(VZero,
I, ResType,
TII, ZeroAsNull);
1575 return GR.getOrCreateConstFP(VZero,
I, ResType,
TII, ZeroAsNull);
1581 unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
1584 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
// Converts a bool condition into a 0/1 (or 0/-1 when signed) value of
// ResType via OpSelect; chooses the scalar- or vector-condition variant of
// OpSelect based on the condition type.
1589bool SPIRVInstructionSelector::selectSelect(
Register ResVReg,
 1592 bool IsSigned)
const {
 1594 Register ZeroReg = buildZerosVal(ResType,
I);
 1595 Register OneReg = buildOnesVal(IsSigned, ResType,
I);
 1597 GR.isScalarOfType(
I.getOperand(1).getReg(), SPIRV::OpTypeBool);
 1599 IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectSIVCond;
 1600 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
 1602 .
addUse(GR.getSPIRVTypeID(ResType))
 1603 .
addUse(
I.getOperand(1).getReg())
// Selects G_SITOFP / G_UITOFP. A bool source is first widened to an integer
// of the result's bit width (vectorized if needed) via selectSelect, then
// converted with OpConvertSToF / OpConvertUToF.
1609bool SPIRVInstructionSelector::selectIToF(
Register ResVReg,
 1612 unsigned Opcode)
const {
 1613 Register SrcReg =
I.getOperand(1).getReg();
 1616 if (GR.isScalarOrVectorOfType(
I.getOperand(1).getReg(), SPIRV::OpTypeBool)) {
 1617 unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
 1619 if (ResType->
getOpcode() == SPIRV::OpTypeVector) {
 1621 TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts,
I,
TII);
// Materialize the bool->int widening into a fresh virtual register.
 1623 SrcReg =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
 1624 selectSelect(SrcReg, TmpType,
I,
false);
 1626 return selectUnOpWithSrc(ResVReg, ResType,
I, SrcReg, Opcode);
1629bool SPIRVInstructionSelector::selectExt(
Register ResVReg,
1632 Register SrcReg =
I.getOperand(1).getReg();
1633 if (GR.isScalarOrVectorOfType(SrcReg, SPIRV::OpTypeBool))
1634 return selectSelect(ResVReg, ResType,
I, IsSigned);
1636 SPIRVType *SrcType = GR.getSPIRVTypeForVReg(SrcReg);
1637 if (SrcType == ResType)
1638 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
1639 TII.get(TargetOpcode::COPY))
1644 unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
1645 return selectUnOp(ResVReg, ResType,
I, Opcode);
1648bool SPIRVInstructionSelector::selectIntToBool(
Register IntReg,
1654 Register BitIntReg =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
1655 bool IsVectorTy = IntTy->
getOpcode() == SPIRV::OpTypeVector;
1656 unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
1658 Register One = buildOnesVal(
false, IntTy,
I);
1662 .
addUse(GR.getSPIRVTypeID(IntTy))
1666 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpINotEqual))
1668 .
addUse(GR.getSPIRVTypeID(BoolTy))
1674bool SPIRVInstructionSelector::selectTrunc(
Register ResVReg,
1677 Register IntReg =
I.getOperand(1).getReg();
1678 const SPIRVType *ArgType = GR.getSPIRVTypeForVReg(IntReg);
1679 if (GR.isScalarOrVectorOfType(ResVReg, SPIRV::OpTypeBool))
1680 return selectIntToBool(IntReg, ResVReg,
I, ArgType, ResType);
1681 if (ArgType == ResType)
1682 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
1683 TII.get(TargetOpcode::COPY))
1687 bool IsSigned = GR.isScalarOrVectorSigned(ResType);
1688 unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
1689 return selectUnOp(ResVReg, ResType,
I, Opcode);
1692bool SPIRVInstructionSelector::selectConst(
Register ResVReg,
1696 unsigned TyOpcode = ResType->
getOpcode();
1697 assert(TyOpcode != SPIRV::OpTypePointer ||
Imm.isZero());
1699 if ((TyOpcode == SPIRV::OpTypePointer || TyOpcode == SPIRV::OpTypeEvent) &&
1701 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantNull))
1703 .
addUse(GR.getSPIRVTypeID(ResType))
1705 if (TyOpcode == SPIRV::OpTypeInt) {
1706 assert(
Imm.getBitWidth() <= 64 &&
"Unsupported integer width!");
1710 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(TargetOpcode::COPY))
1715 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantI))
1717 .
addUse(GR.getSPIRVTypeID(ResType));
1724bool SPIRVInstructionSelector::selectOpUndef(
Register ResVReg,
1727 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUndef))
1729 .
addUse(GR.getSPIRVTypeID(ResType))
1736 if (TypeInst->
getOpcode() != SPIRV::ASSIGN_TYPE)
1740 return ImmInst->
getOpcode() == TargetOpcode::G_CONSTANT;
1750bool SPIRVInstructionSelector::selectInsertVal(
Register ResVReg,
1754 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeInsert))
1756 .
addUse(GR.getSPIRVTypeID(ResType))
1758 .
addUse(
I.getOperand(3).getReg())
1760 .
addUse(
I.getOperand(2).getReg());
1761 for (
unsigned i = 4; i <
I.getNumOperands(); i++)
1766bool SPIRVInstructionSelector::selectExtractVal(
Register ResVReg,
1770 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
1772 .
addUse(GR.getSPIRVTypeID(ResType))
1773 .
addUse(
I.getOperand(2).getReg());
1774 for (
unsigned i = 3; i <
I.getNumOperands(); i++)
1779bool SPIRVInstructionSelector::selectInsertElt(
Register ResVReg,
1783 return selectInsertVal(ResVReg, ResType,
I);
1785 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVectorInsertDynamic))
1787 .
addUse(GR.getSPIRVTypeID(ResType))
1788 .
addUse(
I.getOperand(2).getReg())
1789 .
addUse(
I.getOperand(3).getReg())
1790 .
addUse(
I.getOperand(4).getReg())
1794bool SPIRVInstructionSelector::selectExtractElt(
Register ResVReg,
1798 return selectExtractVal(ResVReg, ResType,
I);
1800 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVectorExtractDynamic))
1802 .
addUse(GR.getSPIRVTypeID(ResType))
1803 .
addUse(
I.getOperand(2).getReg())
1804 .
addUse(
I.getOperand(3).getReg())
1808bool SPIRVInstructionSelector::selectGEP(
Register ResVReg,
1811 const bool IsGEPInBounds =
I.getOperand(2).getImm();
1816 const unsigned Opcode = STI.isVulkanEnv()
1817 ? (IsGEPInBounds ? SPIRV::OpInBoundsAccessChain
1818 : SPIRV::OpAccessChain)
1819 : (IsGEPInBounds ? SPIRV::OpInBoundsPtrAccessChain
1820 : SPIRV::OpPtrAccessChain);
1822 auto Res =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
1824 .
addUse(GR.getSPIRVTypeID(ResType))
1826 .
addUse(
I.getOperand(3).getReg());
1828 const unsigned StartingIndex =
1829 (Opcode == SPIRV::OpAccessChain || Opcode == SPIRV::OpInBoundsAccessChain)
1832 for (
unsigned i = StartingIndex; i <
I.getNumExplicitOperands(); ++i)
1833 Res.addUse(
I.getOperand(i).getReg());
1834 return Res.constrainAllUses(
TII,
TRI, RBI);
1838bool SPIRVInstructionSelector::wrapIntoSpecConstantOp(
1841 unsigned Lim =
I.getNumExplicitOperands();
1842 for (
unsigned i =
I.getNumExplicitDefs() + 1; i < Lim; ++i) {
1843 Register OpReg =
I.getOperand(i).getReg();
1845 SPIRVType *OpType = GR.getSPIRVTypeForVReg(OpReg);
1847 OpDefine->
getOpcode() == TargetOpcode::G_ADDRSPACE_CAST) {
1854 Register WrapReg = GR.find(OpDefine, MF);
1860 WrapReg =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
1861 GR.add(OpDefine, MF, WrapReg);
1865 GR.assignSPIRVTypeToVReg(OpType, WrapReg, *MF);
1869 .
addUse(GR.getSPIRVTypeID(OpType))
1879bool SPIRVInstructionSelector::selectIntrinsic(
Register ResVReg,
1885 case Intrinsic::spv_load:
1886 return selectLoad(ResVReg, ResType,
I);
1887 case Intrinsic::spv_store:
1888 return selectStore(
I);
1889 case Intrinsic::spv_extractv:
1890 return selectExtractVal(ResVReg, ResType,
I);
1891 case Intrinsic::spv_insertv:
1892 return selectInsertVal(ResVReg, ResType,
I);
1893 case Intrinsic::spv_extractelt:
1894 return selectExtractElt(ResVReg, ResType,
I);
1895 case Intrinsic::spv_insertelt:
1896 return selectInsertElt(ResVReg, ResType,
I);
1897 case Intrinsic::spv_gep:
1898 return selectGEP(ResVReg, ResType,
I);
1899 case Intrinsic::spv_unref_global:
1900 case Intrinsic::spv_init_global: {
1903 ?
MRI->getVRegDef(
I.getOperand(2).getReg())
1906 return selectGlobalValue(
MI->getOperand(0).getReg(), *
MI,
Init);
1908 case Intrinsic::spv_undef: {
1909 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUndef))
1911 .
addUse(GR.getSPIRVTypeID(ResType));
1914 case Intrinsic::spv_const_composite: {
1916 bool IsNull =
I.getNumExplicitDefs() + 1 ==
I.getNumExplicitOperands();
1918 unsigned Opcode = SPIRV::OpConstantNull;
1921 Opcode = SPIRV::OpConstantComposite;
1922 if (!wrapIntoSpecConstantOp(
I, CompositeArgs))
1925 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(Opcode))
1927 .
addUse(GR.getSPIRVTypeID(ResType));
1930 for (
Register OpReg : CompositeArgs)
1935 case Intrinsic::spv_assign_name: {
1936 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpName));
1937 MIB.
addUse(
I.getOperand(
I.getNumExplicitDefs() + 1).getReg());
1938 for (
unsigned i =
I.getNumExplicitDefs() + 2;
1939 i <
I.getNumExplicitOperands(); ++i) {
1940 MIB.
addImm(
I.getOperand(i).getImm());
1944 case Intrinsic::spv_switch: {
1945 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSwitch));
1946 for (
unsigned i = 1; i <
I.getNumExplicitOperands(); ++i) {
1947 if (
I.getOperand(i).isReg())
1948 MIB.
addReg(
I.getOperand(i).getReg());
1949 else if (
I.getOperand(i).isCImm())
1950 addNumImm(
I.getOperand(i).getCImm()->getValue(), MIB);
1951 else if (
I.getOperand(i).isMBB())
1952 MIB.
addMBB(
I.getOperand(i).getMBB());
1958 case Intrinsic::spv_cmpxchg:
1959 return selectAtomicCmpXchg(ResVReg, ResType,
I);
1960 case Intrinsic::spv_unreachable:
1961 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUnreachable));
1963 case Intrinsic::spv_alloca:
1964 return selectFrameIndex(ResVReg, ResType,
I);
1965 case Intrinsic::spv_alloca_array:
1966 return selectAllocaArray(ResVReg, ResType,
I);
1967 case Intrinsic::spv_assume:
1968 if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
1969 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpAssumeTrueKHR))
1970 .
addUse(
I.getOperand(1).getReg());
1972 case Intrinsic::spv_expect:
1973 if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
1974 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExpectKHR))
1976 .
addUse(GR.getSPIRVTypeID(ResType))
1977 .
addUse(
I.getOperand(2).getReg())
1978 .
addUse(
I.getOperand(3).getReg());
1980 case Intrinsic::spv_thread_id:
1981 return selectSpvThreadId(ResVReg, ResType,
I);
1982 case Intrinsic::spv_all:
1983 return selectAll(ResVReg, ResType,
I);
1984 case Intrinsic::spv_any:
1985 return selectAny(ResVReg, ResType,
I);
1986 case Intrinsic::spv_lerp:
1987 return selectFmix(ResVReg, ResType,
I);
1988 case Intrinsic::spv_lifetime_start:
1989 case Intrinsic::spv_lifetime_end: {
1990 unsigned Op = IID == Intrinsic::spv_lifetime_start ? SPIRV::OpLifetimeStart
1991 : SPIRV::OpLifetimeStop;
1992 int64_t
Size =
I.getOperand(
I.getNumExplicitDefs() + 1).getImm();
1993 Register PtrReg =
I.getOperand(
I.getNumExplicitDefs() + 2).getReg();
1994 unsigned PonteeOpType = GR.getPointeeTypeOp(PtrReg);
1995 bool IsNonvoidPtr = PonteeOpType != 0 && PonteeOpType != SPIRV::OpTypeVoid;
1996 if (
Size == -1 || IsNonvoidPtr)
2001 std::string DiagMsg;
2004 DiagMsg =
"Intrinsic selection not implemented: " + DiagMsg;
2011bool SPIRVInstructionSelector::selectAllocaArray(
Register ResVReg,
2018 TII.get(SPIRV::OpVariableLengthArrayINTEL))
2020 .
addUse(GR.getSPIRVTypeID(ResType))
2021 .
addUse(
I.getOperand(2).getReg())
2025bool SPIRVInstructionSelector::selectFrameIndex(
Register ResVReg,
2033 bool IsHeader =
false;
2035 for (; It != E && It !=
I; ++It) {
2036 Opcode = It->getOpcode();
2037 if (Opcode == SPIRV::OpFunction || Opcode == SPIRV::OpFunctionParameter) {
2039 }
else if (IsHeader &&
2040 !(Opcode == SPIRV::ASSIGN_TYPE || Opcode == SPIRV::OpLabel)) {
2045 return BuildMI(*
MBB, It, It->getDebugLoc(),
TII.get(SPIRV::OpVariable))
2047 .
addUse(GR.getSPIRVTypeID(ResType))
2052bool SPIRVInstructionSelector::selectBranch(
MachineInstr &
I)
const {
2059 if (PrevI !=
nullptr && PrevI->
getOpcode() == TargetOpcode::G_BRCOND) {
2060 return BuildMI(
MBB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBranchConditional))
2063 .
addMBB(
I.getOperand(0).getMBB())
2067 .
addMBB(
I.getOperand(0).getMBB())
2071bool SPIRVInstructionSelector::selectBranchCond(
MachineInstr &
I)
const {
2084 if (NextI !=
nullptr && NextI->
getOpcode() == SPIRV::OpBranchConditional)
2091 return BuildMI(
MBB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBranchConditional))
2092 .
addUse(
I.getOperand(0).getReg())
2093 .
addMBB(
I.getOperand(1).getMBB())
2098bool SPIRVInstructionSelector::selectPhi(
Register ResVReg,
2101 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpPhi))
2103 .
addUse(GR.getSPIRVTypeID(ResType));
2104 const unsigned NumOps =
I.getNumOperands();
2105 for (
unsigned i = 1; i < NumOps; i += 2) {
2106 MIB.
addUse(
I.getOperand(i + 0).getReg());
2107 MIB.
addMBB(
I.getOperand(i + 1).getMBB());
2112bool SPIRVInstructionSelector::selectGlobalValue(
2117 Type *GVType = GR.getDeducedGlobalValueType(GV);
2122 SPIRV::AccessQualifier::ReadWrite,
false);
2123 PointerBaseType = GR.getOrCreateSPIRVArrayType(
2126 PointerBaseType = GR.getOrCreateSPIRVType(
2127 GVType, MIRBuilder, SPIRV::AccessQualifier::ReadWrite,
false);
2129 SPIRVType *ResType = GR.getOrCreateSPIRVPointerType(
2130 PointerBaseType,
I,
TII,
2133 std::string GlobalIdent;
2135 unsigned &
ID = UnnamedGlobalIDs[GV];
2137 ID = UnnamedGlobalIDs.size();
2138 GlobalIdent =
"__unnamed_" +
Twine(
ID).
str();
2153 if (isa<Function>(GV)) {
2156 Register NewReg = GR.find(ConstVal, GR.CurMF);
2159 GR.add(ConstVal, GR.CurMF, NewReg);
2161 STI.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers)
2162 ? dyn_cast<Function>(GV)
2170 MRI->setRegClass(FuncVReg, &SPIRV::IDRegClass);
2173 TII.get(SPIRV::OpConstantFunctionPointerINTEL))
2175 .
addUse(GR.getSPIRVTypeID(ResType))
2181 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantNull))
2183 .
addUse(GR.getSPIRVTypeID(ResType))
2186 assert(NewReg != ResVReg);
2187 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(TargetOpcode::COPY))
2192 auto GlobalVar = cast<GlobalVariable>(GV);
2195 bool HasInit =
GlobalVar->hasInitializer() &&
2196 !isa<UndefValue>(
GlobalVar->getInitializer());
2199 if (HasInit && !
Init)
2203 SPIRV::StorageClass::StorageClass Storage =
2206 Storage != SPIRV::StorageClass::Function;
2207 SPIRV::LinkageType::LinkageType LnkType =
2209 ? SPIRV::LinkageType::Import
2211 STI.canUseExtension(SPIRV::Extension::SPV_KHR_linkonce_odr)
2212 ? SPIRV::LinkageType::LinkOnceODR
2213 : SPIRV::LinkageType::Export);
2215 Register Reg = GR.buildGlobalVariable(ResVReg, ResType, GlobalIdent, GV,
2217 HasLnkTy, LnkType, MIRBuilder,
true);
2218 return Reg.isValid();
2221bool SPIRVInstructionSelector::selectLog10(
Register ResVReg,
2224 if (STI.canUseExtInstSet(SPIRV::InstructionSet::OpenCL_std)) {
2225 return selectExtInst(ResVReg, ResType,
I, CL::log10);
2237 Register VarReg =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
2239 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
2241 .
addUse(GR.getSPIRVTypeID(ResType))
2242 .
addImm(
static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
2244 .
add(
I.getOperand(1))
2249 ResType->
getOpcode() == SPIRV::OpTypeFloat);
2252 ResType->
getOpcode() == SPIRV::OpTypeVector
2256 GR.buildConstantFP(
APFloat(0.30103f), MIRBuilder, SpirvScalarType);
2259 auto Opcode = ResType->
getOpcode() == SPIRV::OpTypeVector
2260 ? SPIRV::OpVectorTimesScalar
2264 .
addUse(GR.getSPIRVTypeID(ResType))
2272bool SPIRVInstructionSelector::selectSpvThreadId(
Register ResVReg,
2280 const SPIRVType *U32Type = GR.getOrCreateSPIRVIntegerType(32, MIRBuilder);
2282 GR.getOrCreateSPIRVVectorType(U32Type, 3, MIRBuilder);
2283 const SPIRVType *PtrType = GR.getOrCreateSPIRVPointerType(
2284 Vec3Ty, MIRBuilder, SPIRV::StorageClass::Input);
2288 MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::IDRegClass);
2289 MIRBuilder.getMRI()->setType(NewRegister,
LLT::pointer(0, 32));
2290 GR.assignSPIRVTypeToVReg(PtrType, NewRegister, MIRBuilder.getMF());
2293 Register Variable = GR.buildGlobalVariable(
2294 NewRegister, PtrType,
2296 SPIRV::StorageClass::Input,
nullptr,
true,
true,
2297 SPIRV::LinkageType::Import, MIRBuilder,
false);
2301 Register LoadedRegister =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
2302 MIRBuilder.getMRI()->setType(LoadedRegister,
LLT::pointer(0, 32));
2303 GR.assignSPIRVTypeToVReg(Vec3Ty, LoadedRegister, MIRBuilder.getMF());
2306 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
2308 .
addUse(GR.getSPIRVTypeID(Vec3Ty))
2313 assert(
I.getOperand(2).isReg());
2314 Register ThreadIdReg =
I.getOperand(2).getReg();
2320 assert(Const &&
Const->getOpcode() == TargetOpcode::G_CONSTANT);
2326 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
2328 .
addUse(GR.getSPIRVTypeID(ResType))
2339 return new SPIRVInstructionSelector(
TM, Subtarget, RBI);
unsigned const MachineRegisterInfo * MRI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file declares a class to represent arbitrary precision floating point values and provide a varie...
static bool selectUnmergeValues(MachineInstrBuilder &MIB, const ARMBaseInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const HexagonInstrInfo * TII
unsigned const TargetRegisterInfo * TRI
const char LLVMTargetMachineRef TM
static StringRef getName(Value *V)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
std::vector< std::pair< SPIRV::InstructionSet::InstructionSet, uint32_t > > ExtInstList
#define GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
static bool isUSMStorageClass(SPIRV::StorageClass::StorageClass SC)
static APFloat getZeroFP(const Type *LLVMFloatTy)
static void addMemoryOperands(MachineMemOperand *MemOp, MachineInstrBuilder &MIB)
static unsigned getFCmpOpcode(unsigned PredNum)
bool isTypeFoldingSupported(unsigned Opcode)
static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI)
static SPIRV::Scope::Scope getScope(SyncScope::ID Ord, SPIRVMachineModuleInfo *MMI)
static unsigned getBoolCmpOpcode(unsigned PredNum)
static unsigned getICmpOpcode(unsigned PredNum)
static bool isConstReg(MachineRegisterInfo *MRI, SPIRVType *OpDef)
static int64_t foldImm(const MachineOperand &MO, MachineRegisterInfo *MRI)
static bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC)
static unsigned getPtrCmpOpcode(unsigned Pred)
static unsigned getArrayComponentCount(MachineRegisterInfo *MRI, const SPIRVType *ResType)
APInt bitcastToAPInt() const
static APFloat getZero(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Zero.
Class for arbitrary precision integers.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
uint64_t getZExtValue() const
Get zero extended value.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ FCMP_ULT
1 1 0 0 True if unordered or less than
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
@ ICMP_ULT
unsigned less than
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
@ ICMP_SGE
signed greater or equal
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
@ ICMP_ULE
unsigned less or equal
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
ConstantFP - Floating Point Values [float, double].
const APFloat & getValueAPF() const
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
const APInt & getValue() const
Return the constant as an APInt value reference.
This is an important base class in LLVM.
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
This class represents an Operation in the Expression.
const Function & getFunction() const
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
LinkageTypes getLinkage() const
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
static std::string getGlobalIdentifier(StringRef Name, GlobalValue::LinkageTypes Linkage, StringRef FileName)
Return the modified name for a global value suitable to be used as the key for a global lookup (e....
bool hasAvailableExternallyLinkage() const
@ InternalLinkage
Rename collisions when linking (static functions).
@ LinkOnceODRLinkage
Same, but only replaced by something equivalent.
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
This is an important class for using LLVM in a threaded context.
SyncScope::ID getOrInsertSyncScopeID(StringRef SSN)
getOrInsertSyncScopeID - Maps synchronization scope name to synchronization scope ID.
int getNumber() const
MachineBasicBlocks are uniquely numbered at the function level, unless they're not in a MachineFuncti...
iterator SkipPHIsAndLabels(iterator I)
Return the first instruction in MBB after I that is not a PHI or a label.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
MachineModuleInfo & getMMI() const
const MachineBasicBlock & front() const
Helper class to build MachineInstr.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
bool constrainAllUses(const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
const MachineOperand & getOperand(unsigned i) const
A description of a memory reference used in the backend.
This class can be derived from and used by targets to hold private target-specific information for ea...
This class contains meta information specific to a module.
const Module * getModule() const
Ty & getObjFileInfo()
Keep track of various per-module pieces of information for backends that would like to do so.
MachineOperand class - Representation of each machine instruction operand.
const ConstantInt * getCImm() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
Register getReg() const
getReg - Returns the register number.
defusechain_iterator - This class provides iterator support for machine operands in the function that...
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
LLVMContext & getContext() const
Get the global data context.
Analysis providing profile information.
Holds all the information related to register banks.
Wrapper class representing virtual and physical registers.
constexpr bool isValid() const
SyncScope::ID SubGroupSSID
SPIRVMachineModuleInfo(const MachineModuleInfo &MMI)
SyncScope::ID AllSVMDevicesSSID
SyncScope::ID Work_ItemSSID
SyncScope::ID WorkGroupSSID
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
std::string str() const
Return the twine contents as a std::string.
The instances of the Type class are immutable: once they are created, they are never changed.
bool isArrayTy() const
True if this is an instance of ArrayType.
Type * getArrayElementType() const
uint64_t getArrayNumElements() const
@ HalfTyID
16-bit floating point type
@ FloatTyID
32-bit floating point type
@ DoubleTyID
64-bit floating point type
TypeID getTypeID() const
Return the type id for the type.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char IsConst[]
Key for Kernel::Arg::Metadata::mIsConst.
@ SingleThread
Synchronized with respect to signal handlers executing in the same thread.
@ System
Synchronized with respect to all concurrently executing threads.
Reg
All possible values of the reg field in the ModR/M byte.
Scope
Defines the scope in which this symbol should be visible: Default – Visible in the public interface o...
NodeAddr< DefNode * > Def
This is an optimization pass for GlobalISel generic memory operations.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
void addNumImm(const APInt &Imm, MachineInstrBuilder &MIB)
bool constrainSelectedInstRegOperands(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Mutate the newly-selected instruction I to constrain its (possibly generic) virtual register operands...
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
uint64_t getIConstVal(Register ConstReg, const MachineRegisterInfo *MRI)
SPIRV::MemorySemantics::MemorySemantics getMemSemanticsForStorageClass(SPIRV::StorageClass::StorageClass SC)
void buildOpDecorate(Register Reg, MachineIRBuilder &MIRBuilder, SPIRV::Decoration::Decoration Dec, const std::vector< uint32_t > &DecArgs, StringRef StrImm)
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
SPIRV::StorageClass::StorageClass addressSpaceToStorageClass(unsigned AddrSpace, const SPIRVSubtarget &STI)
AtomicOrdering
Atomic ordering for LLVM's memory model.
InstructionSelector * createSPIRVInstructionSelector(const SPIRVTargetMachine &TM, const SPIRVSubtarget &Subtarget, const RegisterBankInfo &RBI)
constexpr unsigned BitWidth
bool isSpvIntrinsic(const MachineInstr &MI, Intrinsic::ID IntrinsicID)
SPIRV::MemorySemantics::MemorySemantics getMemSemantics(AtomicOrdering Ord)
std::string getLinkStringForBuiltIn(SPIRV::BuiltIn::BuiltIn BuiltInValue)