#include "llvm/IR/IntrinsicsMips.h"
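// Helper excerpt: isUnalignedMemmoryAccess() treats an access as unaligned
// when the memory size (in bits) exceeds the alignment (in bits).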
  if (MemSize > AlignInBits)
    return true;
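// CheckTy0Ty1MemSizeAlign() returns true when the queried value type (type
// index 0), pointer type (type index 1), memory size and alignment support
// match one of the supported combinations.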
static bool
CheckTy0Ty1MemSizeAlign(const LegalityQuery &Query,
                        std::initializer_list<TypesAndMemOps> SupportedValues) {
  unsigned QueryMemSize = Query.MMODescrs[0].MemoryTy.getSizeInBits();

  for (auto &Val : SupportedValues) {
    if (Val.ValTy != Query.Types[0])
      continue;
    if (Val.PtrTy != Query.Types[1])
      continue;
    if (Val.MemSize != QueryMemSize)
      continue;
    if (!Val.SystemSupportsUnalignedAccess &&
        isUnalignedMemmoryAccess(QueryMemSize, Query.MMODescrs[0].AlignInBits))
      continue;
    return true;
  }
  return false;
}
static bool CheckTyN(unsigned N, const LegalityQuery &Query,
                     std::initializer_list<LLT> SupportedValues) {
  return llvm::is_contained(SupportedValues, Query.Types[N]);
}
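// Excerpts from the MipsLegalizerInfo(const MipsSubtarget &ST) constructor:
// legalization rules are registered per generic opcode via
// getActionDefinitionsBuilder() and the rule tables are verified at the end.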
  using namespace TargetOpcode;
        if (ST.hasMSA() && CheckTyN(0, Query, {v16s8, v8s16, v4s32, v2s64}))
          return true;
      .clampScalar(0, s32, s32);

      .lowerFor({{s32, s1}});
  bool NoAlignRequirements = true;
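  // Supported (value type, pointer type, memory size in bits, unaligned-access
  // flag) combinations for scalar and MSA vector loads/stores: 8-bit and
  // 32-bit scalar accesses have no alignment requirement, while unaligned
  // 16-bit and 64-bit accesses are only legal when the subtarget supports
  // unaligned memory access.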
                Query, {{s32, p0, 8, NoAlignRequirements},
                        {s32, p0, 16, ST.systemSupportsUnalignedAccess()},
                        {s32, p0, 32, NoAlignRequirements},
                        {p0, p0, 32, NoAlignRequirements},
                        {s64, p0, 64, ST.systemSupportsUnalignedAccess()}}))

                Query, {{v16s8, p0, 128, NoAlignRequirements},
                        {v8s16, p0, 128, NoAlignRequirements},
                        {v4s32, p0, 128, NoAlignRequirements},
                        {v2s64, p0, 128, NoAlignRequirements}}))
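  // Predicate for custom legalization of scalar G_LOAD/G_STORE: accesses of at
  // most 64 bits that are unaligned on a subtarget without hardware support
  // for unaligned access are handled in legalizeCustom() below.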
        if (!Query.Types[0].isScalar() || Query.Types[1] != p0 ||
            Query.Types[0] == s1)
          return false;

        unsigned Size = Query.Types[0].getSizeInBits();
        unsigned QueryMemSize = Query.MMODescrs[0].MemoryTy.getSizeInBits();
        assert(QueryMemSize <= Size && "Scalar can't hold MemSize");

        if (Size > 64 || QueryMemSize > 64)
          return false;

        if (!ST.systemSupportsUnalignedAccess() &&
            isUnalignedMemmoryAccess(QueryMemSize,
                                     Query.MMODescrs[0].AlignInBits)) {
          assert(QueryMemSize != 32 && "4 byte load and store are legal");
          return true;
        }
      .legalForTypesWithMemDesc({{s32, p0, s8, 8},
                                 {s32, p0, s16, 8}})
      .clampScalar(0, s32, s32);

      .clampScalar(0, s32, s32);
        if (ST.hasMSA() && CheckTyN(0, Query, {v16s8, v8s16, v4s32, v2s64}))
          return true;
      .legalFor({{s32, s32}})
      .clampScalar(1, s32, s32)

      .clampScalar(1, s32, s32)

      .clampScalar(0, s32, s32);

      .legalFor({{p0, s32}});
        if (ST.hasMips32r2() && CheckTyN(0, Query, {s32}))
          return true;

        if (!ST.hasMips32r2() && CheckTyN(0, Query, {s32}))
          return true;
      .lowerFor({{s32, s32}, {s64, s64}});

      .clampScalar(0, s32, s32)
        if (ST.hasMSA() && CheckTyN(0, Query, {v16s8, v8s16, v4s32, v2s64}))
          return true;
      .libcallFor({s32, s64});

      .libcallForCartesianProduct({s64}, {s64, s32})

      .lowerForCartesianProduct({s32}, {s64, s32})

      .libcallForCartesianProduct({s64, s32}, {s64})

      .customForCartesianProduct({s64, s32}, {s32})
  verify(*ST.getInstrInfo());
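// Excerpts from MipsLegalizerInfo::legalizeCustom(): an unaligned G_LOAD or
// G_STORE is split into smaller accesses, and G_UITOFP from s32 is expanded
// with a double-precision trick (see below).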
  using namespace TargetOpcode;

  switch (MI.getOpcode()) {
    unsigned MemSize = (**MI.memoperands_begin()).getSize();
    unsigned Size = MRI.getType(Val).getSizeInBits();

    assert(MemSize <= 8 && "MemSize is too large");
    assert(Size <= 64 && "Scalar size is too large");
    unsigned P2HalfMemSize, RemMemSize;
    if (isPowerOf2_64(MemSize)) {
      P2HalfMemSize = RemMemSize = MemSize / 2;
    } else {
      P2HalfMemSize = 1 << Log2_32(MemSize);
      RemMemSize = MemSize - P2HalfMemSize;
    }
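    // The access is split into a power-of-two chunk (P2HalfMemSize) and a
    // remainder (RemMemSize); the remainder is accessed at an offset of
    // P2HalfMemSize bytes from BaseAddr.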
    Register BaseAddr = MI.getOperand(1).getReg();
    LLT PtrTy = MRI.getType(BaseAddr);
    if (MI.getOpcode() == G_STORE) {

      auto C_P2HalfMemSize = MIRBuilder.buildConstant(s32, P2HalfMemSize);
    if (MI.getOpcode() == G_STORE && MemSize <= 4) {
      MIRBuilder.buildStore(Val, BaseAddr, *P2HalfMemOp);
      auto C_P2Half_InBits = MIRBuilder.buildConstant(s32, P2HalfMemSize * 8);
      auto Shift = MIRBuilder.buildLShr(s32, Val, C_P2Half_InBits);

      MIRBuilder.buildStore(Unmerge.getReg(0), BaseAddr, *P2HalfMemOp);
    if (MI.getOpcode() == G_LOAD) {

      MIRBuilder.buildLoad(Val, BaseAddr, *Load4MMO);

      auto Load = MIRBuilder.buildLoad(s32, BaseAddr, *Load4MMO);

      auto C_P2HalfMemSize = MIRBuilder.buildConstant(s32, P2HalfMemSize);

      auto Load_P2Half = MIRBuilder.buildLoad(s32, BaseAddr, *P2HalfMemOp);
    MI.eraseFromParent();
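  // G_UITOFP(s32) expansion: the 32-bit source becomes the low word of a
  // double whose high word is 0x43300000, giving the value 2^52 + Src;
  // subtracting 2^52 (bit pattern 0x4330000000000000) then yields the
  // converted value.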
    LLT DstTy = MRI.getType(Dst);
    LLT SrcTy = MRI.getType(Src);

    if (DstTy != s32 && DstTy != s64)
      return false;

    auto C_HiMask = MIRBuilder.buildConstant(s32, UINT32_C(0x43300000));

    auto TwoP52FP = MIRBuilder.buildFConstant(
        s64, llvm::bit_cast<double>(UINT64_C(0x4330000000000000)));

    MIRBuilder.buildFSub(Dst, Bitcast, TwoP52FP);

    MI.eraseFromParent();
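// SelectMSA3OpIntrinsic(): replace an MSA intrinsic with the target
// instruction Opcode, forwarding the result and the two source operands, then
// constrain the register operands.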
  assert(ST.hasMSA() && "MSA intrinsic not supported on target without MSA.");

      .add(MI.getOperand(0))
      .add(MI.getOperand(2))
      .add(MI.getOperand(3))

                                    *ST.getRegBankInfo()))

  MI.eraseFromParent();
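// MSA3OpIntrinsicToGeneric(): rewrite a three-operand MSA intrinsic as the
// equivalent generic instruction (e.g. G_ADD), which is then legalized and
// selected as usual.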
  assert(ST.hasMSA() && "MSA intrinsic not supported on target without MSA.");

      .add(MI.getOperand(0))
      .add(MI.getOperand(2))
      .add(MI.getOperand(3));
  MI.eraseFromParent();
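// MSA2OpIntrinsicToGeneric(): the two-operand variant, forwarding the result
// and a single source operand to the generic opcode.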
  assert(ST.hasMSA() && "MSA intrinsic not supported on target without MSA.");

      .add(MI.getOperand(0))
      .add(MI.getOperand(2));
  MI.eraseFromParent();
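// Excerpts from MipsLegalizerInfo::legalizeIntrinsic(): trap is replaced with
// a target trap instruction, and vacopy is expanded into a pointer load from
// the source va_list followed by a store to the destination, each with its
// own MachineMemOperand.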
  switch (cast<GIntrinsic>(MI).getIntrinsicID()) {
  case Intrinsic::trap: {

    MI.eraseFromParent();

  case Intrinsic::vacopy: {

            *MI.getMF()->getMachineMemOperand(

            *MI.getMF()->getMachineMemOperand(

    MI.eraseFromParent();
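  // MSA vector intrinsics: the elided return statements dispatch each
  // intrinsic to MSA2OpIntrinsicToGeneric(), MSA3OpIntrinsicToGeneric() or
  // SelectMSA3OpIntrinsic() with the matching opcode (presumably G_ADD for
  // addv, G_SUB for subv, G_MUL for mulv, and so on).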
  case Intrinsic::mips_addv_b:
  case Intrinsic::mips_addv_h:
  case Intrinsic::mips_addv_w:
  case Intrinsic::mips_addv_d:

  case Intrinsic::mips_addvi_b:
  case Intrinsic::mips_addvi_h:
  case Intrinsic::mips_addvi_w:
  case Intrinsic::mips_addvi_d:

  case Intrinsic::mips_subv_b:
  case Intrinsic::mips_subv_h:
  case Intrinsic::mips_subv_w:
  case Intrinsic::mips_subv_d:

  case Intrinsic::mips_subvi_b:
  case Intrinsic::mips_subvi_h:
  case Intrinsic::mips_subvi_w:
  case Intrinsic::mips_subvi_d:

  case Intrinsic::mips_mulv_b:
  case Intrinsic::mips_mulv_h:
  case Intrinsic::mips_mulv_w:
  case Intrinsic::mips_mulv_d:

  case Intrinsic::mips_div_s_b:
  case Intrinsic::mips_div_s_h:
  case Intrinsic::mips_div_s_w:
  case Intrinsic::mips_div_s_d:

  case Intrinsic::mips_mod_s_b:
  case Intrinsic::mips_mod_s_h:
  case Intrinsic::mips_mod_s_w:
  case Intrinsic::mips_mod_s_d:

  case Intrinsic::mips_div_u_b:
  case Intrinsic::mips_div_u_h:
  case Intrinsic::mips_div_u_w:
  case Intrinsic::mips_div_u_d:

  case Intrinsic::mips_mod_u_b:
  case Intrinsic::mips_mod_u_h:
  case Intrinsic::mips_mod_u_w:
  case Intrinsic::mips_mod_u_d:

  case Intrinsic::mips_fadd_w:
  case Intrinsic::mips_fadd_d:

  case Intrinsic::mips_fsub_w:
  case Intrinsic::mips_fsub_d:

  case Intrinsic::mips_fmul_w:
  case Intrinsic::mips_fmul_d:

  case Intrinsic::mips_fdiv_w:
  case Intrinsic::mips_fdiv_d:

  case Intrinsic::mips_fmax_a_w:
  case Intrinsic::mips_fmax_a_d:

  case Intrinsic::mips_fsqrt_w:
  case Intrinsic::mips_fsqrt_d: