#define DEBUG_TYPE "aarch64-postlegalizer-lowering"

using namespace MIPatternMatch;
using namespace AArch64GISelUtils;
// ShuffleVectorPseudo constructor (fragment)
  ShuffleVectorPseudo(unsigned Opc, Register Dst,
                      std::initializer_list<SrcOp> SrcOps)
73 "Only possible block sizes for REV are: 16, 32, 64");
74 assert(EltSize != 64 &&
"EltSize cannot be 64 for REV mask.");
76 unsigned BlockElts = M[0] + 1;
85 for (
unsigned i = 0; i < NumElts; ++i) {
89 if (
static_cast<unsigned>(M[i]) !=
90 (i - i % BlockElts) + (BlockElts - 1 - i % BlockElts))
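// A minimal standalone sketch (not LLVM code) of the REV mask formula used
// above: within each block of BlockElts lanes the indices appear reversed.
// The helper name and the sizes chosen below are illustrative only.
#include <cstdio>
#include <vector>

static std::vector<int> expectedRevMask(unsigned NumElts, unsigned EltSize,
                                        unsigned BlockSize) {
  unsigned BlockElts = BlockSize / EltSize;
  std::vector<int> M(NumElts);
  for (unsigned i = 0; i < NumElts; ++i)
    M[i] = (i - i % BlockElts) + (BlockElts - 1 - i % BlockElts);
  return M;
}

int main() {
  // REV32 on 8 x i16: lanes swap in pairs within each 32-bit block.
  for (int Idx : expectedRevMask(/*NumElts=*/8, /*EltSize=*/16,
                                 /*BlockSize=*/32))
    std::printf("%d ", Idx); // prints: 1 0 3 2 5 4 7 6
  std::printf("\n");
  return 0;
}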
// isTRNMask
static bool isTRNMask(ArrayRef<int> M, unsigned NumElts,
                      unsigned &WhichResult) {
  if (NumElts % 2 != 0)
    return false;
  WhichResult = (M[0] == 0 ? 0 : 1);
  for (unsigned i = 0; i < NumElts; i += 2) {
    if ((M[i] >= 0 && static_cast<unsigned>(M[i]) != i + WhichResult) ||
        (M[i + 1] >= 0 &&
         static_cast<unsigned>(M[i + 1]) != i + NumElts + WhichResult))
      return false;
  }
  return true;
}

// getExtMask (fragments)
  auto FirstRealElt = find_if(M, [](int Elt) { return Elt >= 0; });
  if (FirstRealElt == M.end())
    return std::nullopt;
  // ...
  APInt ExpectedElt = APInt(MaskBits, *FirstRealElt + 1);
  // ...
  if (any_of(make_range(std::next(FirstRealElt), M.end()),
             [&ExpectedElt](int Elt) { return Elt != ExpectedElt++ && Elt >= 0; }))
    return std::nullopt;
  // ...
  bool ReverseExt = false;
  // ...
  return std::make_pair(ReverseExt, Imm);

// isUZPMask (fragments)
static bool isUZPMask(ArrayRef<int> M, unsigned NumElts,
                      unsigned &WhichResult) {
  WhichResult = (M[0] == 0 ? 0 : 1);
  for (unsigned i = 0; i != NumElts; ++i) {
    // ...
    if (static_cast<unsigned>(M[i]) != 2 * i + WhichResult)
      return false;
  }

// isZipMask (fragments)
static bool isZipMask(ArrayRef<int> M, unsigned NumElts,
                      unsigned &WhichResult) {
  if (NumElts % 2 != 0)
    return false;
  // ...
  WhichResult = (M[0] == 0 ? 0 : 1);
  unsigned Idx = WhichResult * NumElts / 2;
  for (unsigned i = 0; i != NumElts; i += 2) {
    if ((M[i] >= 0 && static_cast<unsigned>(M[i]) != Idx) ||
        (M[i + 1] >= 0 &&
         static_cast<unsigned>(M[i + 1]) != Idx + NumElts))
      return false;
    // ...
  }
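// A standalone sketch (not LLVM code) of what the three predicates above
// accept, for a 4-element shuffle of sources A = {a0..a3} and B = {b0..b3}
// (mask indices 0-3 pick from A, 4-7 from B). The masks are illustrative.
#include <cstdio>
#include <vector>

static void printMask(const char *Name, const std::vector<int> &M) {
  std::printf("%s:", Name);
  for (int Elt : M)
    std::printf(" %d", Elt);
  std::printf("\n");
}

int main() {
  printMask("TRN1", {0, 4, 2, 6}); // a0 b0 a2 b2: interleave even lanes
  printMask("TRN2", {1, 5, 3, 7}); // a1 b1 a3 b3: interleave odd lanes
  printMask("UZP1", {0, 2, 4, 6}); // a0 a2 b0 b2: even elements of A:B
  printMask("UZP2", {1, 3, 5, 7}); // a1 a3 b1 b3: odd elements of A:B
  printMask("ZIP1", {0, 4, 1, 5}); // a0 b0 a1 b1: zip the low halves
  printMask("ZIP2", {2, 6, 3, 7}); // a2 b2 a3 b3: zip the high halves
  return 0;
}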
// isINSMask
static std::optional<std::pair<bool, int>> isINSMask(ArrayRef<int> M,
                                                     int NumInputElements) {
  if (M.size() != static_cast<size_t>(NumInputElements))
    return std::nullopt;
  int NumLHSMatch = 0, NumRHSMatch = 0;
  int LastLHSMismatch = -1, LastRHSMismatch = -1;
  for (int Idx = 0; Idx < NumInputElements; ++Idx) {
    // ...
    M[Idx] == Idx ? ++NumLHSMatch : LastLHSMismatch = Idx;
    M[Idx] == Idx + NumInputElements ? ++NumRHSMatch : LastRHSMismatch = Idx;
  }
  const int NumNeededToMatch = NumInputElements - 1;
  if (NumLHSMatch == NumNeededToMatch)
    return std::make_pair(true, LastLHSMismatch);
  if (NumRHSMatch == NumNeededToMatch)
    return std::make_pair(false, LastRHSMismatch);
  return std::nullopt;
}
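// A standalone sketch (not LLVM code) of the counting scheme above: a mask is
// an "INS" candidate when all but one destination lane copies the matching
// lane of one source, and the single mismatching lane names the element to
// insert. The mask below is illustrative.
#include <cstdio>
#include <vector>

int main() {
  // For two 4-element sources, <0, 1, 6, 3> keeps lanes 0, 1 and 3 of the
  // left source and pulls element 6 (lane 2 of the right source) into lane 2,
  // i.e. an insert of RHS[2] into LHS at lane 2.
  std::vector<int> M = {0, 1, 6, 3};
  int NumLHSMatch = 0, LastLHSMismatch = -1;
  for (int Idx = 0; Idx < 4; ++Idx)
    M[Idx] == Idx ? ++NumLHSMatch : LastLHSMismatch = Idx;
  std::printf("matches=%d, insert into lane %d from element %d\n",
              NumLHSMatch, LastLHSMismatch, M[LastLHSMismatch]);
  return 0;
}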
// matchREV (fragments)
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  // ...
  LLT Ty = MRI.getType(Dst);
  // ...
  if (isREVMask(ShuffleMask, EltSize, NumElts, 64)) {

// matchTRN (fragments)
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  unsigned WhichResult;
  // ...
  unsigned NumElts = MRI.getType(Dst).getNumElements();
  if (!isTRNMask(ShuffleMask, NumElts, WhichResult))
    return false;
  unsigned Opc = (WhichResult == 0) ? AArch64::G_TRN1 : AArch64::G_TRN2;

// matchUZP (fragments)
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  unsigned WhichResult;
  // ...
  unsigned NumElts = MRI.getType(Dst).getNumElements();
  if (!isUZPMask(ShuffleMask, NumElts, WhichResult))
    return false;
  unsigned Opc = (WhichResult == 0) ? AArch64::G_UZP1 : AArch64::G_UZP2;

// matchZip (fragments)
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  unsigned WhichResult;
  // ...
  unsigned NumElts = MRI.getType(Dst).getNumElements();
  if (!isZipMask(ShuffleMask, NumElts, WhichResult))
    return false;
  unsigned Opc = (WhichResult == 0) ? AArch64::G_ZIP1 : AArch64::G_ZIP2;

// matchDupFromInsertVectorElt (fragments)
  auto *InsMI = getOpcodeDef(TargetOpcode::G_INSERT_VECTOR_ELT,
                             MI.getOperand(1).getReg(), MRI);
  // ...
  if (!getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, InsMI->getOperand(1).getReg(),
                    MRI))
    return false;
  // ...
                                  {InsMI->getOperand(2).getReg()});

// matchDupFromBuildVector (fragments)
  assert(Lane >= 0 && "Expected positive lane?");
  // ...
  auto *BuildVecMI = getOpcodeDef(TargetOpcode::G_BUILD_VECTOR,
                                  MI.getOperand(1).getReg(), MRI);
  // ...
  Register Reg = BuildVecMI->getOperand(Lane + 1).getReg();

// matchDup (fragments)
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  // ...
  int Lane = *MaybeLane;

// isSingletonExtMask (fragments)
  unsigned ExpectedElt = M[0];
  for (unsigned I = 1; I < NumElts; ++I) {
    // ...
    if (ExpectedElt == NumElts)
      // ...
    if (ExpectedElt != static_cast<unsigned>(M[I]))
      return false;
  }

// matchEXT (fragments)
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  // ...
  LLT DstTy = MRI.getType(Dst);
  // ...
  auto Mask = MI.getOperand(3).getShuffleMask();
  // ...
  uint64_t ExtFactor = MRI.getType(V1).getScalarSizeInBits() / 8;
  // ...
  if (!getOpcodeDef<GImplicitDef>(V2, MRI) ||
      !isSingletonExtMask(Mask, DstTy))
    return false;
  Imm = Mask[0] * ExtFactor;
  // ...
  std::tie(ReverseExt, Imm) = *ExtInfo;
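// A standalone sketch (not LLVM code) of the EXT immediate computed above:
// the byte offset is the first mask index times the element size in bytes.
// The values chosen are illustrative.
#include <cstdint>
#include <cstdio>

int main() {
  // Shuffle mask <1, 2, 3, 4> over two 4 x i32 sources is a sliding window
  // starting at element 1, so G_EXT gets Imm = 1 * (32 / 8) = 4 bytes.
  const int Mask0 = 1;
  const unsigned ScalarSizeInBits = 32;
  uint64_t ExtFactor = ScalarSizeInBits / 8;
  uint64_t Imm = Mask0 * ExtFactor;
  std::printf("EXT immediate = %llu bytes\n", (unsigned long long)Imm);
  return 0;
}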
// applyShuffleVectorPseudo (fragment)
  MI.eraseFromParent();

// applyEXT (fragments)
                 {MatchInfo.SrcOps[0], MatchInfo.SrcOps[1], Cst});
  MI.eraseFromParent();

// matchINS (fragments)
static bool matchINS(MachineInstr &MI, MachineRegisterInfo &MRI,
                     std::tuple<Register, int, Register, int> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  // ...
  int NumElts = MRI.getType(Dst).getNumElements();
  auto DstIsLeftAndDstLane = isINSMask(ShuffleMask, NumElts);
  if (!DstIsLeftAndDstLane)
    return false;
  // ...
  std::tie(DstIsLeft, DstLane) = *DstIsLeftAndDstLane;
  // ...
  int SrcLane = ShuffleMask[DstLane];
  if (SrcLane >= NumElts) {
    // ...
  }
  MatchInfo = std::make_tuple(DstVec, DstLane, SrcVec, SrcLane);

// applyINS (fragments)
static bool applyINS(MachineInstr &MI, MachineRegisterInfo &MRI,
                     MachineIRBuilder &Builder,
                     std::tuple<Register, int, Register, int> &MatchInfo) {
  // ...
  auto ScalarTy = MRI.getType(Dst).getElementType();
  // ...
  int DstLane, SrcLane;
  std::tie(DstVec, DstLane, SrcVec, SrcLane) = MatchInfo;
  // ...
  auto Extract = Builder.buildExtractVectorElement(ScalarTy, SrcVec, SrcCst);
  // ...
  Builder.buildInsertVectorElement(Dst, DstVec, Extract, DstCst);
  MI.eraseFromParent();

// isVShiftRImm (fragments)
  assert(Ty.isVector() && "vector shift count is not a vector type");
  // ...
  return Cnt >= 1 && Cnt <= ElementBits;

// matchVAshrLshrImm (fragments)
  assert(MI.getOpcode() == TargetOpcode::G_ASHR ||
         MI.getOpcode() == TargetOpcode::G_LSHR);
  LLT Ty = MRI.getType(MI.getOperand(1).getReg());

// applyVAshrLshrImm (fragments)
  unsigned Opc = MI.getOpcode();
  assert(Opc == TargetOpcode::G_ASHR || Opc == TargetOpcode::G_LSHR);
  unsigned NewOpc =
      Opc == TargetOpcode::G_ASHR ? AArch64::G_VASHR : AArch64::G_VLSHR;
  // ...
  MIB.buildInstr(NewOpc, {MI.getOperand(0)}, {MI.getOperand(1), ImmDef});
  MI.eraseFromParent();
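// A standalone sketch (not LLVM code) of the range check isVShiftRImm applies
// before a right shift becomes G_VASHR/G_VLSHR: the splatted shift amount
// must lie between 1 and the element width. The values below are illustrative.
#include <cstdio>

static bool isValidVectorShiftRImm(long long Cnt, unsigned ElementBits) {
  return Cnt >= 1 && Cnt <= ElementBits;
}

int main() {
  // For a <4 x s32> shift, 3 is encodable as an immediate, 0 and 33 are not.
  std::printf("%d %d %d\n", isValidVectorShiftRImm(3, 32),
              isValidVectorShiftRImm(0, 32), isValidVectorShiftRImm(33, 32));
  return 0;
}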
// tryAdjustICmpImmAndPred (fragments)
static std::optional<std::pair<uint64_t, CmpInst::Predicate>>
tryAdjustICmpImmAndPred(Register RHS, CmpInst::Predicate P,
                        const MachineRegisterInfo &MRI) {
  const auto &Ty = MRI.getType(RHS);
  // ...
  unsigned Size = Ty.getSizeInBits();
  assert((Size == 32 || Size == 64) && "Expected 32 or 64 bit compare only?");
  // ...
  uint64_t C = ValAndVReg->Value.getZExtValue();
  // ...
  if ((Size == 64 && static_cast<int64_t>(C) == INT64_MIN) ||
      (Size == 32 && static_cast<int32_t>(C) == INT32_MIN))
    return std::nullopt;
  // ...
  if ((Size == 32 && static_cast<int32_t>(C) == INT32_MAX) ||
      (Size == 64 && static_cast<int64_t>(C) == INT64_MAX))
    return std::nullopt;
  // ...
  if ((Size == 32 && static_cast<uint32_t>(C) == UINT32_MAX) ||
      (Size == 64 && C == UINT64_MAX))
    return std::nullopt;
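// A standalone sketch (not LLVM code) of the adjustment this helper looks for:
// when the compare constant does not fit AArch64's 12-bit (optionally
// LSL #12) arithmetic immediate, nudge it by one and flip the predicate to an
// equivalent form. The constant below is illustrative.
#include <cstdint>
#include <cstdio>

// AArch64 arithmetic immediate: a 12-bit value, optionally shifted left by 12.
static bool isLegalArithImmed(uint64_t C) {
  return (C >> 12 == 0) || ((C & 0xFFFULL) == 0 && (C >> 24 == 0));
}

int main() {
  uint64_t C = 0x1001; // not encodable: needs 13 bits and the low bits are set
  std::printf("0x%llx legal? %d\n", (unsigned long long)C,
              isLegalArithImmed(C));
  // "x ult 0x1001" is equivalent to "x ule 0x1000", whose constant encodes.
  uint64_t Adjusted = C - 1;
  std::printf("0x%llx legal? %d\n", (unsigned long long)Adjusted,
              isLegalArithImmed(Adjusted));
  return 0;
}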
// matchAdjustICmpImmAndPred (fragments)
static bool matchAdjustICmpImmAndPred(
    MachineInstr &MI, const MachineRegisterInfo &MRI,
    std::pair<uint64_t, CmpInst::Predicate> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_ICMP);
  // ...
  MatchInfo = *MaybeNewImmAndPred;

// applyAdjustICmpImmAndPred (fragments)
static bool applyAdjustICmpImmAndPred(
    MachineInstr &MI, std::pair<uint64_t, CmpInst::Predicate> &MatchInfo,
    MachineIRBuilder &MIB, GISelChangeObserver &Observer) {
  // ...
  RHS.setReg(Cst->getOperand(0).getReg());
  MI.getOperand(1).setPredicate(MatchInfo.second);

// matchDupLane (fragments)
static bool matchDupLane(MachineInstr &MI, MachineRegisterInfo &MRI,
                         std::pair<unsigned, int> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  // ...
  const LLT SrcTy = MRI.getType(Src1Reg);
  const LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
  // ...
  if (ScalarSize == 64)
    Opc = AArch64::G_DUPLANE64;
  else if (ScalarSize == 32)
    Opc = AArch64::G_DUPLANE32;
  // ...
  if (ScalarSize == 32)
    Opc = AArch64::G_DUPLANE32;
  // ...
  if (ScalarSize == 16)
    Opc = AArch64::G_DUPLANE16;
  // ...
  Opc = AArch64::G_DUPLANE8;
  // ...
  MatchInfo.first = Opc;
  MatchInfo.second = *LaneIdx;
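// A standalone sketch (not LLVM code) of the pattern matchDupLane looks for:
// a shuffle whose mask repeats one lane index becomes a lane duplicate
// (G_DUPLANE8/16/32/64, chosen by element size). The mask is illustrative.
#include <cstdio>
#include <vector>

// Returns the common lane if every non-negative mask entry is the same,
// otherwise -1 (mirroring the idea behind getSplatIndex).
static int splatIndex(const std::vector<int> &Mask) {
  int Splat = -1;
  for (int Elt : Mask) {
    if (Elt < 0)
      continue;
    if (Splat != -1 && Elt != Splat)
      return -1;
    Splat = Elt;
  }
  return Splat;
}

int main() {
  std::vector<int> Mask = {2, 2, 2, 2}; // v4s32 splat of lane 2
  std::printf("duplicated lane: %d\n", splatIndex(Mask));
  return 0;
}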
// applyDupLane (fragments)
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  // ...
  const LLT SrcTy = MRI.getType(Src1Reg);
  // ...
  B.setInstrAndDebugLoc(MI);
  auto Lane = B.buildConstant(LLT::scalar(64), MatchInfo.second);
  // ...
    assert(MRI.getType(MI.getOperand(0).getReg()).getNumElements() == 2 &&
           "Unexpected dest elements");
    auto Undef = B.buildUndef(SrcTy);
    DupSrc = B.buildConcatVectors(
                  SrcTy.changeElementCount(ElementCount::getFixed(4)),
                  {Src1Reg, Undef.getReg(0)})
                 .getReg(0);
  // ...
  B.buildInstr(MatchInfo.first, {MI.getOperand(0).getReg()}, {DupSrc, Lane});
  MI.eraseFromParent();

// matchBuildVectorToDup (fragments)
  assert(MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR);
  // ...
  int64_t Cst = Splat->getCst();
  return (Cst != 0 && Cst != -1);

// applyBuildVectorToDup (fragments)
  B.setInstrAndDebugLoc(MI);
  B.buildInstr(AArch64::G_DUP, {MI.getOperand(0).getReg()},
               {MI.getOperand(1).getReg()});
  MI.eraseFromParent();

// getCmpOperandFoldingProfit (fragments)
  if (!MRI.hasOneNonDBGUse(CmpOp))
    return 0;
  // ...
    if (MI.getOpcode() == TargetOpcode::G_SEXT_INREG)
      return true;
    if (MI.getOpcode() != TargetOpcode::G_AND)
      return false;
    // ...
    uint64_t Mask = ValAndVReg->Value.getZExtValue();
    return (Mask == 0xFF || Mask == 0xFFFF || Mask == 0xFFFFFFFF);
  // ...
  if (IsSupportedExtend(*Def))
    return 1;
  unsigned Opc = Def->getOpcode();
  if (Opc != TargetOpcode::G_SHL && Opc != TargetOpcode::G_ASHR &&
      Opc != TargetOpcode::G_LSHR)
    return 0;
  // ...
  uint64_t ShiftAmt = MaybeShiftAmt->Value.getZExtValue();
  // ...
  if (IsSupportedExtend(*ShiftLHS))
    return (ShiftAmt <= 4) ? 2 : 1;
  LLT Ty = MRI.getType(Def->getOperand(0).getReg());
  // ...
  if ((ShiftSize == 32 && ShiftAmt <= 31) ||
      (ShiftSize == 64 && ShiftAmt <= 63))
    return 1;

// trySwapICmpOperands (fragments)
  assert(MI.getOpcode() == TargetOpcode::G_ICMP);
  // ...
  auto GetRegForProfit = [&](Register Reg) {
    MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
    return isCMN(Def, Pred, MRI) ? Def->getOperand(2).getReg() : Reg;
  };

// applySwapICmpOperands (fragments)
  MI.getOperand(2).setReg(RHS);
  MI.getOperand(3).setReg(LHS);
// getVectorFCMP (fragments)
  assert(DstTy == MRI.getType(RHS) && "Src and Dst types must match!");
  // ...
  // case AArch64CC::NE:
    auto FCmp = IsZero
                    ? MIB.buildInstr(AArch64::G_FCMEQZ, {DstTy}, {LHS})
                    : MIB.buildInstr(AArch64::G_FCMEQ, {DstTy}, {LHS, RHS});
    return MIB.buildNot(DstTy, FCmp).getReg(0);
  // case AArch64CC::EQ:
    return IsZero
               ? MIB.buildInstr(AArch64::G_FCMEQZ, {DstTy}, {LHS}).getReg(0)
               : MIB.buildInstr(AArch64::G_FCMEQ, {DstTy}, {LHS, RHS})
                     .getReg(0);
  // case AArch64CC::GE:
    return IsZero
               ? MIB.buildInstr(AArch64::G_FCMGEZ, {DstTy}, {LHS}).getReg(0)
               : MIB.buildInstr(AArch64::G_FCMGE, {DstTy}, {LHS, RHS})
                     .getReg(0);
  // case AArch64CC::GT:
    return IsZero
               ? MIB.buildInstr(AArch64::G_FCMGTZ, {DstTy}, {LHS}).getReg(0)
               : MIB.buildInstr(AArch64::G_FCMGT, {DstTy}, {LHS, RHS})
                     .getReg(0);
  // case AArch64CC::LS (note the swapped operands in the non-zero case):
    return IsZero
               ? MIB.buildInstr(AArch64::G_FCMLEZ, {DstTy}, {LHS}).getReg(0)
               : MIB.buildInstr(AArch64::G_FCMGE, {DstTy}, {RHS, LHS})
                     .getReg(0);
  // case AArch64CC::MI (note the swapped operands in the non-zero case):
    return IsZero
               ? MIB.buildInstr(AArch64::G_FCMLTZ, {DstTy}, {LHS}).getReg(0)
               : MIB.buildInstr(AArch64::G_FCMGT, {DstTy}, {RHS, LHS})
                     .getReg(0);
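// A standalone sketch (not LLVM code) of the shape of the lowering driven by
// the helpers above: each IR predicate becomes one (or, for ord/uno, two)
// AArch64 compare pseudos, with operands swapped for "less than" forms and a
// *Z variant when the right-hand side is a zero splat. Purely illustrative.
#include <cstdio>
#include <string>

static std::string describeLowering(const std::string &Pred, bool RhsIsZero) {
  if (Pred == "oeq")
    return RhsIsZero ? "G_FCMEQZ lhs" : "G_FCMEQ lhs, rhs";
  if (Pred == "ogt")
    return RhsIsZero ? "G_FCMGTZ lhs" : "G_FCMGT lhs, rhs";
  if (Pred == "olt") // "lhs < rhs" is emitted as "rhs > lhs"
    return RhsIsZero ? "G_FCMLTZ lhs" : "G_FCMGT rhs, lhs";
  if (Pred == "une") // no native mask op: compare equal, then invert
    return RhsIsZero ? "not(G_FCMEQZ lhs)" : "not(G_FCMEQ lhs, rhs)";
  return "two compares ORed together (ord/uno style)";
}

int main() {
  std::printf("%s\n", describeLowering("olt", /*RhsIsZero=*/false).c_str());
  return 0;
}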
// lowerVectorFCMP (fragments)
  assert(MI.getOpcode() == TargetOpcode::G_FCMP);
  // ...
  LLT DstTy = MRI.getType(Dst);
  if (!DstTy.isVector() || !ST.hasNEON())
    return false;
  // ...
  unsigned EltSize = MRI.getType(LHS).getScalarSizeInBits();
  if (EltSize != 32 && EltSize != 64)
    return false;
  // ...
  bool IsZero = Splat && Splat->isCst() && Splat->getCst() == 0;
  // ...
  if (Pred == CmpInst::Predicate::FCMP_ORD && IsZero) {
    // ...
  }
  // ...
  bool NoNans = ST.getTargetLowering()->getTargetMachine().Options.NoNaNsFPMath;
  // ...
    auto Cmp2Dst = Cmp2(MIB);
    auto Cmp1Dst = Cmp(MIB);
  // ...
  MRI.replaceRegWith(Dst, CmpRes);
  MI.eraseFromParent();

// matchFormTruncstore (fragments)
  assert(MI.getOpcode() == TargetOpcode::G_STORE);
  // ...
  if (MRI.getType(DstReg).isVector())
    return false;
  // ...
  return MRI.getType(SrcReg).getSizeInBits() <= 64;

// applyFormTruncstore (fragments)
  assert(MI.getOpcode() == TargetOpcode::G_STORE);
  // ...
  MI.getOperand(0).setReg(SrcReg);

// matchVectorSextInReg (fragments)
  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
  // ...
  LLT DstTy = MRI.getType(DstReg);

// applyVectorSextInReg (fragments)
  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
  B.setInstrAndDebugLoc(MI);
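// A standalone sketch (not LLVM code) of what lowering a sext_inreg amounts
// to: shift the narrow value up to the sign bit, then arithmetic-shift it
// back down. The widths below are illustrative.
#include <cstdint>
#include <cstdio>

static int32_t signExtendInReg(uint32_t V, unsigned FromBits) {
  unsigned Shift = 32 - FromBits;
  // Arithmetic right shift on the signed type performs the sign extension.
  return static_cast<int32_t>(V << Shift) >> Shift;
}

int main() {
  // Treat the low 8 bits of 0xF0 as a signed value: the result is -16.
  std::printf("%d\n", signExtendInReg(0xF0, 8));
  return 0;
}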
#define AARCH64POSTLEGALIZERLOWERINGHELPER_GENCOMBINERHELPER_DEPS
#include "AArch64GenPostLegalizeGILowering.inc"
#undef AARCH64POSTLEGALIZERLOWERINGHELPER_GENCOMBINERHELPER_DEPS

#define AARCH64POSTLEGALIZERLOWERINGHELPER_GENCOMBINERHELPER_H
#include "AArch64GenPostLegalizeGILowering.inc"
#undef AARCH64POSTLEGALIZERLOWERINGHELPER_GENCOMBINERHELPER_H

class AArch64PostLegalizerLoweringInfo : public CombinerInfo {
public:
  AArch64GenPostLegalizerLoweringHelperRuleConfig GeneratedRuleCfg;

  AArch64PostLegalizerLoweringInfo(bool OptSize, bool MinSize)
      : CombinerInfo(/*AllowIllegalOps*/ true, /*ShouldLegalizeIllegal*/ false,
                     /*LegalizerInfo*/ nullptr, /*EnableOpt*/ true, OptSize,
                     MinSize) {
    if (!GeneratedRuleCfg.parseCommandLineOption())
      report_fatal_error("Invalid rule identifier");
  }
  // ...

// AArch64PostLegalizerLoweringInfo::combine (fragments)
  AArch64GenPostLegalizerLoweringHelper Generated(GeneratedRuleCfg);
  return Generated.tryCombineAll(Observer, MI, B, Helper);

#define AARCH64POSTLEGALIZERLOWERINGHELPER_GENCOMBINERHELPER_CPP
#include "AArch64GenPostLegalizeGILowering.inc"
#undef AARCH64POSTLEGALIZERLOWERINGHELPER_GENCOMBINERHELPER_CPP
// AArch64PostLegalizerLowering pass boilerplate (fragments)
  AArch64PostLegalizerLowering();

  StringRef getPassName() const override {
    return "AArch64PostLegalizerLowering";
  }

void AArch64PostLegalizerLowering::getAnalysisUsage(AnalysisUsage &AU) const {
  // ...
}

AArch64PostLegalizerLowering::AArch64PostLegalizerLowering()
    : MachineFunctionPass(ID) {
  initializeAArch64PostLegalizerLoweringPass(*PassRegistry::getPassRegistry());
}

bool AArch64PostLegalizerLowering::runOnMachineFunction(MachineFunction &MF) {
  if (MF.getProperties().hasProperty(
          MachineFunctionProperties::Property::FailedISel))
    return false;
  assert(MF.getProperties().hasProperty(
             MachineFunctionProperties::Property::Legalized) &&
         "Expected a legalized function?");
  auto *TPC = &getAnalysis<TargetPassConfig>();
  // ...
  AArch64PostLegalizerLoweringInfo PCInfo(F.hasOptSize(), F.hasMinSize());
  // ...
  return C.combineMachineInstrs(MF, /*CSEInfo*/ nullptr);
}

char AArch64PostLegalizerLowering::ID = 0;
INITIALIZE_PASS_BEGIN(AArch64PostLegalizerLowering, DEBUG_TYPE,
                      "Lower AArch64 MachineInstrs after legalization", false,
                      false)
// ...

FunctionPass *createAArch64PostLegalizerLowering() {
  return new AArch64PostLegalizerLowering();
}
Function index (from the file's Doxygen briefs):

static bool lowerVectorFCMP(MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &MIB)
    Try to lower a vector G_FCMP MI into an AArch64-specific pseudo.
std::optional<std::pair<uint64_t, CmpInst::Predicate>> tryAdjustICmpImmAndPred(Register RHS, CmpInst::Predicate P, const MachineRegisterInfo &MRI)
    Determine if it is possible to modify the RHS and predicate P of a G_ICMP instruction such that the r…
static bool matchVAshrLshrImm(MachineInstr &MI, MachineRegisterInfo &MRI, int64_t &Imm)
    Match a vector G_ASHR or G_LSHR with a valid immediate shift.
static void applyVectorSextInReg(MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &B, GISelChangeObserver &Observer)
static bool applyINS(MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &Builder, std::tuple<Register, int, Register, int> &MatchInfo)
static bool matchDupFromBuildVector(int Lane, MachineInstr &MI, MachineRegisterInfo &MRI, ShuffleVectorPseudo &MatchInfo)
    Helper function for matchDup.
static std::function<Register(MachineIRBuilder &)> getVectorFCMP(AArch64CC::CondCode CC, Register LHS, Register RHS, bool IsZero, bool NoNans, MachineRegisterInfo &MRI)
static bool matchUZP(MachineInstr &MI, MachineRegisterInfo &MRI, ShuffleVectorPseudo &MatchInfo)
static bool matchTRN(MachineInstr &MI, MachineRegisterInfo &MRI, ShuffleVectorPseudo &MatchInfo)
static bool applyVAshrLshrImm(MachineInstr &MI, MachineRegisterInfo &MRI, int64_t &Imm)
static bool isVShiftRImm(Register Reg, MachineRegisterInfo &MRI, LLT Ty, int64_t &Cnt)
    isVShiftRImm - Check if this is a valid vector for the immediate operand of a vector shift right oper…
bool matchDupLane(MachineInstr &MI, MachineRegisterInfo &MRI, std::pair<unsigned, int> &MatchInfo)
static bool matchINS(MachineInstr &MI, MachineRegisterInfo &MRI, std::tuple<Register, int, Register, int> &MatchInfo)
    Match a G_SHUFFLE_VECTOR with a mask which corresponds to a G_INSERT_VECTOR_ELT and G_EXTRACT_VECTOR_…
static bool matchDup(MachineInstr &MI, MachineRegisterInfo &MRI, ShuffleVectorPseudo &MatchInfo)
static bool matchDupFromInsertVectorElt(int Lane, MachineInstr &MI, MachineRegisterInfo &MRI, ShuffleVectorPseudo &MatchInfo)
    Helper function for matchDup.
static std::optional<std::pair<bool, int>> isINSMask(ArrayRef<int> M, int NumInputElements)
    Helper function for matchINS.
static bool trySwapICmpOperands(MachineInstr &MI, const MachineRegisterInfo &MRI)
static bool applyFormTruncstore(MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &B, GISelChangeObserver &Observer, Register &SrcReg)
static bool matchEXT(MachineInstr &MI, MachineRegisterInfo &MRI, ShuffleVectorPseudo &MatchInfo)
static bool matchZip(MachineInstr &MI, MachineRegisterInfo &MRI, ShuffleVectorPseudo &MatchInfo)
static bool matchVectorSextInReg(MachineInstr &MI, MachineRegisterInfo &MRI)
static bool applySwapICmpOperands(MachineInstr &MI, GISelChangeObserver &Observer)
static bool matchBuildVectorToDup(MachineInstr &MI, MachineRegisterInfo &MRI)
bool applyAdjustICmpImmAndPred(MachineInstr &MI, std::pair<uint64_t, CmpInst::Predicate> &MatchInfo, MachineIRBuilder &MIB, GISelChangeObserver &Observer)
bool applyDupLane(MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &B, std::pair<unsigned, int> &MatchInfo)
static bool matchFormTruncstore(MachineInstr &MI, MachineRegisterInfo &MRI, Register &SrcReg)
static bool isZipMask(ArrayRef<int> M, unsigned NumElts, unsigned &WhichResult)
static bool matchREV(MachineInstr &MI, MachineRegisterInfo &MRI, ShuffleVectorPseudo &MatchInfo)
static bool isTRNMask(ArrayRef<int> M, unsigned NumElts, unsigned &WhichResult)
    Determines if M is a shuffle vector mask for a TRN of NumElts.
static bool isUZPMask(ArrayRef<int> M, unsigned NumElts, unsigned &WhichResult)
    Determines if M is a shuffle vector mask for a UZP of NumElts.
static bool applyShuffleVectorPseudo(MachineInstr &MI, ShuffleVectorPseudo &MatchInfo)
    Replace a G_SHUFFLE_VECTOR instruction with a pseudo.
bool matchAdjustICmpImmAndPred(MachineInstr &MI, const MachineRegisterInfo &MRI, std::pair<uint64_t, CmpInst::Predicate> &MatchInfo)
    Determine whether or not it is possible to update the RHS and predicate of a G_ICMP instruction such …
static bool isREVMask(ArrayRef<int> M, unsigned EltSize, unsigned NumElts, unsigned BlockSize)
    Check if a vector shuffle corresponds to a REV instruction with the specified blocksize.
static bool isSingletonExtMask(ArrayRef<int> M, LLT Ty)
static std::optional<std::pair<bool, uint64_t>> getExtMask(ArrayRef<int> M, unsigned NumElts)
    Check if a G_EXT instruction can handle a shuffle mask M when the vector sources of the shuffle are d…
static unsigned getCmpOperandFoldingProfit(Register CmpOp, const MachineRegisterInfo &MRI)
static bool applyEXT(MachineInstr &MI, ShuffleVectorPseudo &MatchInfo)
    Replace a G_SHUFFLE_VECTOR instruction with G_EXT.
static bool applyBuildVectorToDup(MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &B)
struct ShuffleVectorPseudo
    Represents a pseudo instruction which replaces a G_SHUFFLE_VECTOR.
    unsigned Opc: opcode for the instruction (e.g. G_ZIP1).
    Register Dst: destination register.
    SmallVector<SrcOp, 2> SrcOps: source registers.
    ShuffleVectorPseudo(unsigned Opc, Register Dst, std::initializer_list<SrcOp> SrcOps)
    ShuffleVectorPseudo() = default