#define GET_GICOMBINER_DEPS
#include "AArch64GenPostLegalizeGILowering.inc"
#undef GET_GICOMBINER_DEPS

#define DEBUG_TYPE "aarch64-postlegalizer-lowering"

using namespace MIPatternMatch;
using namespace AArch64GISelUtils;

#define GET_GICOMBINER_TYPES
#include "AArch64GenPostLegalizeGILowering.inc"
#undef GET_GICOMBINER_TYPES
/// Represents a pseudo instruction which replaces a G_SHUFFLE_VECTOR.
struct ShuffleVectorPseudo {
  unsigned Opc;                 ///< Opcode for this instruction.
  Register Dst;                 ///< Destination register.
  SmallVector<SrcOp, 2> SrcOps; ///< Source registers.
  ShuffleVectorPseudo(unsigned Opc, Register Dst,
                      std::initializer_list<SrcOp> SrcOps)
      : Opc(Opc), Dst(Dst), SrcOps(SrcOps) {}
  ShuffleVectorPseudo() = default;
};
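
// isREVMask (fragment below): a shuffle mask matches REV<BlockSize> when it
// reverses the elements inside every block of BlockSize bits. For example,
// with 16-bit elements and 32-bit blocks (REV32) on a 4-element vector, the
// expected mask is <1, 0, 3, 2>.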
84 "Only possible block sizes for REV are: 16, 32, 64");
85 assert(EltSize != 64 &&
"EltSize cannot be 64 for REV mask.");
87 unsigned BlockElts =
M[0] + 1;
96 for (
unsigned i = 0; i < NumElts; ++i) {
100 if (
static_cast<unsigned>(M[i]) !=
101 (i - i % BlockElts) + (BlockElts - 1 - i % BlockElts))
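
// isTRNMask (fragment below): TRN1/TRN2 interleave the even (TRN1) or odd
// (TRN2) lanes of the two sources, e.g. <0, 4, 2, 6> for TRN1 of two
// 4-element vectors.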
  if (NumElts % 2 != 0)
    return false;
  WhichResult = (M[0] == 0 ? 0 : 1);
  for (unsigned i = 0; i < NumElts; i += 2) {
    if ((M[i] >= 0 && static_cast<unsigned>(M[i]) != i + WhichResult) ||
        (M[i + 1] >= 0 &&
         static_cast<unsigned>(M[i + 1]) != i + NumElts + WhichResult))
      return false;
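
// getExtMask (below): an EXT instruction extracts a contiguous window from
// the concatenation of its two sources, so a matching shuffle mask must be a
// consecutive run of indices (undefs allowed). The result is (ReverseExt,
// Imm): whether the sources must be swapped, and the lane offset at which the
// window starts.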
std::optional<std::pair<bool, uint64_t>> getExtMask(ArrayRef<int> M,
                                                    unsigned NumElts) {
  // Look for the first non-undef element.
  auto FirstRealElt = find_if(M, [](int Elt) { return Elt >= 0; });
  if (FirstRealElt == M.end())
    return std::nullopt;

  APInt ExpectedElt = APInt(MaskBits, *FirstRealElt + 1);

  // All following defined elements must be consecutive.
  if (any_of(
          make_range(std::next(FirstRealElt), M.end()),
          [&ExpectedElt](int Elt) { return Elt != ExpectedElt++ && Elt >= 0; }))
    return std::nullopt;

  bool ReverseExt = false;

  return std::make_pair(ReverseExt, Imm);
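
// isUZPMask (fragment below): UZP1/UZP2 pick the even or odd lanes of the
// concatenated sources, e.g. <0, 2, 4, 6> for UZP1 of two 4-element vectors.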
  WhichResult = (M[0] == 0 ? 0 : 1);
  for (unsigned i = 0; i != NumElts; ++i) {
    if (static_cast<unsigned>(M[i]) != 2 * i + WhichResult)
      return false;
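
// isZipMask (below): ZIP1/ZIP2 interleave the low or high halves of the two
// sources, e.g. <0, 4, 1, 5> for ZIP1 and <2, 6, 3, 7> for ZIP2 of two
// 4-element vectors.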
bool isZipMask(ArrayRef<int> M, unsigned NumElts, unsigned &WhichResult) {
  if (NumElts % 2 != 0)
    return false;
  // 0 means use ZIP1, 1 means use ZIP2.
  WhichResult = (M[0] == 0 ? 0 : 1);
  unsigned Idx = WhichResult * NumElts / 2;
  for (unsigned i = 0; i != NumElts; i += 2) {
    if ((M[i] >= 0 && static_cast<unsigned>(M[i]) != Idx) ||
        (M[i + 1] >= 0 && static_cast<unsigned>(M[i + 1]) != Idx + NumElts))
      return false;
    Idx += 1;
  }
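
// isINSMask (fragment below): recognizes a shuffle in which every lane but
// one is an identity copy of a single source; the remaining lane can then be
// lowered to one INS (insert element) instruction. The result says which
// source supplies the bulk of the lanes and which lane is the odd one out.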
              int NumInputElements) {
  if (M.size() != static_cast<size_t>(NumInputElements))
    return std::nullopt;
  int NumLHSMatch = 0, NumRHSMatch = 0;
  int LastLHSMismatch = -1, LastRHSMismatch = -1;
  for (int Idx = 0; Idx < NumInputElements; ++Idx) {
    M[Idx] == Idx ? ++NumLHSMatch : LastLHSMismatch = Idx;
    M[Idx] == Idx + NumInputElements ? ++NumRHSMatch : LastRHSMismatch = Idx;
  }
  const int NumNeededToMatch = NumInputElements - 1;
  if (NumLHSMatch == NumNeededToMatch)
    return std::make_pair(true, LastLHSMismatch);
  if (NumRHSMatch == NumNeededToMatch)
    return std::make_pair(false, LastRHSMismatch);
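
// The match*/apply* pairs below rewrite a G_SHUFFLE_VECTOR whose mask fits
// one of the patterns above into the corresponding AArch64 pseudo
// (G_REV64, G_TRN1/2, G_UZP1/2, G_ZIP1/2, G_DUP, G_EXT, ...), recording the
// opcode and operands in a ShuffleVectorPseudo so one apply routine can emit
// the replacement.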
              ShuffleVectorPseudo &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  LLT Ty = MRI.getType(Dst);
  if (isREVMask(ShuffleMask, EltSize, NumElts, 64)) {
    MatchInfo = ShuffleVectorPseudo(AArch64::G_REV64, Dst, {Src});
    return true;
  }
              ShuffleVectorPseudo &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  unsigned WhichResult;
  unsigned NumElts = MRI.getType(Dst).getNumElements();
  if (!isTRNMask(ShuffleMask, NumElts, WhichResult))
    return false;
  unsigned Opc = (WhichResult == 0) ? AArch64::G_TRN1 : AArch64::G_TRN2;
  MatchInfo = ShuffleVectorPseudo(Opc, Dst, {V1, V2});
  return true;
              ShuffleVectorPseudo &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  unsigned WhichResult;
  unsigned NumElts = MRI.getType(Dst).getNumElements();
  if (!isUZPMask(ShuffleMask, NumElts, WhichResult))
    return false;
  unsigned Opc = (WhichResult == 0) ? AArch64::G_UZP1 : AArch64::G_UZP2;
  MatchInfo = ShuffleVectorPseudo(Opc, Dst, {V1, V2});
  return true;
              ShuffleVectorPseudo &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  unsigned WhichResult;
  unsigned NumElts = MRI.getType(Dst).getNumElements();
  if (!isZipMask(ShuffleMask, NumElts, WhichResult))
    return false;
  unsigned Opc = (WhichResult == 0) ? AArch64::G_ZIP1 : AArch64::G_ZIP2;
  MatchInfo = ShuffleVectorPseudo(Opc, Dst, {V1, V2});
  return true;
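
// The next two helpers recognize broadcasts: a shuffle that splats lane 0 of
// a G_INSERT_VECTOR_ELT into an undef vector, or a constant lane of a
// G_BUILD_VECTOR, is replaced by a single G_DUP of the scalar source.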
              ShuffleVectorPseudo &MatchInfo) {
  // Match a shuffle of a G_INSERT_VECTOR_ELT into an undef vector.
  auto *InsMI = getOpcodeDef(TargetOpcode::G_INSERT_VECTOR_ELT,
                             MI.getOperand(1).getReg(), MRI);
  if (!getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, InsMI->getOperand(1).getReg(),
                    MRI))
    return false;
  MatchInfo = ShuffleVectorPseudo(AArch64::G_DUP, MI.getOperand(0).getReg(),
                                  {InsMI->getOperand(2).getReg()});
              ShuffleVectorPseudo &MatchInfo) {
  assert(Lane >= 0 && "Expected positive lane?");
  auto *BuildVecMI = getOpcodeDef(TargetOpcode::G_BUILD_VECTOR,
                                  MI.getOperand(1).getReg(), MRI);
  Register Reg = BuildVecMI->getOperand(Lane + 1).getReg();
  MatchInfo =
      ShuffleVectorPseudo(AArch64::G_DUP, MI.getOperand(0).getReg(), {Reg});
              ShuffleVectorPseudo &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  int Lane = *MaybeLane;
  if (matchDupFromInsertVectorElt(Lane, MI, MRI, MatchInfo))
    return true;
  if (matchDupFromBuildVector(Lane, MI, MRI, MatchInfo))
    return true;
  unsigned ExpectedElt = M[0];
  for (unsigned I = 1; I < NumElts; ++I) {
    // Increment the expected index; if it wraps, continue from lane zero.
    ++ExpectedElt;
    if (ExpectedElt == NumElts)
      ExpectedElt = 0;
    if (ExpectedElt != static_cast<unsigned>(M[I]))
      return false;
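
// The G_SHUFFLE_VECTOR -> G_EXT combine below: when the mask is a contiguous
// run, the shuffle becomes an EXT that concatenates the two sources and
// extracts a window starting at a byte immediate (lane offset scaled by
// bytes per element).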
              ShuffleVectorPseudo &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  LLT DstTy = MRI.getType(Dst);
  auto Mask = MI.getOperand(3).getShuffleMask();
  uint64_t ExtFactor = MRI.getType(V1).getScalarSizeInBits() / 8;

  // Single-source case: the second operand is undef and the mask rotates the
  // first source into itself.
  if (!getOpcodeDef<GImplicitDef>(V2, MRI) ||
      !isSingletonExtMask(Mask, DstTy))
    return false;
  MatchInfo = ShuffleVectorPseudo(AArch64::G_EXT, Dst, {V1, V1, Imm});

  // Two-source case: take the operand order and offset from getExtMask.
  std::tie(ReverseExt, Imm) = *ExtInfo;
  MatchInfo = ShuffleVectorPseudo(AArch64::G_EXT, Dst, {V1, V2, Imm});
              ShuffleVectorPseudo &MatchInfo) {
  MIRBuilder.buildInstr(MatchInfo.Opc, {MatchInfo.Dst}, MatchInfo.SrcOps);
  MI.eraseFromParent();

  // applyEXT: an EXT immediate of zero is just a copy of the first source;
  // otherwise the immediate is first materialized as a constant register.
  if (MatchInfo.SrcOps[2].getImm() == 0)
    MIRBuilder.buildCopy(MatchInfo.Dst, MatchInfo.SrcOps[0]);
  auto Cst =
      MIRBuilder.buildConstant(LLT::scalar(32), MatchInfo.SrcOps[2].getImm());
  MIRBuilder.buildInstr(MatchInfo.Opc, {MatchInfo.Dst},
                        {MatchInfo.SrcOps[0], MatchInfo.SrcOps[1], Cst});
  MI.eraseFromParent();
              std::tuple<Register, int, Register, int> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  int NumElts = MRI.getType(Dst).getNumElements();
  auto DstIsLeftAndDstLane = isINSMask(ShuffleMask, NumElts);
  if (!DstIsLeftAndDstLane)
    return false;
  std::tie(DstIsLeft, DstLane) = *DstIsLeftAndDstLane;
  int SrcLane = ShuffleMask[DstLane];
  // Lane indices >= NumElts refer to the second shuffle source.
  if (SrcLane >= NumElts) {
  MatchInfo = std::make_tuple(DstVec, DstLane, SrcVec, SrcLane);
              std::tuple<Register, int, Register, int> &MatchInfo) {
  auto ScalarTy = MRI.getType(Dst).getElementType();
  Register DstVec, SrcVec;
  int DstLane, SrcLane;
  std::tie(DstVec, DstLane, SrcVec, SrcLane) = MatchInfo;
  MI.eraseFromParent();
  assert(Ty.isVector() && "vector shift count is not a vector type");
  return Cnt >= 1 && Cnt <= ElementBits;
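
// The vector-shift combine below: a vector G_ASHR/G_LSHR by an in-range
// splat constant becomes AArch64::G_VASHR/G_VLSHR with the shift amount as
// an immediate operand, matching the SSHR/USHR immediate instruction forms.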
  assert(MI.getOpcode() == TargetOpcode::G_ASHR ||
         MI.getOpcode() == TargetOpcode::G_LSHR);
  LLT Ty = MRI.getType(MI.getOperand(1).getReg());

  unsigned Opc = MI.getOpcode();
  assert(Opc == TargetOpcode::G_ASHR || Opc == TargetOpcode::G_LSHR);
  unsigned NewOpc =
      Opc == TargetOpcode::G_ASHR ? AArch64::G_VASHR : AArch64::G_VLSHR;
  auto ImmDef = MIB.buildConstant(LLT::scalar(32), Imm);
  MIB.buildInstr(NewOpc, {MI.getOperand(0)}, {MI.getOperand(1), ImmDef});
  MI.eraseFromParent();
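
// tryAdjustICmpImmAndPred (below): when an integer compare's constant cannot
// be encoded as an arithmetic immediate, nudge it by one and flip the
// predicate (e.g. x slt C becomes x sle C - 1) so the constant becomes
// encodable. The checks below guard against wrapping at the signed and
// unsigned extremes of the 32- and 64-bit ranges.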
std::optional<std::pair<uint64_t, CmpInst::Predicate>>
tryAdjustICmpImmAndPred(Register RHS, CmpInst::Predicate P,
                        const MachineRegisterInfo &MRI) {
  const auto &Ty = MRI.getType(RHS);
  assert((Size == 32 || Size == 64) && "Expected 32 or 64 bit compare only?");
  uint64_t C = ValAndVReg->Value.getZExtValue();

  // x slt c becomes x sle c - 1: impossible when c is the smallest signed
  // value.
      (Size == 32 && static_cast<int32_t>(C) == INT32_MIN))

  // x sgt c becomes x sge c + 1: impossible when c is the largest signed
  // value.
  if ((Size == 32 && static_cast<int32_t>(C) == INT32_MAX) ||

  // x ugt c becomes x uge c + 1: impossible when c is the largest unsigned
  // value.
  if ((Size == 32 && static_cast<uint32_t>(C) == UINT32_MAX) ||
bool matchAdjustICmpImmAndPred(
    MachineInstr &MI, const MachineRegisterInfo &MRI,
    std::pair<uint64_t, CmpInst::Predicate> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_ICMP);
  if (auto MaybeNewImmAndPred = tryAdjustICmpImmAndPred(RHS, Pred, MRI)) {
    MatchInfo = *MaybeNewImmAndPred;
    return true;
  }
  return false;
}

void applyAdjustICmpImmAndPred(
    MachineInstr &MI, std::pair<uint64_t, CmpInst::Predicate> &MatchInfo,
    MachineIRBuilder &MIB, GISelChangeObserver &Observer) {
  // Rewrite the compare in place to use the adjusted constant and predicate.
  RHS.setReg(Cst->getOperand(0).getReg());
  MI.getOperand(1).setPredicate(MatchInfo.second);
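
// The lane-splat combine below: a shuffle that broadcasts one lane of a
// 128-bit source is lowered to the lane-indexed G_DUPLANE<EltSize> pseudo;
// a 64-bit source is first widened with an undef high half via
// G_CONCAT_VECTORS so the lane index addresses a full 128-bit register.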
              std::pair<unsigned, int> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  const LLT SrcTy = MRI.getType(Src1Reg);
  const LLT DstTy = MRI.getType(MI.getOperand(0).getReg());

  if (ScalarSize == 64)
    Opc = AArch64::G_DUPLANE64;
  else if (ScalarSize == 32)
    Opc = AArch64::G_DUPLANE32;

  if (ScalarSize == 32)
    Opc = AArch64::G_DUPLANE32;
  else if (ScalarSize == 16)
    Opc = AArch64::G_DUPLANE16;

    Opc = AArch64::G_DUPLANE8;
  else if (ScalarSize == 16)
    Opc = AArch64::G_DUPLANE16;

    Opc = AArch64::G_DUPLANE8;

  MatchInfo.first = Opc;
  MatchInfo.second = *LaneIdx;
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  const LLT SrcTy = MRI.getType(Src1Reg);
  B.setInstrAndDebugLoc(MI);
  auto Lane = B.buildConstant(LLT::scalar(64), MatchInfo.second);
  // A 64-bit source is widened with an undef high half before the DUPLANE.
  auto Undef = B.buildUndef(SrcTy);
  DupSrc = B.buildConcatVectors(SrcTy.multiplyElements(2),
                                {Src1Reg, Undef.getReg(0)})
               .getReg(0);
  B.buildInstr(MatchInfo.first, {MI.getOperand(0).getReg()}, {DupSrc, Lane});
  MI.eraseFromParent();
  assert(MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR);
  // Splats of 0 and -1 already have cheap encodings, so only other constants
  // are worth turning into a G_DUP.
  int64_t Cst = Splat->getCst();
  return (Cst != 0 && Cst != -1);

  B.setInstrAndDebugLoc(MI);
  B.buildInstr(AArch64::G_DUP, {MI.getOperand(0).getReg()},
               {MI.getOperand(1).getReg()});
  MI.eraseFromParent();
  // An operand folds into the compare only if it is a supported extend
  // (G_SEXT_INREG, or a G_AND with a 0xFF/0xFFFF/0xFFFFFFFF mask) or a small
  // shift of such a value, and it has a single use.
  if (!MRI.hasOneNonDBGUse(CmpOp))
    return 0;

  if (MI.getOpcode() == TargetOpcode::G_SEXT_INREG)
    return true;
  if (MI.getOpcode() != TargetOpcode::G_AND)
    return false;
  return (Mask == 0xFF || Mask == 0xFFFF || Mask == 0xFFFFFFFF);

  if (IsSupportedExtend(*Def))
    return 1;

  unsigned Opc = Def->getOpcode();
  if (Opc != TargetOpcode::G_SHL && Opc != TargetOpcode::G_ASHR &&
      Opc != TargetOpcode::G_LSHR)
    return 0;

  uint64_t ShiftAmt = MaybeShiftAmt->Value.getZExtValue();
  if (IsSupportedExtend(*ShiftLHS))
    return (ShiftAmt <= 4) ? 2 : 1;

  LLT Ty = MRI.getType(Def->getOperand(0).getReg());
  if ((ShiftSize == 32 && ShiftAmt <= 31) ||
      (ShiftSize == 64 && ShiftAmt <= 63))
    return 1;
  assert(MI.getOpcode() == TargetOpcode::G_ICMP);
  // Swap the compare operands (and flip the predicate) when the current RHS
  // folds into the compare more profitably than the LHS.
  Register TheLHS = GetRegForProfit(LHS);
  Register TheRHS = GetRegForProfit(RHS);
  MI.getOperand(2).setReg(RHS);
  MI.getOperand(3).setReg(LHS);

  LLT DstTy = MRI.getType(LHS);
  assert(DstTy == MRI.getType(RHS) && "Src and Dst types must match!");
  assert(MI.getOpcode() == TargetOpcode::G_FCMP);
  LLT DstTy = MRI.getType(Dst);
  unsigned EltSize = MRI.getType(LHS).getScalarSizeInBits();
  // Half-precision vector compares need full FP16 support.
  if (EltSize == 16 && !ST.hasFullFP16())
    return false;
  if (EltSize != 16 && EltSize != 32 && EltSize != 64)
    return false;
  assert(MI.getOpcode() == TargetOpcode::G_FCMP);
  const auto &CmpMI = cast<GFCmp>(MI);
  LLT DstTy = MRI.getType(Dst);
  // "fcmp ord x, 0" only asks whether x is not a NaN, so it is handled by
  // comparing x with itself.
  if (Pred == CmpInst::Predicate::FCMP_ORD && IsZero) {
  bool NoNans =
      ST.getTargetLowering()->getTargetMachine().Options.NoNaNsFPMath;
  auto Cmp = getVectorFCMP(CC, LHS, RHS, IsZero, NoNans, MRI);
  // Predicates that need two conditions emit a second compare and combine it
  // with the first.
  auto Cmp2 = getVectorFCMP(CC2, LHS, RHS, IsZero, NoNans, MRI);
  auto Cmp2Dst = Cmp2(MIB);
  auto Cmp1Dst = Cmp(MIB);
  MRI.replaceRegWith(Dst, CmpRes);
  MI.eraseFromParent();
  assert(MI.getOpcode() == TargetOpcode::G_STORE);
  if (MRI.getType(DstReg).isVector())
    return false;
  // Only form a truncating store for scalar values of at most 64 bits.
  return MRI.getType(SrcReg).getSizeInBits() <= 64;

  assert(MI.getOpcode() == TargetOpcode::G_STORE);
  MI.getOperand(0).setReg(SrcReg);
  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
  LLT DstTy = MRI.getType(DstReg);

  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
  B.setInstrAndDebugLoc(MI);
  LegalizerHelper Helper(*MI.getMF(), Observer, B);
  Helper.lower(MI, 0, /* Unused hint type */ LLT());
1073 auto &Unmerge = cast<GUnmerge>(
MI);
1074 if (Unmerge.getNumDefs() != 2)
1076 if (!
MRI.use_nodbg_empty(Unmerge.getReg(1)))
1079 LLT DstTy =
MRI.getType(Unmerge.getReg(0));
1091 if (!LowestVal || LowestVal->Value.getZExtValue() != DstTy.
getSizeInBytes())
1094 if (!getOpcodeDef<GImplicitDef>(ExtSrc2,
MRI))
1097 MatchInfo = ExtSrc1;
1107 MI.getOperand(0).setReg(
MI.getOperand(1).getReg());
1108 MI.getOperand(1).setReg(Dst1);
1109 MI.getOperand(2).setReg(SrcReg);
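
// The G_MUL combine below: a multiply whose operands are both G_SEXT (or
// both G_ZEXT) from a type half as wide is replaced by the widening
// AArch64::G_SMULL / G_UMULL pseudo, which selects to SMULL/UMULL.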
  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
  unsigned I1Opc = I1->getOpcode();
  if (((I1Opc == TargetOpcode::G_ZEXT && I2Opc == TargetOpcode::G_ZEXT) ||
       (I1Opc == TargetOpcode::G_SEXT && I2Opc == TargetOpcode::G_SEXT)) &&
      (MRI.getType(I1->getOperand(0).getReg()).getScalarSizeInBits() ==
       MRI.getType(I1->getOperand(1).getReg()).getScalarSizeInBits() * 2) &&
  assert(MI.getOpcode() == TargetOpcode::G_MUL &&
         "Expected a G_MUL instruction");
  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
  unsigned I1Opc = I1->getOpcode();
  if (((I1Opc == TargetOpcode::G_ZEXT && I2Opc == TargetOpcode::G_ZEXT) ||
       (I1Opc == TargetOpcode::G_SEXT && I2Opc == TargetOpcode::G_SEXT)) &&
      (MRI.getType(I1->getOperand(0).getReg()).getScalarSizeInBits() ==
       MRI.getType(I1->getOperand(1).getReg()).getScalarSizeInBits() * 2) &&
  B.setInstrAndDebugLoc(MI);
  B.buildInstr(I1->getOpcode() == TargetOpcode::G_ZEXT ? AArch64::G_UMULL
                                                       : AArch64::G_SMULL,
               {MI.getOperand(0).getReg()},
               {I1->getOperand(1).getReg(), I2->getOperand(1).getReg()});
  MI.eraseFromParent();
  B.setInstrAndDebugLoc(MI);
  Helper.fewerElementsVector(
class AArch64PostLegalizerLoweringImpl : public Combiner {
  const AArch64PostLegalizerLoweringImplRuleConfig &RuleConfig;

  AArch64PostLegalizerLoweringImpl(
      const AArch64PostLegalizerLoweringImplRuleConfig &RuleConfig,

  static const char *getName() { return "AArch64PostLegalizerLoweringImpl"; }

#define GET_GICOMBINER_CLASS_MEMBERS
#include "AArch64GenPostLegalizeGILowering.inc"
#undef GET_GICOMBINER_CLASS_MEMBERS
};

#define GET_GICOMBINER_IMPL
#include "AArch64GenPostLegalizeGILowering.inc"
#undef GET_GICOMBINER_IMPL

AArch64PostLegalizerLoweringImpl::AArch64PostLegalizerLoweringImpl(
    const AArch64PostLegalizerLoweringImplRuleConfig &RuleConfig,
    : Combiner(MF, CInfo, TPC, /*KB=*/nullptr, CSEInfo),
      Helper(Observer, B, /*IsPreLegalize=*/true), RuleConfig(RuleConfig),
#define GET_GICOMBINER_CONSTRUCTOR_INITS
#include "AArch64GenPostLegalizeGILowering.inc"
#undef GET_GICOMBINER_CONSTRUCTOR_INITS
  AArch64PostLegalizerLowering();

  StringRef getPassName() const override {
    return "AArch64PostLegalizerLowering";
  }

  AArch64PostLegalizerLoweringImplRuleConfig RuleConfig;

void AArch64PostLegalizerLowering::getAnalysisUsage(AnalysisUsage &AU) const {

AArch64PostLegalizerLowering::AArch64PostLegalizerLowering()
  if (!RuleConfig.parseCommandLineOption())
    report_fatal_error("Invalid rule identifier");

bool AArch64PostLegalizerLowering::runOnMachineFunction(MachineFunction &MF) {
  if (MF.getProperties().hasProperty(
          MachineFunctionProperties::Property::FailedISel))
    return false;
  assert(MF.getProperties().hasProperty(
             MachineFunctionProperties::Property::Legalized) &&
         "Expected a legalized function?");
  auto *TPC = &getAnalysis<TargetPassConfig>();
                     F.hasOptSize(), F.hasMinSize());
  AArch64PostLegalizerLoweringImpl Impl(MF, CInfo, TPC, /*CSEInfo=*/nullptr,
                                        RuleConfig, ST);
  return Impl.combineMachineInstrs();
}

char AArch64PostLegalizerLowering::ID = 0;
INITIALIZE_PASS_BEGIN(AArch64PostLegalizerLowering, DEBUG_TYPE,
                      "Lower AArch64 MachineInstrs after legalization", false,
                      false)

FunctionPass *llvm::createAArch64PostLegalizerLowering() {
  return new AArch64PostLegalizerLowering();
}