51#define GET_GICOMBINER_DEPS
52#include "AArch64GenPostLegalizeGILowering.inc"
53#undef GET_GICOMBINER_DEPS
55#define DEBUG_TYPE "aarch64-postlegalizer-lowering"
61#define GET_GICOMBINER_TYPES
62#include "AArch64GenPostLegalizeGILowering.inc"
63#undef GET_GICOMBINER_TYPES
70struct ShuffleVectorPseudo {
75 std::initializer_list<SrcOp> SrcOps)
76 :
Opc(
Opc), Dst(Dst), SrcOps(SrcOps){};
77 ShuffleVectorPseudo() =
default;
83 assert(
MI.getOpcode() == TargetOpcode::G_FCONSTANT);
86 if (DstSize != 16 && DstSize != 32 && DstSize != 64)
98 assert(
MI.getOpcode() == TargetOpcode::G_FCONSTANT);
100 const APFloat &ImmValAPF =
MI.getOperand(1).getFPImm()->getValueAPF();
101 const Register DstReg =
MI.getOperand(0).getReg();
103 MI.eraseFromParent();
108std::optional<std::pair<bool, uint64_t>> getExtMask(
ArrayRef<int> M,
111 auto FirstRealElt =
find_if(M, [](
int Elt) {
return Elt >= 0; });
112 if (FirstRealElt == M.end())
117 APInt ExpectedElt =
APInt(MaskBits, *FirstRealElt + 1,
false,
true);
123 [&ExpectedElt](
int Elt) { return Elt != ExpectedElt++ && Elt >= 0; }))
133 bool ReverseExt =
false;
145 return std::make_pair(ReverseExt, Imm);
157 int NumInputElements) {
158 if (M.size() !=
static_cast<size_t>(NumInputElements))
160 int NumLHSMatch = 0, NumRHSMatch = 0;
161 int LastLHSMismatch = -1, LastRHSMismatch = -1;
162 for (
int Idx = 0; Idx < NumInputElements; ++Idx) {
168 M[Idx] == Idx ? ++NumLHSMatch : LastLHSMismatch = Idx;
169 M[Idx] == Idx + NumInputElements ? ++NumRHSMatch : LastRHSMismatch = Idx;
171 const int NumNeededToMatch = NumInputElements - 1;
172 if (NumLHSMatch == NumNeededToMatch)
173 return std::make_pair(
true, LastLHSMismatch);
174 if (NumRHSMatch == NumNeededToMatch)
175 return std::make_pair(
false, LastRHSMismatch);
182 ShuffleVectorPseudo &MatchInfo) {
183 assert(
MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
194 unsigned NumElts = Ty.getNumElements();
197 for (
unsigned LaneSize : {64U, 32U, 16U}) {
198 if (
isREVMask(ShuffleMask, EltSize, NumElts, LaneSize)) {
201 Opcode = AArch64::G_REV64;
202 else if (LaneSize == 32U)
203 Opcode = AArch64::G_REV32;
205 Opcode = AArch64::G_BSWAP;
207 MatchInfo = ShuffleVectorPseudo(Opcode, Dst, {Src});
218 ShuffleVectorPseudo &MatchInfo) {
219 assert(
MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
220 unsigned WhichResult;
221 unsigned OperandOrder;
225 if (!
isTRNMask(ShuffleMask, NumElts, WhichResult, OperandOrder))
227 unsigned Opc = (WhichResult == 0) ? AArch64::G_TRN1 : AArch64::G_TRN2;
228 Register V1 =
MI.getOperand(OperandOrder == 0 ? 1 : 2).getReg();
229 Register V2 =
MI.getOperand(OperandOrder == 0 ? 2 : 1).getReg();
230 MatchInfo = ShuffleVectorPseudo(
Opc, Dst, {V1, V2});
240 ShuffleVectorPseudo &MatchInfo) {
241 assert(
MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
242 unsigned WhichResult;
246 if (!
isUZPMask(ShuffleMask, NumElts, WhichResult))
248 unsigned Opc = (WhichResult == 0) ? AArch64::G_UZP1 : AArch64::G_UZP2;
251 MatchInfo = ShuffleVectorPseudo(
Opc, Dst, {V1, V2});
256 ShuffleVectorPseudo &MatchInfo) {
257 assert(
MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
258 unsigned WhichResult;
259 unsigned OperandOrder;
263 if (!
isZIPMask(ShuffleMask, NumElts, WhichResult, OperandOrder))
265 unsigned Opc = (WhichResult == 0) ? AArch64::G_ZIP1 : AArch64::G_ZIP2;
266 Register V1 =
MI.getOperand(OperandOrder == 0 ? 1 : 2).getReg();
267 Register V2 =
MI.getOperand(OperandOrder == 0 ? 2 : 1).getReg();
268 MatchInfo = ShuffleVectorPseudo(
Opc, Dst, {V1, V2});
275 ShuffleVectorPseudo &MatchInfo) {
294 auto *InsMI =
getOpcodeDef(TargetOpcode::G_INSERT_VECTOR_ELT,
295 MI.getOperand(1).getReg(), MRI);
299 if (!
getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, InsMI->getOperand(1).getReg(),
307 MatchInfo = ShuffleVectorPseudo(AArch64::G_DUP,
MI.getOperand(0).getReg(),
308 {InsMI->getOperand(2).getReg()});
315 ShuffleVectorPseudo &MatchInfo) {
316 assert(Lane >= 0 &&
"Expected positive lane?");
322 MI.getOperand(Lane < NumElements ? 1 : 2).getReg(), MRI);
324 if (NumElements <= Lane)
329 Register Reg = BuildVecMI->getOperand(Lane + 1).getReg();
331 ShuffleVectorPseudo(AArch64::G_DUP,
MI.getOperand(0).getReg(), {Reg});
336 ShuffleVectorPseudo &MatchInfo) {
337 assert(
MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
341 int Lane = *MaybeLane;
345 if (matchDupFromInsertVectorElt(Lane,
MI, MRI, MatchInfo))
347 if (matchDupFromBuildVector(Lane,
MI, MRI, MatchInfo))
355 unsigned NumElts = Ty.getNumElements();
364 unsigned ExpectedElt = M[0];
365 for (
unsigned I = 1;
I < NumElts; ++
I) {
369 if (ExpectedElt == NumElts)
374 if (ExpectedElt !=
static_cast<unsigned>(M[
I]))
382 ShuffleVectorPseudo &MatchInfo) {
383 assert(
MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
388 auto Mask =
MI.getOperand(3).getShuffleMask();
395 !isSingletonExtMask(Mask, DstTy))
398 Imm = Mask[0] * ExtFactor;
399 MatchInfo = ShuffleVectorPseudo(AArch64::G_EXT, Dst, {V1, V1, Imm});
403 std::tie(ReverseExt, Imm) = *ExtInfo;
407 MatchInfo = ShuffleVectorPseudo(AArch64::G_EXT, Dst, {V1, V2, Imm});
414 ShuffleVectorPseudo &MatchInfo) {
416 if (MatchInfo.Opc == TargetOpcode::G_BSWAP) {
417 assert(MatchInfo.SrcOps.size() == 1);
425 auto BS1 = MIRBuilder.
buildInstr(TargetOpcode::G_BITCAST, {BSTy},
426 MatchInfo.SrcOps[0]);
427 auto BS2 = MIRBuilder.
buildInstr(MatchInfo.Opc, {BSTy}, {BS1});
428 MIRBuilder.
buildInstr(TargetOpcode::G_BITCAST, {MatchInfo.Dst}, {BS2});
430 MIRBuilder.
buildInstr(MatchInfo.Opc, {MatchInfo.Dst}, MatchInfo.SrcOps);
431 MI.eraseFromParent();
439 if (MatchInfo.SrcOps[2].getImm() == 0)
440 MIRBuilder.
buildCopy(MatchInfo.Dst, MatchInfo.SrcOps[0]);
444 MatchInfo.SrcOps[2].getImm());
445 MIRBuilder.
buildInstr(MatchInfo.Opc, {MatchInfo.Dst},
446 {MatchInfo.SrcOps[0], MatchInfo.SrcOps[1], Cst});
448 MI.eraseFromParent();
456 "Expected 128bit vector in applyFullRev");
459 auto Rev = MIRBuilder.
buildInstr(AArch64::G_REV64, {DstTy}, {Src});
460 MIRBuilder.
buildInstr(AArch64::G_EXT, {Dst}, {Rev, Rev, Cst});
461 MI.eraseFromParent();
465 assert(
MI.getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT);
475 Builder.setInstrAndDebugLoc(Insert);
479 LLT EltTy = MRI.
getType(Insert.getElementReg());
480 LLT IdxTy = MRI.
getType(Insert.getIndexReg());
493 auto StackTemp = Builder.buildFrameIndex(FramePtrTy, FrameIdx);
495 Builder.buildStore(Insert.getOperand(1), StackTemp, PtrInfo,
Align(8));
500 "Expected a power-2 vector size");
501 auto Mask = Builder.buildConstant(IdxTy, VecTy.
getNumElements() - 1);
503 auto EltSize = Builder.buildConstant(IdxTy, EltTy.
getSizeInBytes());
506 Builder.buildPtrAdd(MRI.
getType(StackTemp.getReg(0)), StackTemp,
Mul)
510 Builder.buildStore(Insert.getElementReg(), EltPtr, PtrInfo,
Align(1));
512 Builder.buildLoad(Insert.getReg(0), StackTemp, PtrInfo,
Align(8));
513 Insert.eraseFromParent();
528 std::tuple<Register, int, Register, int> &MatchInfo) {
529 assert(
MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
533 auto DstIsLeftAndDstLane =
isINSMask(ShuffleMask, NumElts);
534 if (!DstIsLeftAndDstLane)
538 std::tie(DstIsLeft, DstLane) = *DstIsLeftAndDstLane;
544 int SrcLane = ShuffleMask[DstLane];
545 if (SrcLane >= NumElts) {
550 MatchInfo = std::make_tuple(DstVec, DstLane, SrcVec, SrcLane);
556 std::tuple<Register, int, Register, int> &MatchInfo) {
557 Builder.setInstrAndDebugLoc(
MI);
561 int DstLane, SrcLane;
562 std::tie(DstVec, DstLane, SrcVec, SrcLane) = MatchInfo;
563 auto SrcCst = Builder.buildConstant(
LLT::integer(64), SrcLane);
564 auto Extract = Builder.buildExtractVectorElement(ScalarTy, SrcVec, SrcCst);
565 auto DstCst = Builder.buildConstant(
LLT::integer(64), DstLane);
566 Builder.buildInsertVectorElement(Dst, DstVec, Extract, DstCst);
567 MI.eraseFromParent();
575 assert(Ty.isVector() &&
"vector shift count is not a vector type");
581 int64_t ElementBits = Ty.getScalarSizeInBits();
582 return Cnt >= 1 && Cnt <= ElementBits;
588 assert(
MI.getOpcode() == TargetOpcode::G_ASHR ||
589 MI.getOpcode() == TargetOpcode::G_LSHR);
598 unsigned Opc =
MI.getOpcode();
599 assert(
Opc == TargetOpcode::G_ASHR ||
Opc == TargetOpcode::G_LSHR);
601 Opc == TargetOpcode::G_ASHR ? AArch64::G_VASHR : AArch64::G_VLSHR;
603 MIB.
buildInstr(NewOpc, {
MI.getOperand(0)}, {
MI.getOperand(1)}).addImm(Imm);
604 MI.eraseFromParent();
614std::optional<std::pair<uint64_t, CmpInst::Predicate>>
620 unsigned Size = Ty.getSizeInBits();
621 assert((
Size == 32 ||
Size == 64) &&
"Expected 32 or 64 bit compare only?");
628 uint64_t OriginalC = ValAndVReg->Value.getZExtValue();
647 (
Size == 32 &&
static_cast<int32_t
>(
C) == INT32_MIN))
660 assert(
C != 0 &&
"C should not be zero here!");
672 if ((
Size == 32 &&
static_cast<int32_t
>(
C) == INT32_MAX) ||
686 if ((
Size == 32 &&
static_cast<uint32_t>(
C) == UINT32_MAX) ||
701 auto NumberOfInstrToLoadImm = [=](
uint64_t Imm) {
707 if (NumberOfInstrToLoadImm(OriginalC) > NumberOfInstrToLoadImm(
C))
721bool matchAdjustICmpImmAndPred(
723 std::pair<uint64_t, CmpInst::Predicate> &MatchInfo) {
724 assert(
MI.getOpcode() == TargetOpcode::G_ICMP);
727 if (
auto MaybeNewImmAndPred = tryAdjustICmpImmAndPred(
RHS, Pred, MRI)) {
728 MatchInfo = *MaybeNewImmAndPred;
734void applyAdjustICmpImmAndPred(
735 MachineInstr &
MI, std::pair<uint64_t, CmpInst::Predicate> &MatchInfo,
743 RHS.setReg(Cst->getOperand(0).getReg());
744 MI.getOperand(1).setPredicate(MatchInfo.second);
749 std::pair<unsigned, int> &MatchInfo) {
750 assert(
MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
753 const LLT DstTy = MRI.
getType(
MI.getOperand(0).getReg());
760 if (*LaneIdx >= SrcTy.getNumElements())
770 switch (SrcTy.getNumElements()) {
772 if (ScalarSize == 64)
773 Opc = AArch64::G_DUPLANE64;
774 else if (ScalarSize == 32)
775 Opc = AArch64::G_DUPLANE32;
778 if (ScalarSize == 32)
779 Opc = AArch64::G_DUPLANE32;
780 else if (ScalarSize == 16)
781 Opc = AArch64::G_DUPLANE16;
785 Opc = AArch64::G_DUPLANE8;
786 else if (ScalarSize == 16)
787 Opc = AArch64::G_DUPLANE16;
791 Opc = AArch64::G_DUPLANE8;
799 MatchInfo.first =
Opc;
800 MatchInfo.second = *LaneIdx;
806 assert(
MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
810 B.setInstrAndDebugLoc(
MI);
811 auto Lane =
B.buildConstant(
LLT::integer(64), MatchInfo.second);
816 if (SrcTy.getSizeInBits() == 64) {
817 auto Undef =
B.buildUndef(SrcTy);
818 DupSrc =
B.buildConcatVectors(SrcTy.multiplyElements(2),
819 {Src1Reg, Undef.getReg(0)})
822 B.buildInstr(MatchInfo.first, {MI.getOperand(0).getReg()}, {DupSrc, Lane});
823 MI.eraseFromParent();
828 Register Src1Reg = Unmerge.getReg(Unmerge.getNumOperands() - 1);
830 if (SrcTy.getSizeInBits() != 128 && SrcTy.getSizeInBits() != 64)
832 return SrcTy.
isVector() && !SrcTy.isScalable() &&
833 Unmerge.getNumOperands() == (
unsigned)SrcTy.getNumElements() + 1;
839 Register Src1Reg = Unmerge.getReg(Unmerge.getNumOperands() - 1);
841 assert((SrcTy.isVector() && !SrcTy.isScalable()) &&
842 "Expected a fixed length vector");
844 for (
int I = 0;
I < SrcTy.getNumElements(); ++
I)
845 B.buildExtractVectorElementConstant(Unmerge.getReg(
I), Src1Reg,
I);
846 MI.eraseFromParent();
850 assert(
MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR);
863 B.setInstrAndDebugLoc(
MI);
864 B.buildInstr(AArch64::G_DUP, {
MI.getOperand(0).getReg()},
865 {
MI.getOperand(1).getReg()});
866 MI.eraseFromParent();
878 if (
MI.getOpcode() == TargetOpcode::G_SEXT_INREG)
880 if (
MI.getOpcode() != TargetOpcode::G_AND)
886 uint64_t Mask = ValAndVReg->Value.getZExtValue();
887 return (Mask == 0xFF || Mask == 0xFFFF || Mask == 0xFFFFFFFF);
891 if (IsSupportedExtend(*Def))
894 unsigned Opc = Def->getOpcode();
895 if (
Opc != TargetOpcode::G_SHL &&
Opc != TargetOpcode::G_ASHR &&
896 Opc != TargetOpcode::G_LSHR)
903 uint64_t ShiftAmt = MaybeShiftAmt->Value.getZExtValue();
910 if (IsSupportedExtend(*ShiftLHS))
911 return (ShiftAmt <= 4) ? 2 : 1;
913 LLT Ty = MRI.
getType(Def->getOperand(0).getReg());
917 if ((ShiftSize == 32 && ShiftAmt <= 31) ||
918 (ShiftSize == 64 && ShiftAmt <= 63))
926 assert(
MI.getOpcode() == TargetOpcode::G_ICMP);
947 return isCMN(Def, Pred, MRI) ? Def->getOperand(2).getReg() :
Reg;
967 MI.getOperand(2).setReg(
RHS);
968 MI.getOperand(3).setReg(
LHS);
1016 assert(
MI.getOpcode() == TargetOpcode::G_FCMP);
1021 if (!DstTy.
isVector() || !ST.hasNEON())
1025 if (EltSize == 16 && !ST.hasFullFP16())
1027 if (EltSize != 16 && EltSize != 32 && EltSize != 64)
1036 assert(
MI.getOpcode() == TargetOpcode::G_FCMP);
1047 bool Invert =
false;
1068 auto Cmp = getVectorFCMP(CC,
LHS,
RHS, NoNans, MRI);
1073 auto Cmp2 = getVectorFCMP(CC2,
LHS,
RHS, NoNans, MRI);
1074 auto Cmp2Dst = Cmp2(MIB);
1075 auto Cmp1Dst = Cmp(MIB);
1081 MI.eraseFromParent();
1089 for (
unsigned I = 0;
I < GBuildVec->getNumSources(); ++
I) {
1093 if (!ConstVal.has_value())
1103 LLT DstTy = MRI.
getType(GBuildVec->getReg(0));
1104 Register DstReg =
B.buildUndef(DstTy).getReg(0);
1106 for (
unsigned I = 0;
I < GBuildVec->getNumSources(); ++
I) {
1107 Register SrcReg = GBuildVec->getSourceReg(
I);
1112 B.buildInsertVectorElement(DstTy, DstReg, SrcReg, IdxReg).getReg(0);
1114 B.buildCopy(GBuildVec->getReg(0), DstReg);
1115 GBuildVec->eraseFromParent();
1120 assert(
MI.getOpcode() == TargetOpcode::G_STORE);
1134 assert(
MI.getOpcode() == TargetOpcode::G_STORE);
1136 MI.getOperand(0).setReg(SrcReg);
1144 assert(
MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
1152 assert(
MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
1153 B.setInstrAndDebugLoc(
MI);
1163 if (Unmerge.getNumDefs() != 2)
1180 if (!LowestVal || LowestVal->Value.getZExtValue() != DstTy.
getSizeInBytes())
1186 MatchInfo = ExtSrc1;
1196 MI.getOperand(0).setReg(
MI.getOperand(1).getReg());
1197 MI.getOperand(1).setReg(Dst1);
1198 MI.getOperand(2).setReg(SrcReg);
1215 assert(
MI.getOpcode() == TargetOpcode::G_MUL &&
1216 "Expected a G_MUL instruction");
1227class AArch64PostLegalizerLoweringImpl :
public Combiner {
1230 const AArch64PostLegalizerLoweringImplRuleConfig &RuleConfig;
1234 AArch64PostLegalizerLoweringImpl(
1236 const AArch64PostLegalizerLoweringImplRuleConfig &RuleConfig,
1239 static const char *
getName() {
return "AArch6400PreLegalizerCombiner"; }
1244#define GET_GICOMBINER_CLASS_MEMBERS
1245#include "AArch64GenPostLegalizeGILowering.inc"
1246#undef GET_GICOMBINER_CLASS_MEMBERS
1249#define GET_GICOMBINER_IMPL
1250#include "AArch64GenPostLegalizeGILowering.inc"
1251#undef GET_GICOMBINER_IMPL
1253AArch64PostLegalizerLoweringImpl::AArch64PostLegalizerLoweringImpl(
1255 const AArch64PostLegalizerLoweringImplRuleConfig &RuleConfig,
1257 :
Combiner(MF, CInfo, nullptr, CSEInfo),
1258 Helper(Observer,
B,
true), RuleConfig(RuleConfig),
1261#include
"AArch64GenPostLegalizeGILowering.inc"
1266bool runPostLegalizerLowering(
1268 const AArch64PostLegalizerLoweringImplRuleConfig &RuleConfig) {
1276 F.hasOptSize(),
F.hasMinSize());
1278 CInfo.MaxIterations = 1;
1281 CInfo.EnableFullDCE =
false;
1282 AArch64PostLegalizerLoweringImpl Impl(MF, CInfo,
nullptr,
1284 return Impl.combineMachineInstrs();
1291 AArch64PostLegalizerLoweringLegacy();
1293 StringRef getPassName()
const override {
1294 return "AArch64PostLegalizerLowering";
1297 bool runOnMachineFunction(MachineFunction &MF)
override;
1298 void getAnalysisUsage(AnalysisUsage &AU)
const override;
1301 AArch64PostLegalizerLoweringImplRuleConfig RuleConfig;
1305void AArch64PostLegalizerLoweringLegacy::getAnalysisUsage(
1312AArch64PostLegalizerLoweringLegacy::AArch64PostLegalizerLoweringLegacy()
1313 : MachineFunctionPass(
ID) {
1314 if (!RuleConfig.parseCommandLineOption())
1318bool AArch64PostLegalizerLoweringLegacy::runOnMachineFunction(
1321 return runPostLegalizerLowering(MF, RuleConfig);
1324char AArch64PostLegalizerLoweringLegacy::ID = 0;
1326 "Lower AArch64 MachineInstrs after legalization",
false,
1329 "Lower AArch64 MachineInstrs after legalization",
false,
1334 std::make_unique<AArch64PostLegalizerLoweringImplRuleConfig>()) {
1335 if (!RuleConfig->parseCommandLineOption())
1348 const bool Changed = runPostLegalizerLowering(MF, *RuleConfig);
1360 return new AArch64PostLegalizerLoweringLegacy();
static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, int64_t &Cnt)
isVShiftRImm - Check if this is a valid build_vector for the immediate operand of a vector shift right operation.
static bool isINSMask(ArrayRef< int > M, int NumInputElements, bool &DstIsLeft, int &Anomaly)
static unsigned getCmpOperandFoldingProfit(SDValue Op)
Returns how profitable it is to fold a comparison's operand's shift and/or extension operations.
This file declares the targeting of the MachineLegalizer class for AArch64.
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
#define GET_GICOMBINER_CONSTRUCTOR_INITS
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This contains common combine transformations that may be used in a combine pass, or by the target elsewhere.
Option class for Targets to specify which operations are combined how and when.
This contains the base class for all Combiners generated by TableGen.
This contains common code to allow clients to notify changes to machine instr.
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic operations.
Contains matchers for matching SSA Machine Instructions.
This file declares the MachineIRBuilder class.
Promote Memory to Register
static MCRegister getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
static StringRef getName(Value *V)
PreservedAnalyses run(MachineFunction &MF, MachineFunctionAnalysisManager &MFAM)
~AArch64PostLegalizerLoweringPass()
AArch64PostLegalizerLoweringPass()
APInt bitcastToAPInt() const
Class for arbitrary precision integers.
uint64_t getZExtValue() const
Get zero extended value.
unsigned logBase2() const
Represent the analysis usage information of a pass.
LLVM_ABI void setPreservesCFG()
This function should be called by the pass, iff they do not:
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Represents analyses that only rely on functions' control flow.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ ICMP_ULT
unsigned less than
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
@ ICMP_SGE
signed greater or equal
@ ICMP_ULE
unsigned less or equal
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
FunctionPass class - This class is used to implement most global optimizations.
Abstract class that contains various methods for clients to notify about changes.
virtual void changingInstr(MachineInstr &MI)=0
This instruction is about to be mutated in some way.
virtual void changedInstr(MachineInstr &MI)=0
This instruction was mutated in some way.
LLT changeElementCount(ElementCount EC) const
Return a vector or scalar with the same element type and the new element count.
constexpr bool isScalableVector() const
Returns true if the LLT is a scalable vector.
constexpr unsigned getScalarSizeInBits() const
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
constexpr bool isVector() const
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
constexpr ElementCount getElementCount() const
static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
static LLT integer(unsigned SizeInBits)
constexpr TypeSize getSizeInBytes() const
Returns the total size of the type in bytes, i.e.
LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
LLVM_ABI LegalizeResult lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty)
Legalize an instruction by splitting it into simpler parts, hopefully understood by the target.
LLVM_ABI LegalizeResult fewerElementsVector(MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy)
Legalize a vector instruction by splitting into multiple components, each acting on the same scalar t...
An RAII based helper class to modify MachineFunctionProperties when running pass.
LLVM_ABI int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of passes that operate on the MachineFunction representation.
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
Function & getFunction()
Return the LLVM function that this machine code represents.
const MachineFunctionProperties & getProperties() const
Get the function properties.
Helper class to build MachineInstr.
MachineInstrBuilder buildNot(const DstOp &Dst, const SrcOp &Src0)
Build and insert a bitwise not, NegOne = G_CONSTANT -1 Res = G_OR Op0, NegOne.
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
void setInstrAndDebugLoc(MachineInstr &MI)
Set the insertion point to before MI, and set the debug loc to MI's loc.
MachineRegisterInfo * getMRI()
Getter for MRI.
MachineInstrBuilder buildOr(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_OR Op0, Op1.
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
Register getReg(unsigned Idx) const
Get the register for the operand index.
Representation of each machine instruction.
const MachineOperand & getOperand(unsigned i) const
MachineOperand class - Representation of each machine instruction operand.
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI bool hasOneNonDBGUse(Register RegNo) const
hasOneNonDBGUse - Return true if there is exactly one non-Debug use of the specified register.
LLVM_ABI MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
bool use_nodbg_empty(Register RegNo) const
use_nodbg_empty - Return true if there are no non-Debug instructions using the specified register.
LLT getType(Register Reg) const
Get the low-level type of Reg or LLT{} if Reg is not a generic (target independent) virtual register.
iterator_range< use_instr_nodbg_iterator > use_nodbg_instructions(Register Reg) const
LLVM_ABI Register cloneVirtualRegister(Register VReg, StringRef Name="")
Create and return a new virtual register in the function with the same attributes as the given regist...
LLVM_ABI void replaceRegWith(Register FromReg, Register ToReg)
replaceRegWith - Replace all instances of FromReg with ToReg in the machine function.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
PreservedAnalyses & preserveSet()
Mark an analysis set as preserved.
Wrapper class representing virtual and physical registers.
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
A Use represents the edge between a Value definition and its users.
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
constexpr LeafTy divideCoefficientBy(ScalarTy RHS) const
We do not provide the '/' operator here because division for polynomial types does not work in the same way as it does for normal integers.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
std::optional< RegOrConstant > getAArch64VectorSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI)
constexpr bool isLegalArithImmed(const uint64_t C)
void changeVectorFCMPPredToAArch64CC(const CmpInst::Predicate P, AArch64CC::CondCode &CondCode, AArch64CC::CondCode &CondCode2, bool &Invert)
Find the AArch64 condition codes necessary to represent P for a vector floating point comparison.
bool isCMN(const MachineInstr *MaybeSub, const CmpInst::Predicate &Pred, const MachineRegisterInfo &MRI)
std::optional< int64_t > getAArch64VectorSplatScalar(const MachineInstr &MI, const MachineRegisterInfo &MRI)
void expandMOVImm(uint64_t Imm, unsigned BitSize, SmallVectorImpl< ImmInsnModel > &Insn)
Expand a MOVi32imm or MOVi64imm pseudo instruction to one or more real move-immediate instructions to...
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ C
The default llvm calling convention, compatible with C.
operand_type_match m_Reg()
SpecificConstantMatch m_ZeroInt()
Convenience matchers for specific integer values.
ImplicitDefMatch m_GImplicitDef()
bool mi_match(Reg R, const MachineRegisterInfo &MRI, Pattern &&P)
UnaryOp_match< SrcTy, TargetOpcode::G_TRUNC > m_GTrunc(const SrcTy &Src)
This is an optimization pass for GlobalISel generic memory operations.
LLVM_ABI bool isBuildVectorAllZeros(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndef=false)
Return true if the specified instruction is a G_BUILD_VECTOR or G_BUILD_VECTOR_TRUNC where all of the...
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI MachineInstr * getOpcodeDef(unsigned Opcode, Register Reg, const MachineRegisterInfo &MRI)
See if Reg is defined by an single def instruction that is Opcode.
bool isZIPMask(ArrayRef< int > M, unsigned NumElts, unsigned &WhichResultOut, unsigned &OperandOrderOut)
Return true for zip1 or zip2 masks of the form: <0, 8, 1, 9, 2, 10, 3, 11> (WhichResultOut = 0,...
@ Undef
Value of the register doesn't matter.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
AnalysisManager< MachineFunction > MachineFunctionAnalysisManager
LLVM_ABI MachineInstr * getDefIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI)
Find the def instruction for Reg, folding away any trivial copies.
LLVM_ABI PreservedAnalyses getMachineFunctionPassPreservedAnalyses()
Returns the minimum set of Analyses that all machine function passes must preserve.
FunctionPass * createAArch64PostLegalizerLowering()
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
bool isUZPMask(ArrayRef< int > M, unsigned NumElts, unsigned &WhichResultOut)
Return true for uzp1 or uzp2 masks of the form: <0, 2, 4, 6, 8, 10, 12, 14> or <1,...
bool isREVMask(ArrayRef< int > M, unsigned EltSize, unsigned NumElts, unsigned BlockSize)
isREVMask - Check if a vector shuffle corresponds to a REV instruction with the specified blocksize.
LLVM_ABI std::optional< ValueAndVReg > getAnyConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs=true, bool LookThroughAnyExt=false)
If VReg is defined by a statically evaluable chain of instructions rooted on a G_CONSTANT or G_FCONSTANT, returns its value as an APInt together with its defining register.
LLVM_ABI bool isBuildVectorAllOnes(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndef=false)
Return true if the specified instruction is a G_BUILD_VECTOR or G_BUILD_VECTOR_TRUNC where all of the...
LLVM_ABI void getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU)
Modify analysis usage so it preserves passes required for the SelectionDAG fallback.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI std::optional< ValueAndVReg > getIConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs=true)
If VReg is defined by a statically evaluable chain of instructions rooted on a G_CONSTANT returns its...
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
bool isTRNMask(ArrayRef< int > M, unsigned NumElts, unsigned &WhichResultOut, unsigned &OperandOrderOut)
Return true for trn1 or trn2 masks of the form: <0, 8, 2, 10, 4, 12, 6, 14> (WhichResultOut = 0,...
LLVM_ABI int getSplatIndex(ArrayRef< int > Mask)
If all non-negative Mask elements are the same value, return that value.
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
Implement std::hash so that hash_code can be used in STL containers.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
@ SinglePass
Enables Observer-based DCE and additional heuristics that retry combining defined and used instructions.
This class contains a discriminated union of information about pointers in memory operands,...
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.