#define DEBUG_TYPE "aarch64-mi-peephole-opt"
  using OpcodePair = std::pair<unsigned, unsigned>;
  template <typename T>
  using SplitAndOpcFunc =
      std::function<std::optional<OpcodePair>(T, unsigned, T &, T &)>;
  using BuildMIFunc =
      std::function<void(MachineInstr &, OpcodePair, unsigned, unsigned,
                         Register, Register, Register)>;
  template <typename T>
  bool splitTwoPartImm(MachineInstr &MI,
                       SplitAndOpcFunc<T> SplitAndOpc, BuildMIFunc BuildInstr);
  template <typename T>
  bool visitADDSUB(unsigned PosOpc, unsigned NegOpc, MachineInstr &MI);
  template <typename T>
  bool visitADDSSUBS(OpcodePair PosOpcs, OpcodePair NegOpcs, MachineInstr &MI);
  template <typename T>
  bool visitAND(unsigned Opc, MachineInstr &MI);
  // ...

  StringRef getPassName() const override {
    return "AArch64 MI Peephole Optimization pass";
  }
char AArch64MIPeepholeOpt::ID = 0;
148 "AArch64 MI Peephole Optimization",
false,
false)
template <typename T>
static bool splitBitmaskImm(T Imm, unsigned RegSize, T &Imm0, T &Imm1) {
  T UImm = static_cast<T>(Imm);
  // ...
}
template <typename T>
bool AArch64MIPeepholeOpt::visitAND(unsigned Opc, MachineInstr &MI) {
  // Split a MOV of a constant feeding an AND into two ANDs with bitmask
  // immediates: MOVi32imm + ANDWrr ==> ANDWri + ANDWri (likewise for X regs).
  return splitTwoPartImm<T>(
      MI,
      [Opc](T Imm, unsigned RegSize, T &Imm0,
            T &Imm1) -> std::optional<OpcodePair> {
        if (splitBitmaskImm(Imm, RegSize, Imm0, Imm1))
          return std::make_pair(Opc, Opc);
        return std::nullopt;
      },
      /* BuildInstr lambda elided */);
}
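// Illustrative sketch, not part of the pass: the rewrite above is sound
// because of the identity x & C == (x & Imm0) & Imm1 whenever C == Imm0 & Imm1;
// splitBitmaskImm additionally guarantees that Imm0 and Imm1 each encode as
// AArch64 logical immediates. The standalone snippet below, with an
// arbitrarily chosen mask pair, checks the identity itself (it does not
// reproduce the pass's split algorithm):
#include <cassert>
#include <cstdint>
#include <initializer_list>

int main() {
  const uint32_t Imm0 = 0x00ff00ffu, Imm1 = 0x0f0f0f0fu;
  const uint32_t C = Imm0 & Imm1; // 0x000f000f, the mask the pair replaces
  for (uint32_t X : {0u, 0xdeadbeefu, 0xffffffffu})
    assert((X & C) == ((X & Imm0) & Imm1)); // two ANDs == one AND
  return 0;
}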
bool AArch64MIPeepholeOpt::visitORR(MachineInstr &MI) {
  // Check that this ORRWrs comes from the zero-extend pattern
  // (SUBREG_TO_REG (i32 0), (ORRWrs WZR, $src, 0), sub_32).
  if (MI.getOperand(3).getImm() != 0)
    return false;

  if (MI.getOperand(1).getReg() != AArch64::WZR)
    return false;
  if (SrcMI->getOpcode() == TargetOpcode::COPY &&
      SrcMI->getOperand(1).getReg().isVirtual()) {
    // ...
    if (RC != &AArch64::FPR32RegClass &&
        ((RC != &AArch64::FPR64RegClass && RC != &AArch64::FPR128RegClass) ||
         SrcMI->getOperand(1).getSubReg() != AArch64::ssub))
      return false;
    // ...
    CpySrc = MRI->createVirtualRegister(&AArch64::FPR32RegClass);
    BuildMI(*SrcMI->getParent(), SrcMI, SrcMI->getDebugLoc(),
            TII->get(TargetOpcode::COPY), CpySrc)
        .add(SrcMI->getOperand(1));
    // ...
  } else if (SrcMI->getOpcode() <= TargetOpcode::GENERIC_OP_END)
    return false;

  // The ORR is a no-op move; forward the source register and delete it.
  MRI->replaceRegWith(DefReg, SrcReg);
  MRI->clearKillFlags(SrcReg);
  MI.eraseFromParent();
bool AArch64MIPeepholeOpt::visitINSERT(MachineInstr &MI) {
  // Rewrite INSERT_SUBREG of a 32-bit value into SUBREG_TO_REG when the
  // inserted-into register is irrelevant (it is tied and fully overwritten).
  if (!MI.isRegTiedToDefOperand(1))
    return false;
  // ...
  if ((SrcMI->getOpcode() <= TargetOpcode::GENERIC_OP_END) ||
      !AArch64::GPR64allRegClass.hasSubClassEq(RC))
    return false;
  // ...
  BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
          TII->get(TargetOpcode::SUBREG_TO_REG), DstReg)
      .addImm(0)
      .add(MI.getOperand(2))
      .add(MI.getOperand(3));
  // ...
  MI.eraseFromParent();
  return true;
}
template <typename T>
static bool splitAddSubImm(T Imm, unsigned RegSize, T &Imm0, T &Imm1) {
  // The immediate must have the form ((Imm0 << 12) + Imm1) with both 12-bit
  // pieces non-zero and no bits set above bit 23.
  if ((Imm & 0xfff000) == 0 || (Imm & 0xfff) == 0 ||
      (Imm & ~static_cast<T>(0xffffff)) != 0)
    return false;

  // Do not split if a single MOV instruction can materialize the immediate.
  SmallVector<AArch64_IMM::ImmInsnModel, 4> Insn;
  AArch64_IMM::expandMOVImm(Imm, RegSize, Insn);
  if (Insn.size() == 1)
    return false;

  Imm0 = (Imm >> 12) & 0xfff;
  Imm1 = Imm & 0xfff;
  return true;
}
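// Illustrative sketch, not part of the pass: splitAddSubImm accepts exactly
// the 24-bit immediates whose two 12-bit halves are both non-zero, because
// AArch64 ADD/SUB (immediate) takes a 12-bit value optionally shifted left
// by 12:
//   add w0, w1, #Imm  ==>  add w0, w1, #Imm0, lsl #12 ; add w0, w0, #Imm1
// A standalone check of the decomposition arithmetic:
#include <cassert>
#include <cstdint>

int main() {
  const uint64_t Imm = 0x123456;             // 24 bits, both halves non-zero
  const uint64_t Imm0 = (Imm >> 12) & 0xfff; // shifted piece
  const uint64_t Imm1 = Imm & 0xfff;         // low piece
  assert((Imm0 << 12) + Imm1 == Imm);        // the two adds recompose Imm
  return 0;
}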
template <typename T>
bool AArch64MIPeepholeOpt::visitADDSUB(unsigned PosOpc, unsigned NegOpc,
                                       MachineInstr &MI) {
  // Split MOVi{32,64}imm + ADD/SUB into two 12-bit-immediate ADDs/SUBs. The
  // zero register cannot be a source here: in the immediate encoding,
  // register 31 means SP, not XZR/WZR.
  if (MI.getOperand(1).getReg() == AArch64::XZR ||
      MI.getOperand(1).getReg() == AArch64::WZR)
    return false;

  return splitTwoPartImm<T>(
      MI,
      [PosOpc, NegOpc](T Imm, unsigned RegSize, T &Imm0,
                       T &Imm1) -> std::optional<OpcodePair> {
        if (splitAddSubImm(Imm, RegSize, Imm0, Imm1))
          return std::make_pair(PosOpc, PosOpc);
        if (splitAddSubImm(-Imm, RegSize, Imm0, Imm1))
          return std::make_pair(NegOpc, NegOpc);
        return std::nullopt;
      },
      /* BuildInstr lambda elided */);
}
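// Illustrative sketch, not part of the pass: the NegOpc fallback relies on
// two's-complement negation. When C itself has no valid split but -C does,
// x + C is rewritten as x - (-C), and likewise SUB flips to ADD. A standalone
// check of that identity on wrapping 64-bit arithmetic:
#include <cassert>
#include <cstdint>

int main() {
  const uint64_t X = 0x1234567890abcdefull;
  const uint64_t C = 0xffffffffffedcbaaull; // a "negative" value as uint64_t
  const uint64_t NegC = ~C + 1;             // -C == 0x123456, which splits
  assert(X + C == X - NegC);                // ADD #C == SUB #(-C)
  return 0;
}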
template <typename T>
bool AArch64MIPeepholeOpt::visitADDSSUBS(OpcodePair PosOpcs, OpcodePair NegOpcs,
                                         MachineInstr &MI) {
  // Same transformation as visitADDSUB, but the flag-setting forms may only
  // be split when no consumer of NZCV reads C or V (see the standalone
  // demonstration below).
  if (MI.getOperand(1).getReg() == AArch64::XZR ||
      MI.getOperand(1).getReg() == AArch64::WZR)
    return false;

  return splitTwoPartImm<T>(
      MI,
      [/* captures elided */](T Imm, unsigned RegSize, T &Imm0,
                              T &Imm1) -> std::optional<OpcodePair> {
        // ...
        if (!NZCVUsed || NZCVUsed->C || NZCVUsed->V)
          return std::nullopt;
        // ...
      },
      /* BuildInstr lambda elided */);
}
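// Illustrative sketch, not part of the pass: why C and V must be unused. The
// final ADDS/SUBS only adds the second 12-bit piece, so its carry/overflow
// can differ from the single-instruction form, while N and Z depend only on
// the final sum and survive the split. A standalone demonstration that the
// carry flag would change:
#include <cassert>
#include <cstdint>

int main() {
  const uint32_t X = 0xffffffffu;
  const uint32_t Imm0 = 0x1, Imm1 = 0x0; // Imm = 0x1000
  // Carry out of the single "adds x, #0x1000":
  const bool SingleCarry =
      (uint64_t(X) + (uint64_t(Imm0) << 12) + Imm1) > UINT32_MAX;
  // Carry out of the final adds in the split form (add #Imm0 lsl 12, then
  // adds #Imm1):
  const uint32_t Tmp = X + (Imm0 << 12); // wraps to 0xfff
  const bool SplitCarry = (uint64_t(Tmp) + Imm1) > UINT32_MAX;
  assert(SingleCarry && !SplitCarry); // C differs; N/Z of the sum do not
  return 0;
}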
bool AArch64MIPeepholeOpt::checkMovImmInstr(MachineInstr &MI,
                                            MachineInstr *&MovMI,
                                            MachineInstr *&SubregToRegMI) {
  // Bail out if the block is inside a loop and the instruction is not loop
  // invariant: splitting would re-materialize the immediate on every
  // iteration.
  MachineLoop *L = MLI->getLoopFor(MI.getParent());
  if (L && !L->isLoopInvariant(MI))
    return false;

  // The second source operand must be defined by a move-immediate.
  MovMI = MRI->getUniqueVRegDef(MI.getOperand(2).getReg());
  // ...

  // Look through a SUBREG_TO_REG wrapper if present.
  SubregToRegMI = nullptr;
  if (MovMI->getOpcode() == TargetOpcode::SUBREG_TO_REG) {
    SubregToRegMI = MovMI;
    // ...
  }

  if (MovMI->getOpcode() != AArch64::MOVi32imm &&
      MovMI->getOpcode() != AArch64::MOVi64imm)
    return false;
  // ...
  return true;
}
template <typename T>
bool AArch64MIPeepholeOpt::splitTwoPartImm(
    MachineInstr &MI,
    SplitAndOpcFunc<T> SplitAndOpc, BuildMIFunc BuildInstr) {
  unsigned RegSize = sizeof(T) * 8;
  assert((RegSize == 32 || RegSize == 64) &&
         "Invalid RegSize for legal immediate peephole optimization");

  // Perform the essential checks against the MOV that feeds this instruction.
  MachineInstr *MovMI, *SubregToRegMI;
  if (!checkMovImmInstr(MI, MovMI, SubregToRegMI))
    return false;
  // Ask the caller how to split the immediate and which opcodes to use.
  T Imm = static_cast<T>(MovMI->getOperand(1).getImm()), Imm0, Imm1;
  OpcodePair Opcode;
  if (auto R = SplitAndOpc(Imm, RegSize, Imm0, Imm1))
    Opcode = *R;
  else
    return false;
  // Determine register classes for destinations and operands of the two new
  // instructions; the opcodes may differ (e.g. ADDS splits into ADD + ADDS).
  MachineFunction *MF = MI.getMF();
  const TargetRegisterClass *FirstInstrDstRC =
      TII->getRegClass(TII->get(Opcode.first), 0, TRI, *MF);
  const TargetRegisterClass *FirstInstrOperandRC =
      TII->getRegClass(TII->get(Opcode.first), 1, TRI, *MF);
  const TargetRegisterClass *SecondInstrDstRC =
      (Opcode.first == Opcode.second)
          ? FirstInstrDstRC
          : TII->getRegClass(TII->get(Opcode.second), 0, TRI, *MF);
  const TargetRegisterClass *SecondInstrOperandRC =
      (Opcode.first == Opcode.second)
          ? FirstInstrOperandRC
          : TII->getRegClass(TII->get(Opcode.second), 1, TRI, *MF);
  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();

  // Create the new virtual registers: a temporary for the first instruction
  // and, when the destination is virtual, a fresh destination for the second.
  Register NewTmpReg = MRI->createVirtualRegister(FirstInstrDstRC);
  Register NewDstReg = DstReg.isVirtual()
                           ? MRI->createVirtualRegister(SecondInstrDstRC)
                           : DstReg;

  // Constrain the register classes to what the two new instructions require.
  MRI->constrainRegClass(SrcReg, FirstInstrOperandRC);
  MRI->constrainRegClass(NewTmpReg, SecondInstrOperandRC);
  if (DstReg != NewDstReg)
    MRI->constrainRegClass(NewDstReg, MRI->getRegClass(DstReg));

  // Delegate to the caller-supplied builder to emit the two instructions.
  BuildInstr(MI, Opcode, Imm0, Imm1, SrcReg, NewTmpReg, NewDstReg);
  // replaceRegWith changes MI's definition register; keep MI in SSA form
  // until it is deleted.
  if (DstReg != NewDstReg) {
    MRI->replaceRegWith(DstReg, NewDstReg);
    MI.getOperand(0).setReg(DstReg);
  }

  // Remove the now-dead original instruction.
  MI.eraseFromParent();
  // ...
}
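// Illustrative sketch, not part of the pass: splitTwoPartImm is a small
// driver around two caller-supplied callbacks. SplitAndOpc decides whether
// Imm splits and which opcode pair to use; BuildInstr emits the two
// replacement instructions. The shape of a caller, with a hypothetical
// HYP_OPC placeholder standing in for a real AArch64 opcode:
//
//   return splitTwoPartImm<uint32_t>(
//       MI,
//       [](uint32_t Imm, unsigned RegSize, uint32_t &Imm0,
//          uint32_t &Imm1) -> std::optional<OpcodePair> {
//         if (/* Imm splits into Imm0/Imm1 */)
//           return std::make_pair(HYP_OPC, HYP_OPC);
//         return std::nullopt;
//       },
//       [&](MachineInstr &MI, OpcodePair Opcode, unsigned Imm0, unsigned Imm1,
//           Register SrcReg, Register NewTmpReg, Register NewDstReg) {
//         // Emit: NewTmpReg = Opcode.first  SrcReg,   Imm0
//         //       NewDstReg = Opcode.second NewTmpReg, Imm1
//       });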
bool AArch64MIPeepholeOpt::visitINSviGPR(MachineInstr &MI, unsigned Opc) {
  // If this INSvi[X]gpr is fed by COPYs from an FPR128 source, insert
  // directly from the vector register instead:
  //   %dst:fpr128 = INSvi[X]gpr %vec, idx, %gpr_copy_of_src
  //     ==> %dst:fpr128 = INSvi[X]lane %vec, idx, %src:fpr128, 0
  MachineInstr *SrcMI = MRI->getUniqueVRegDef(MI.getOperand(3).getReg());

  // Walk the chain of COPYs back to an FPR128 source.
  if (!SrcMI || SrcMI->getOpcode() != TargetOpcode::COPY)
    return false;
  // ...
  if (MRI->getRegClass(SrcMI->getOperand(1).getReg()) ==
      &AArch64::FPR128RegClass) {
    // ... (found the FPR128 source)
  }
  // ...

  BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), TII->get(Opc), DstReg)
      .add(MI.getOperand(1))
      .add(MI.getOperand(2))
      .addUse(SrcReg, getRegState(SrcMI->getOperand(1)))
      .addImm(0);

  MI.eraseFromParent();
  return true;
}
// Returns true when MI defines an FPR64 with a non-generic (target) opcode,
// i.e. a 64-bit SIMD/FP write whose high 64 bits are implicitly zeroed.
static bool is64bitDefwithZeroHigh64bit(MachineInstr *MI,
                                        MachineRegisterInfo *MRI) {
  if (!MI->getOperand(0).isDef() || !MI->getOperand(0).isReg())
    return false;
  const TargetRegisterClass *RC = MRI->getRegClass(MI->getOperand(0).getReg());
  if (RC != &AArch64::FPR64RegClass)
    return false;
  return MI->getOpcode() > TargetOpcode::GENERIC_OP_END;
}
bool AArch64MIPeepholeOpt::visitINSvi64lane(MachineInstr &MI) {
  // The low 64 bits must come from an INSERT_SUBREG of a def that implicitly
  // zeroes the high 64 bits.
  MachineInstr *Low64MI = MRI->getUniqueVRegDef(MI.getOperand(1).getReg());
  if (Low64MI->getOpcode() != AArch64::INSERT_SUBREG)
    return false;
  // ...

  // The high 64 bits must be a materialized zero (MOVID/MOVIv2d_ns), possibly
  // behind an INSERT_SUBREG and a COPY.
  MachineInstr *High64MI = MRI->getUniqueVRegDef(MI.getOperand(3).getReg());
  if (!High64MI || High64MI->getOpcode() != AArch64::INSERT_SUBREG)
    return false;
  High64MI = MRI->getUniqueVRegDef(High64MI->getOperand(2).getReg());
  if (High64MI && High64MI->getOpcode() == TargetOpcode::COPY)
    High64MI = MRI->getUniqueVRegDef(High64MI->getOperand(1).getReg());
  if (!High64MI || (High64MI->getOpcode() != AArch64::MOVID &&
                    High64MI->getOpcode() != AArch64::MOVIv2d_ns))
    return false;
  // ...

  // Inserting the zero half is redundant; forward the low-64-bit value.
  Register OldDef = MI.getOperand(0).getReg();
  Register NewDef = MI.getOperand(1).getReg();
  // ...
  MRI->replaceRegWith(OldDef, NewDef);
  MI.eraseFromParent();
  return true;
}
bool AArch64MIPeepholeOpt::runOnMachineFunction(MachineFunction &MF) {
  // ...
  MLI = &getAnalysis<MachineLoopInfo>();
  MRI = &MF.getRegInfo();

  assert(MRI->isSSA() && "Expected to be run on SSA form!");

  bool Changed = false;
  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : make_early_inc_range(MBB)) {
      switch (MI.getOpcode()) {
      default:
        break;
      case AArch64::INSERT_SUBREG:
        Changed |= visitINSERT(MI);
        break;
      case AArch64::ANDWrr:
        Changed |= visitAND<uint32_t>(AArch64::ANDWri, MI);
        break;
      case AArch64::ANDXrr:
        Changed |= visitAND<uint64_t>(AArch64::ANDXri, MI);
        break;
      case AArch64::ORRWrs:
        Changed |= visitORR(MI);
        break;
      case AArch64::ADDWrr:
        Changed |= visitADDSUB<uint32_t>(AArch64::ADDWri, AArch64::SUBWri, MI);
        break;
      case AArch64::SUBWrr:
        Changed |= visitADDSUB<uint32_t>(AArch64::SUBWri, AArch64::ADDWri, MI);
        break;
      case AArch64::ADDXrr:
        Changed |= visitADDSUB<uint64_t>(AArch64::ADDXri, AArch64::SUBXri, MI);
        break;
      case AArch64::SUBXrr:
        Changed |= visitADDSUB<uint64_t>(AArch64::SUBXri, AArch64::ADDXri, MI);
        break;
      case AArch64::ADDSWrr:
        Changed |= visitADDSSUBS<uint32_t>({AArch64::ADDWri, AArch64::ADDSWri},
                                           {AArch64::SUBWri, AArch64::SUBSWri},
                                           MI);
        break;
      case AArch64::SUBSWrr:
        Changed |= visitADDSSUBS<uint32_t>({AArch64::SUBWri, AArch64::SUBSWri},
                                           {AArch64::ADDWri, AArch64::ADDSWri},
                                           MI);
        break;
      case AArch64::ADDSXrr:
        Changed |= visitADDSSUBS<uint64_t>({AArch64::ADDXri, AArch64::ADDSXri},
                                           {AArch64::SUBXri, AArch64::SUBSXri},
                                           MI);
        break;
      case AArch64::SUBSXrr:
        Changed |= visitADDSSUBS<uint64_t>({AArch64::SUBXri, AArch64::SUBSXri},
                                           {AArch64::ADDXri, AArch64::ADDSXri},
                                           MI);
        break;
      case AArch64::INSvi64gpr:
        Changed |= visitINSviGPR(MI, AArch64::INSvi64lane);
        break;
      case AArch64::INSvi32gpr:
        Changed |= visitINSviGPR(MI, AArch64::INSvi32lane);
        break;
      case AArch64::INSvi16gpr:
        Changed |= visitINSviGPR(MI, AArch64::INSvi16lane);
        break;
      case AArch64::INSvi8gpr:
        Changed |= visitINSviGPR(MI, AArch64::INSvi8lane);
        break;
      case AArch64::INSvi64lane:
        Changed |= visitINSvi64lane(MI);
        break;
      }
    }
  }

  return Changed;
}
FunctionPass *llvm::createAArch64MIPeepholeOptPass() {
  return new AArch64MIPeepholeOpt();
}
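// Usage note (a sketch, assuming a standard llc build): the pass is
// registered under the name "aarch64-mi-peephole-opt" via INITIALIZE_PASS
// above, so besides running inside the normal AArch64 codegen pipeline
// through createAArch64MIPeepholeOptPass(), it can be exercised in isolation
// on MIR input:
//
//   llc -mtriple=aarch64 -run-pass=aarch64-mi-peephole-opt -o - input.mir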