42 cl::desc("Disable hazard detection during preRA scheduling"));
53 short RegClass = MCID.operands()[OpNum].RegClass;
54 if (MCID.operands()[OpNum].isLookupPtrRegClass())
55 return TRI->getPointerRegClass(MF, RegClass);
62 return TRI->getRegClass(RegClass);
76 unsigned Quantity) const {
77 for (unsigned i = 0; i < Quantity; ++i)
104 bool AtInsnStart = true;
107 for (; *Str; ++Str) {
117 if (AtInsnStart && !isSpace(static_cast<unsigned char>(*Str))) {
118 unsigned AddLength = MaxInstLength;
119 if (strncmp(Str, ".space", 6) == 0) {
122 SpaceSize = strtol(Str + 6, &EStr, 10);
123 SpaceSize = SpaceSize < 0 ? 0 : SpaceSize;
124 while (*EStr != '\n' && isSpace(static_cast<unsigned char>(*EStr)))
126 if (*EStr == '\0' || *EStr == '\n' ||
128 AddLength = SpaceSize;
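// --- Illustrative sketch (not part of TargetInstrInfo.cpp) -----------------
// A self-contained approximation of the estimate above: each instruction is
// assumed to occupy MaxInstLength bytes, except that a ".space N" directive
// contributes N bytes instead. Newlines and ';' start a new statement here;
// the real code also honors the target's separator and comment strings from
// MCAsmInfo, which this sketch omits. estimateAsmLength is a hypothetical
// name, not an LLVM API.
#include <cctype>
#include <cstdlib>
#include <cstring>
#include <iostream>

static unsigned estimateAsmLength(const char *Str, unsigned MaxInstLength) {
  unsigned Length = 0;
  bool AtInsnStart = true;
  for (; *Str; ++Str) {
    if (*Str == '\n' || *Str == ';') { // statement separator
      AtInsnStart = true;
      continue;
    }
    if (AtInsnStart && !std::isspace(static_cast<unsigned char>(*Str))) {
      unsigned AddLength = MaxInstLength;
      if (std::strncmp(Str, ".space", 6) == 0) {
        char *EStr;
        long SpaceSize = std::strtol(Str + 6, &EStr, 10);
        SpaceSize = SpaceSize < 0 ? 0 : SpaceSize;
        // Only honor ".space N" when N parses cleanly up to whitespace/EOL.
        if (*EStr == '\0' || *EStr == '\n' ||
            std::isspace(static_cast<unsigned char>(*EStr)))
          AddLength = static_cast<unsigned>(SpaceSize);
      }
      Length += AddLength;
      AtInsnStart = false;
    }
  }
  return Length;
}

int main() {
  // Two instructions of at most 4 bytes each plus a 16-byte .space block.
  std::cout << estimateAsmLength("nop\nnop\n.space 16\n", 4) << "\n"; // 24
}
// ---------------------------------------------------------------------------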
156 if (MI->shouldUpdateCallSiteInfo())
168 bool NewMI, unsigned Idx1,
169 unsigned Idx2) const {
172 if (HasDef && !MI.getOperand(0).isReg())
176 unsigned CommutableOpIdx1 = Idx1; (void)CommutableOpIdx1;
177 unsigned CommutableOpIdx2 = Idx2; (void)CommutableOpIdx2;
179 CommutableOpIdx1 == Idx1 && CommutableOpIdx2 == Idx2 &&
180 "TargetInstrInfo::CommuteInstructionImpl(): not commutable operands.");
181 assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() &&
182 "This only knows how to commute register operands so far");
187 unsigned SubReg0 = HasDef ? MI.getOperand(0).getSubReg() : 0;
188 unsigned SubReg1 = MI.getOperand(Idx1).getSubReg();
189 unsigned SubReg2 = MI.getOperand(Idx2).getSubReg();
190 bool Reg1IsKill = MI.getOperand(Idx1).isKill();
191 bool Reg2IsKill = MI.getOperand(Idx2).isKill();
192 bool Reg1IsUndef = MI.getOperand(Idx1).isUndef();
193 bool Reg2IsUndef = MI.getOperand(Idx2).isUndef();
194 bool Reg1IsInternal = MI.getOperand(Idx1).isInternalRead();
195 bool Reg2IsInternal = MI.getOperand(Idx2).isInternalRead();
198 bool Reg1IsRenamable =
199 Reg1.isPhysical() ? MI.getOperand(Idx1).isRenamable() : false;
200 bool Reg2IsRenamable =
201 Reg2.isPhysical() ? MI.getOperand(Idx2).isRenamable() : false;
204 if (HasDef && Reg0 == Reg1 &&
209 } else if (HasDef && Reg0 == Reg2 &&
250 unsigned OpIdx2) const {
257 "Precondition violation: MI must be commutable.");
264 unsigned &ResultIdx2,
265 unsigned CommutableOpIdx1,
266 unsigned CommutableOpIdx2) {
269 ResultIdx1 = CommutableOpIdx1;
270 ResultIdx2 = CommutableOpIdx2;
272 if (ResultIdx2 == CommutableOpIdx1)
273 ResultIdx1 = CommutableOpIdx2;
274 else if (ResultIdx2 == CommutableOpIdx2)
275 ResultIdx1 = CommutableOpIdx1;
279 if (ResultIdx1 == CommutableOpIdx1)
280 ResultIdx2 = CommutableOpIdx2;
281 else if (ResultIdx1 == CommutableOpIdx2)
282 ResultIdx2 = CommutableOpIdx1;
288 return (ResultIdx1 == CommutableOpIdx1 && ResultIdx2 == CommutableOpIdx2) ||
289 (ResultIdx1 == CommutableOpIdx2 && ResultIdx2 == CommutableOpIdx1);
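// --- Illustrative sketch (not part of TargetInstrInfo.cpp) -----------------
// A standalone rendition of the index-fixup logic above. AnyIdx stands in
// for TargetInstrInfo::CommuteAnyOperandIndex: callers may pass it for one or
// both result indices, and the helper resolves the wildcards against the pair
// of operand indices that are actually commutable, returning false when fixed
// indices do not name that pair. fixIndices is a hypothetical local name.
#include <cassert>
#include <limits>

constexpr unsigned AnyIdx = std::numeric_limits<unsigned>::max();

static bool fixIndices(unsigned &ResultIdx1, unsigned &ResultIdx2,
                       unsigned CommutableOpIdx1, unsigned CommutableOpIdx2) {
  if (ResultIdx1 == AnyIdx && ResultIdx2 == AnyIdx) {
    ResultIdx1 = CommutableOpIdx1;
    ResultIdx2 = CommutableOpIdx2;
  } else if (ResultIdx1 == AnyIdx) {
    if (ResultIdx2 == CommutableOpIdx1)
      ResultIdx1 = CommutableOpIdx2;
    else if (ResultIdx2 == CommutableOpIdx2)
      ResultIdx1 = CommutableOpIdx1;
    else
      return false;
  } else if (ResultIdx2 == AnyIdx) {
    if (ResultIdx1 == CommutableOpIdx1)
      ResultIdx2 = CommutableOpIdx2;
    else if (ResultIdx1 == CommutableOpIdx2)
      ResultIdx2 = CommutableOpIdx1;
    else
      return false;
  }
  // The resolved pair must match the commutable pair, in either order.
  return (ResultIdx1 == CommutableOpIdx1 && ResultIdx2 == CommutableOpIdx2) ||
         (ResultIdx1 == CommutableOpIdx2 && ResultIdx2 == CommutableOpIdx1);
}

int main() {
  unsigned Idx1 = AnyIdx, Idx2 = 2;  // "commute operand 2 with whatever works"
  assert(fixIndices(Idx1, Idx2, 1, 2) && Idx1 == 1 && Idx2 == 2);
  unsigned Bad1 = 0, Bad2 = 3;       // not the commutable pair (1, 2)
  assert(!fixIndices(Bad1, Bad2, 1, 2));
  return 0;
}
// ---------------------------------------------------------------------------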
296 unsigned &SrcOpIdx2) const {
298 "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");
306 unsigned CommutableOpIdx1 = MCID.getNumDefs();
307 unsigned CommutableOpIdx2 = CommutableOpIdx1 + 1;
309 CommutableOpIdx1, CommutableOpIdx2))
312 if (!MI.getOperand(SrcOpIdx1).isReg() || !MI.getOperand(SrcOpIdx2).isReg())
319 if (!MI.isTerminator()) return false;
322 if (MI.isBranch() && !MI.isBarrier())
324 if (!MI.isPredicable())
331 bool MadeChange = false;
334 "TargetInstrInfo::PredicateInstruction() can't handle bundles");
337 if (!MI.isPredicable())
340 for (unsigned j = 0, i = 0, e = MI.getNumOperands(); i != e; ++i) {
341 if (MCID.operands()[i].isPredicate()) {
346 } else if (MO.isImm()) {
347 MO.setImm(Pred[j].getImm());
349 } else if (MO.isMBB()) {
350 MO.setMBB(Pred[j].getMBB());
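// --- Illustrative sketch (not part of TargetInstrInfo.cpp) -----------------
// The substitution pattern above reduced to plain data: walk the operands,
// and every slot that the instruction description flags as a predicate slot
// is overwritten, in order, from the new predicate list. Operand and Slot are
// invented stand-ins for MachineOperand and MCOperandInfo; the real code also
// distinguishes register, immediate, and basic-block operands when copying.
#include <cassert>
#include <vector>

enum class Kind { Reg, Imm, Label };
struct Operand { Kind K; long long Val; };  // Val: reg number, imm, or label id
struct Slot { bool IsPredicate; };          // per-operand descriptor bit

static bool predicateOperands(std::vector<Operand> &Ops,
                              const std::vector<Slot> &Desc,
                              const std::vector<Operand> &Pred) {
  bool MadeChange = false;
  for (unsigned J = 0, I = 0, E = Ops.size(); I != E; ++I) {
    if (!Desc[I].IsPredicate)
      continue;
    assert(J < Pred.size() && "ran out of predicate operands");
    Ops[I] = Pred[J++];  // copy the corresponding new predicate operand
    MadeChange = true;
  }
  return MadeChange;
}

int main() {
  // A conditional add whose last two operand slots carry the predicate.
  std::vector<Operand> Ops = {{Kind::Reg, 0}, {Kind::Reg, 1}, {Kind::Reg, 2},
                              {Kind::Imm, 14}, {Kind::Reg, 99}};
  std::vector<Slot> Desc = {{false}, {false}, {false}, {true}, {true}};
  std::vector<Operand> Pred = {{Kind::Imm, 1}, {Kind::Reg, 99}};
  assert(predicateOperands(Ops, Desc, Pred) && Ops[3].Val == 1);
  return 0;
}
// ---------------------------------------------------------------------------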
362 size_t StartSize = Accesses.size();
364 oe = MI.memoperands_end();
366 if ((*o)->isLoad() &&
367 isa_and_nonnull<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
370 return Accesses.size() != StartSize;
376 size_t StartSize = Accesses.size();
378 oe = MI.memoperands_end();
380 if ((*o)->isStore() &&
381 isa_and_nonnull<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
384 return Accesses.size() != StartSize;
388 unsigned SubIdx, unsigned &Size,
397 unsigned BitSize = TRI->getSubRegIdxSize(SubIdx);
402 int BitOffset = TRI->getSubRegIdxOffset(SubIdx);
403 if (BitOffset < 0 || BitOffset % 8)
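// --- Illustrative sketch (not part of TargetInstrInfo.cpp) -----------------
// The conversion done above reduced to plain arithmetic: a sub-register of
// BitSize bits at BitOffset within a SpillSize-byte slot maps to a byte-sized
// range, and on big-endian targets the byte offset is mirrored from the far
// end of the slot. subRegSlotRange and its parameters are local to the sketch.
#include <cassert>
#include <optional>
#include <utility>

static std::optional<std::pair<unsigned, unsigned>> // {Size, Offset} in bytes
subRegSlotRange(unsigned SpillSize, unsigned BitSize, int BitOffset,
                bool IsLittleEndian) {
  if (BitSize % 8 || BitOffset < 0 || BitOffset % 8)
    return std::nullopt;                   // not expressible as whole bytes
  unsigned Size = BitSize / 8;
  unsigned Offset = static_cast<unsigned>(BitOffset) / 8;
  assert(SpillSize >= Offset + Size && "bad subregister range");
  if (!IsLittleEndian)
    Offset = SpillSize - (Offset + Size);  // mirror within the slot
  return std::make_pair(Size, Offset);
}

int main() {
  // Low 32 bits of a 64-bit register spilled to an 8-byte slot.
  auto LE = subRegSlotRange(8, 32, 0, /*IsLittleEndian=*/true);
  auto BE = subRegSlotRange(8, 32, 0, /*IsLittleEndian=*/false);
  assert(LE && LE->second == 0 && BE && BE->second == 4);
  return 0;
}
// ---------------------------------------------------------------------------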
423 MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
444 assert(MI.isCopy() && "MI must be a COPY instruction");
445 if (MI.getNumOperands() != 2)
447 assert(FoldIdx < 2 && "FoldIdx refers no nonexistent operand");
475 std::pair<unsigned, unsigned>
477 switch (MI.getOpcode()) {
478 case TargetOpcode::STACKMAP:
481 case TargetOpcode::PATCHPOINT:
485 case TargetOpcode::STATEPOINT:
496 unsigned StartIdx = 0;
497 unsigned NumDefs = 0;
499 std::tie(NumDefs, StartIdx) = TII.getPatchpointUnfoldableRange(MI);
501 unsigned DefToFoldIdx = MI.getNumOperands();
505 for (unsigned Op : Ops) {
507 assert(DefToFoldIdx == MI.getNumOperands() && "Folding multiple defs");
509 } else if (Op < StartIdx) {
512 if (MI.getOperand(Op).isTied())
521 for (unsigned i = 0; i < StartIdx; ++i)
522 if (i != DefToFoldIdx)
523 MIB.add(MI.getOperand(i));
525 for (unsigned i = StartIdx, e = MI.getNumOperands(); i < e; ++i) {
528 (void)MI.isRegTiedToDefOperand(i, &TiedTo);
531 assert(TiedTo == e && "Cannot fold tied operands");
533 unsigned SpillOffset;
538 TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize, SpillOffset, MF);
541 MIB.addImm(StackMaps::IndirectMemRefOp);
548 assert(TiedTo < NumDefs && "Bad tied operand");
549 if (TiedTo > DefToFoldIdx)
563 for (unsigned OpIdx : Ops)
568 assert(MBB && "foldMemoryOperand needs an inserted instruction");
581 for (unsigned OpIdx : Ops) {
584 if (auto SubReg = MI.getOperand(OpIdx).getSubReg()) {
585 unsigned SubRegSize = TRI->getSubRegIdxSize(SubReg);
586 if (SubRegSize > 0 && !(SubRegSize % 8))
587 OpSize = SubRegSize / 8;
590 MemSize = std::max(MemSize, OpSize);
594 assert(MemSize && "Did not expect a zero-sized stack slot");
598 if (MI.getOpcode() == TargetOpcode::STACKMAP ||
599 MI.getOpcode() == TargetOpcode::PATCHPOINT ||
600 MI.getOpcode() == TargetOpcode::STATEPOINT) {
615 "Folded a def to a non-store!");
618 "Folded a use to a non-load!");
633 if (!MI.isCopy() || Ops.size() != 1)
657 for (unsigned OpIdx : Ops)
658 assert(MI.getOperand(OpIdx).isUse() && "Folding load into def!");
668 if ((MI.getOpcode() == TargetOpcode::STACKMAP ||
669 MI.getOpcode() == TargetOpcode::PATCHPOINT ||
670 MI.getOpcode() == TargetOpcode::STATEPOINT) &&
685 if (MI.memoperands_empty()) {
710 MI1 = MRI.getUniqueVRegDef(Op1.getReg());
712 MI2 = MRI.getUniqueVRegDef(Op2.getReg());
719 unsigned Opcode2) const {
724 bool &Commuted) const {
759 bool &Commuted) const {
789 bool DoRegPressureReduce) const {
815 std::pair<unsigned, unsigned>
825 if (AssocCommutRoot && AssocCommutPrev) {
835 "Incorrectly matched pattern");
836 unsigned AssocCommutOpcode = Root.getOpcode();
838 if (!AssocCommutRoot)
839 std::swap(AssocCommutOpcode, InverseOpcode);
867 if (!AssocCommutRoot && AssocCommutPrev)
868 return {AssocCommutOpcode, InverseOpcode};
869 if (AssocCommutRoot && !AssocCommutPrev)
870 return {InverseOpcode, InverseOpcode};
871 if (!AssocCommutRoot && !AssocCommutPrev)
872 return {InverseOpcode, AssocCommutOpcode};
875 if (!AssocCommutRoot && AssocCommutPrev)
876 return {AssocCommutOpcode, InverseOpcode};
877 if (AssocCommutRoot && !AssocCommutPrev)
878 return {InverseOpcode, AssocCommutOpcode};
879 if (!AssocCommutRoot && !AssocCommutPrev)
880 return {InverseOpcode, InverseOpcode};
883 if (!AssocCommutRoot && AssocCommutPrev)
884 return {InverseOpcode, InverseOpcode};
885 if (AssocCommutRoot && !AssocCommutPrev)
886 return {AssocCommutOpcode, InverseOpcode};
887 if (!AssocCommutRoot && !AssocCommutPrev)
888 return {InverseOpcode, AssocCommutOpcode};
891 if (!AssocCommutRoot && AssocCommutPrev)
892 return {InverseOpcode, InverseOpcode};
893 if (AssocCommutRoot && !AssocCommutPrev)
894 return {InverseOpcode, AssocCommutOpcode};
895 if (!AssocCommutRoot && !AssocCommutPrev)
896 return {AssocCommutOpcode, InverseOpcode};
910 return {false, false};
912 return {true, false};
937 unsigned OpIdx[4][4] = {
966 MRI.constrainRegClass(RegA, RC);
968 MRI.constrainRegClass(RegB, RC);
970 MRI.constrainRegClass(RegX, RC);
972 MRI.constrainRegClass(RegY, RC);
974 MRI.constrainRegClass(RegC, RC);
980 InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
983 bool KillA = OpA.isKill();
984 bool KillX = OpX.isKill();
985 bool KillY = OpY.isKill();
986 bool KillNewVR = true;
990 if (SwapPrevOperands) {
1002 if (SwapRootOperands) {
1066 bool TargetInstrInfo::isReallyTriviallyReMaterializableGeneric(
1072 if (!MI.getNumOperands() || !MI.getOperand(0).isReg())
1080 if (DefReg.isVirtual() && MI.getOperand(0).getSubReg() &&
1081 MI.readsVirtualRegister(DefReg))
1093 if (MI.isNotDuplicable() || MI.mayStore() || MI.mayRaiseFPException() ||
1094 MI.hasUnmodeledSideEffects())
1099 if (MI.isInlineAsm())
1103 if (MI.mayLoad() && !MI.isDereferenceableInvariantLoad())
1109 if (!MO.isReg())
continue;
1115 if (Reg.isPhysical()) {
1120 if (!MRI.isConstantPhysReg(Reg))
1131 if (MO.isDef() && Reg != DefReg)
1148 bool StackGrowsDown =
1159 if ((!StackGrowsDown && MI.getOpcode() == FrameSetupOpcode) ||
1160 (StackGrowsDown && MI.getOpcode() == FrameDestroyOpcode))
1173 if (MI.isTerminator() || MI.isPosition())
1177 if (MI.getOpcode() == TargetOpcode::INLINEASM_BR)
1225 BaseOps.size() != 1)
1227 BaseOp = BaseOps.front();
1237 SDNode *DefNode, unsigned DefIdx,
1238 SDNode *UseNode, unsigned UseIdx) const {
1239 if (!ItinData || ItinData->isEmpty())
1254 if (!ItinData || ItinData->isEmpty())
1257 if (!N->isMachineOpcode())
1269 if (!ItinData || ItinData->isEmpty())
1272 unsigned Class = MI.getDesc().getSchedClass();
1285 if (DefMI.isTransient())
1287 if (DefMI.mayLoad())
1300 unsigned *PredCost) const {
1304 return MI.mayLoad() ? 2 : 1;
1311 unsigned DefIdx) const {
1313 if (!ItinData || ItinData->isEmpty())
1316 unsigned DefClass = DefMI.getDesc().getSchedClass();
1318 return (DefCycle != -1 && DefCycle <= 1);
1321 std::optional<ParamLoadedValue>
1328 bool OffsetIsScalable;
1336 Register DestReg = DestSrc->Destination->getReg();
1348 assert(!TRI->isSuperOrSubRegisterEq(Reg, DestReg) &&
1349 "TargetInstrInfo::describeLoadedValue can't describe super- or "
1350 "sub-regs for copy instructions");
1351 return std::nullopt;
1357 } else if (MI.hasOneMemOperand()) {
1369 return std::nullopt;
1372 if (!TII->getMemOperandWithOffset(MI, BaseOp, Offset, OffsetIsScalable,
1374 return std::nullopt;
1377 if (OffsetIsScalable)
1378 return std::nullopt;
1386 if (MI.getNumExplicitDefs() != 1)
1387 return std::nullopt;
1399 return std::nullopt;
1408 unsigned UseIdx) const {
1409 unsigned DefClass = DefMI.getDesc().getSchedClass();
1410 unsigned UseClass = UseMI.getDesc().getSchedClass();
1418 MI.isRegSequenceLike()) &&
"Instruction do not have the proper type");
1420 if (!MI.isRegSequence())
1425 assert(DefIdx == 0 && "REG_SEQUENCE only has one def");
1426 for (unsigned OpIdx = 1, EndOpIdx = MI.getNumOperands(); OpIdx != EndOpIdx;
1433 "One of the subindex of the reg_sequence is not an immediate");
1436 (unsigned)MOSubIdx.getImm()));
1445 MI.isExtractSubregLike()) &&
"Instruction do not have the proper type");
1447 if (!MI.isExtractSubreg())
1452 assert(DefIdx == 0 && "EXTRACT_SUBREG only has one def");
1458 "The subindex of the extract_subreg is not an immediate");
1470 MI.isInsertSubregLike()) &&
"Instruction do not have the proper type");
1472 if (!MI.isInsertSubreg())
1477 assert(DefIdx == 0 && "INSERT_SUBREG only has one def");
1484 "One of the subindex of the reg_sequence is not an immediate");
1488 InsertedReg.Reg = MOInsertedReg.getReg();
1499 if (!MI.isInlineAsm())
1507 unsigned ExtraInfo = Op.getImm();
1519 int FlagIdx = MI.findInlineAsmFlagIdx(OpIdx);
1520 if (FlagIdx < 0 || (unsigned)FlagIdx != OpIdx)
1523 assert(Op.isImm() && "Expected flag operand to be an immediate");
1525 unsigned Flag = Op.getImm();
1533 OS << ':' << TRI->getRegClassName(TRI->getRegClass(RCID));
1535 OS << ":RC" << RCID;
1543 unsigned TiedTo = 0;
1545 OS << " tiedto:$" << TiedTo;
1553 Function &F, std::vector<outliner::Candidate> &Candidates) const {
1567 return C.getMF()->getFunction().hasFnAttribute(Attribute::NoUnwind);
1569 F.addFnAttr(Attribute::NoUnwind);
1578 if (MI.isCFIInstruction())
1583 if (MI.isMetaInstruction())
1587 if (MI.isInlineAsm())
1595 if (MI.isTerminator()) {
1597 if (!MI.getParent()->succ_empty())
1622 assert(!MOP.isTargetIndex() && "This isn't used quite yet!");
1625 assert(!MOP.isCFIIndex() && "CFI instructions handled elsewhere!");
1628 assert(!MOP.isFI() && "FrameIndex instructions should be gone by now!");
1630 if (MOP.isMBB() || MOP.isBlockAddress() || MOP.isCPI() || MOP.isJTI())
1639 unsigned &Flags) const {
1643 if (First != MBB.end() &&
1644 (First->getOpcode() == TargetOpcode::FENTRY_CALL ||
1645 First->getOpcode() == TargetOpcode::PATCHABLE_FUNCTION_ENTER))