#define DEBUG_TYPE "machine-combiner"

STATISTIC(NumInstCombined, "Number of machineinst combined");
static cl::opt<unsigned>
inc_threshold("machine-combiner-inc-threshold", cl::Hidden,
              cl::desc("Incremental depth computation will be used for basic "
                       "blocks with more instructions."),
              cl::init(500));

static cl::opt<bool>
dump_intrs("machine-combiner-dump-subst-intrs", cl::Hidden,
           cl::desc("Dump all substituted intrs"),
           cl::init(false));
#ifdef EXPENSIVE_CHECKS
static cl::opt<bool> VerifyPatternOrder(
    "machine-combiner-verify-pattern-order", cl::Hidden,
    cl::desc(
        "Verify that the generated patterns are ordered by increasing latency"),
    cl::init(true));
#else
static cl::opt<bool> VerifyPatternOrder(
    "machine-combiner-verify-pattern-order", cl::Hidden,
    cl::desc(
        "Verify that the generated patterns are ordered by increasing latency"),
    cl::init(false));
#endif
class MachineCombiner : public MachineFunctionPass {
  // ... (target-info, trace-metrics and register-pressure members elided)

public:
  static char ID;
  void getAnalysisUsage(AnalysisUsage &AU) const override;
  bool runOnMachineFunction(MachineFunction &MF) override;
  StringRef getPassName() const override { return "Machine InstCombiner"; }

private:
  bool combineInstructions(MachineBasicBlock *);
  MachineInstr *getOperandDef(const MachineOperand &MO);
  bool isTransientMI(const MachineInstr *MI);
  unsigned getDepth(SmallVectorImpl<MachineInstr *> &InsInstrs,
                    DenseMap<Register, unsigned> &InstrIdxForVirtReg,
                    MachineTraceMetrics::Trace BlockTrace,
                    const MachineBasicBlock &MBB);
  unsigned getLatency(MachineInstr *Root, MachineInstr *NewRoot,
                      MachineTraceMetrics::Trace BlockTrace);
  CombinerObjective getCombinerObjective(unsigned Pattern);
  bool improvesCriticalPathLen(MachineBasicBlock *MBB, MachineInstr *Root,
                               MachineTraceMetrics::Trace BlockTrace,
                               SmallVectorImpl<MachineInstr *> &InsInstrs,
                               SmallVectorImpl<MachineInstr *> &DelInstrs,
                               DenseMap<Register, unsigned> &InstrIdxForVirtReg,
                               unsigned Pattern, bool SlackIsAccurate);
  bool reduceRegisterPressure(MachineInstr &Root, MachineBasicBlock *MBB,
                              SmallVectorImpl<MachineInstr *> &InsInstrs,
                              SmallVectorImpl<MachineInstr *> &DelInstrs,
                              unsigned Pattern);
  bool preservesResourceLen(MachineBasicBlock *MBB,
                            MachineTraceMetrics::Trace BlockTrace,
                            SmallVectorImpl<MachineInstr *> &InsInstrs,
                            SmallVectorImpl<MachineInstr *> &DelInstrs);
  void instr2instrSC(SmallVectorImpl<MachineInstr *> &Instrs,
                     SmallVectorImpl<const MCSchedClassDesc *> &InstrsSC);
  std::pair<unsigned, unsigned>
  getLatenciesForInstrSequences(MachineInstr &MI,
                                SmallVectorImpl<MachineInstr *> &InsInstrs,
                                SmallVectorImpl<MachineInstr *> &DelInstrs,
                                MachineTraceMetrics::Trace BlockTrace);
  void verifyPatternOrder(MachineBasicBlock *MBB, MachineInstr &Root,
                          SmallVector<unsigned, 16> &Patterns);
};
char MachineCombiner::ID = 0;

INITIALIZE_PASS_BEGIN(MachineCombiner, DEBUG_TYPE, "Machine InstCombiner",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(MachineTraceMetricsWrapperPass)
INITIALIZE_PASS_END(MachineCombiner, DEBUG_TYPE, "Machine InstCombiner",
                    false, false)
void MachineCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  // ... (required/preserved analyses elided)
}
MachineInstr *MachineCombiner::getOperandDef(const MachineOperand &MO) {
  MachineInstr *DefInstr = nullptr;
  // We need a virtual register definition.
  if (MO.isReg() && MO.getReg().isVirtual())
    DefInstr = MRI->getUniqueVRegDef(MO.getReg());
  return DefInstr;
}
/// Return true if \p MI is unlikely to produce an actual target instruction,
/// e.g. a COPY that register coalescing is expected to remove.
bool MachineCombiner::isTransientMI(const MachineInstr *MI) {
  if (!MI->isCopy())
    return MI->isTransient();

  // MI is a COPY; check whether its source and destination can be coalesced.
  Register Dst = MI->getOperand(0).getReg();
  Register Src = MI->getOperand(1).getReg();

  if (!MI->isFullCopy()) {
    // If the source register class contains super registers of the
    // destination register class, the copy can also be coalesced.
    if (MI->getOperand(0).getSubReg() || Src.isPhysical() || Dst.isPhysical())
      return false;

    auto SrcSub = MI->getOperand(1).getSubReg();
    auto SrcRC = MRI->getRegClass(Src);
    auto DstRC = MRI->getRegClass(Dst);
    return TRI->getMatchingSuperRegClass(SrcRC, DstRC, SrcSub) != nullptr;
  }

  if (Src.isPhysical() && Dst.isPhysical())
    return Src == Dst;

  if (Src.isVirtual() && Dst.isVirtual()) {
    auto SrcRC = MRI->getRegClass(Src);
    auto DstRC = MRI->getRegClass(Dst);
    return SrcRC->hasSuperClassEq(DstRC) || SrcRC->hasSubClassEq(DstRC);
  }

  if (Src.isVirtual())
    std::swap(Src, Dst);

  // Now Src is a physical register and Dst is a virtual register.
  auto DstRC = MRI->getRegClass(Dst);
  return DstRC->contains(Src);
}
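// Note on the cost model (derived from the uses below): getDepth() treats a
// "transient" definition as free, i.e. a value produced by a copy that is
// expected to be coalesced away contributes no operand latency.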
/// Compute the depth of the last (new root) instruction in InsInstrs, i.e. the
/// longest data-dependency chain, in cycles, leading to it.
unsigned
MachineCombiner::getDepth(SmallVectorImpl<MachineInstr *> &InsInstrs,
                          DenseMap<Register, unsigned> &InstrIdxForVirtReg,
                          MachineTraceMetrics::Trace BlockTrace,
                          const MachineBasicBlock &MBB) {
  SmallVector<unsigned, 16> InstrDepth;

  // For each instruction in the new sequence compute the depth based on its
  // operands. Use the trace information where possible; operands defined by
  // the new sequence itself are looked up through InstrIdxForVirtReg.
  for (auto *InstrPtr : InsInstrs) {
    unsigned IDepth = 0;
    for (const MachineOperand &MO : InstrPtr->all_uses()) {
      // Only virtual register operands contribute.
      if (!MO.getReg().isVirtual())
        continue;
      unsigned DepthOp = 0;
      unsigned LatencyOp = 0;
      DenseMap<Register, unsigned>::iterator II =
          InstrIdxForVirtReg.find(MO.getReg());
      if (II != InstrIdxForVirtReg.end()) {
        // The operand is defined by another instruction of the new sequence.
        MachineInstr *DefInstr = InsInstrs[II->second];
        assert(DefInstr &&
               "There must be a definition for a new virtual register");
        DepthOp = InstrDepth[II->second];
        int DefIdx =
            DefInstr->findRegisterDefOperandIdx(MO.getReg(), /*TRI=*/nullptr);
        int UseIdx =
            InstrPtr->findRegisterUseOperandIdx(MO.getReg(), /*TRI=*/nullptr);
        LatencyOp = TSchedModel.computeOperandLatency(DefInstr, DefIdx,
                                                      InstrPtr, UseIdx);
      } else {
        // The operand is defined by an existing instruction; use the trace.
        MachineInstr *DefInstr = getOperandDef(MO);
        if (DefInstr && (TII->getMachineCombinerTraceStrategy() !=
                             MachineTraceStrategy::TS_Local ||
                         DefInstr->getParent() == &MBB)) {
          DepthOp = BlockTrace.getInstrCycles(*DefInstr).Depth;
          if (!isTransientMI(DefInstr))
            LatencyOp = TSchedModel.computeOperandLatency(
                DefInstr,
                DefInstr->findRegisterDefOperandIdx(MO.getReg(),
                                                    /*TRI=*/nullptr),
                InstrPtr,
                InstrPtr->findRegisterUseOperandIdx(MO.getReg(),
                                                    /*TRI=*/nullptr));
        }
      }
      IDepth = std::max(IDepth, DepthOp + LatencyOp);
    }
    InstrDepth.push_back(IDepth);
  }
  unsigned NewRootIdx = InsInstrs.size() - 1;
  return InstrDepth[NewRootIdx];
}
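// In effect, for the new root R the function computes
//   Depth(R) = max over used operands o of (Depth(def(o)) + Latency(def(o) -> R))
// where Depth(def(o)) comes either from InstrDepth (defs inside the new
// sequence) or from the trace (existing defs), and transient defs contribute
// zero latency.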
unsigned MachineCombiner::getLatency(MachineInstr *Root, MachineInstr *NewRoot,
                                     MachineTraceMetrics::Trace BlockTrace) {
  // Check each definition in NewRoot and compute the latency.
  unsigned NewRootLatency = 0;

  for (const MachineOperand &MO : NewRoot->all_defs()) {
    // Get the first instruction that uses this definition.
    MachineRegisterInfo::reg_iterator RI = MRI->reg_begin(MO.getReg());
    RI++;
    if (RI == MRI->reg_end())
      continue;
    MachineInstr *UseMI = RI->getParent();
    unsigned LatencyOp = 0;
    // Use the operand latency to the first in-trace user when available,
    // otherwise fall back to the plain instruction latency.
    if (UseMI && BlockTrace.isDepInTrace(*Root, *UseMI))
      LatencyOp = TSchedModel.computeOperandLatency(
          NewRoot, NewRoot->findRegisterDefOperandIdx(MO.getReg(), nullptr),
          UseMI, UseMI->findRegisterUseOperandIdx(MO.getReg(), nullptr));
    else
      LatencyOp = TSchedModel.computeInstrLatency(NewRoot);
    NewRootLatency = std::max(NewRootLatency, LatencyOp);
  }
  return NewRootLatency;
}
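// The new root's latency is therefore the worst-case producer-to-consumer
// latency over all values it defines; when the first user is not part of the
// current trace, the scheduling model's generic instruction latency is used as
// a conservative stand-in.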
/// Classify a combiner pattern by the objective it is meant to optimize.
CombinerObjective MachineCombiner::getCombinerObjective(unsigned Pattern) {
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY:
  case MachineCombinerPattern::REASSOC_AX_YB:
  case MachineCombinerPattern::REASSOC_XA_BY:
  case MachineCombinerPattern::REASSOC_XA_YB:
    return CombinerObjective::MustReduceDepth;
  default:
    return TII->getCombinerObjective(Pattern);
  }
}
/// Estimate the latency of the new and of the original instruction sequence.
std::pair<unsigned, unsigned> MachineCombiner::getLatenciesForInstrSequences(
    MachineInstr &MI, SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    MachineTraceMetrics::Trace BlockTrace) {
  assert(!InsInstrs.empty() && "Only support sequences that insert instrs.");
  unsigned NewRootLatency = 0;
  // NewRoot is the last instruction in the InsInstrs vector.
  MachineInstr *NewRoot = InsInstrs.back();
  for (unsigned i = 0; i < InsInstrs.size() - 1; i++)
    NewRootLatency += TSchedModel.computeInstrLatency(InsInstrs[i]);
  NewRootLatency += getLatency(&MI, NewRoot, BlockTrace);

  unsigned RootLatency = 0;
  for (auto *I : DelInstrs)
    RootLatency += TSchedModel.computeInstrLatency(I);

  return {NewRootLatency, RootLatency};
}
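// Worked example (hypothetical latencies, for illustration only): if DelInstrs
// holds a 3-cycle mul and a 1-cycle add, RootLatency = 3 + 1 = 4. If InsInstrs
// holds a single fused multiply-add whose latency to its first user is 4,
// NewRootLatency = 4, so the replacement does not lengthen this part of the
// cost model.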
bool MachineCombiner::reduceRegisterPressure(
    MachineInstr &Root, MachineBasicBlock *MBB,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs, unsigned Pattern) {
  // ...
}
/// Decide whether the new instruction sequence shortens (or at least does not
/// lengthen) the critical path through Root.
bool MachineCombiner::improvesCriticalPathLen(
    MachineBasicBlock *MBB, MachineInstr *Root,
    MachineTraceMetrics::Trace BlockTrace,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<Register, unsigned> &InstrIdxForVirtReg, unsigned Pattern,
    bool SlackIsAccurate) {
  // Get depth and latency of NewRoot and Root.
  unsigned NewRootDepth =
      getDepth(InsInstrs, InstrIdxForVirtReg, BlockTrace, *MBB);
  unsigned RootDepth = BlockTrace.getInstrCycles(*Root).Depth;

  LLVM_DEBUG(dbgs() << "  Dependence data for " << *Root << "\tNewRootDepth: "
                    << NewRootDepth << "\tRootDepth: " << RootDepth);

  // For depth-reducing transforms such as reassociation, require that the
  // depth of the new root strictly improves.
  if (getCombinerObjective(Pattern) == CombinerObjective::MustReduceDepth) {
    LLVM_DEBUG(NewRootDepth < RootDepth
                   ? dbgs() << "\t  and it does it\n"
                   : dbgs() << "\t  but it does NOT do it\n");
    return NewRootDepth < RootDepth;
  }

  // Otherwise compare full cycle counts, accounting for the latency of the
  // inserted and deleted instructions and, when accurate, the slack of Root.
  unsigned NewRootLatency, RootLatency;
  if (TII->accumulateInstrSeqToRootLatency(*Root)) {
    std::tie(NewRootLatency, RootLatency) =
        getLatenciesForInstrSequences(*Root, InsInstrs, DelInstrs, BlockTrace);
  } else {
    NewRootLatency = TSchedModel.computeInstrLatency(InsInstrs.back());
    RootLatency = TSchedModel.computeInstrLatency(Root);
  }

  unsigned RootSlack = BlockTrace.getInstrSlack(*Root);
  unsigned NewCycleCount = NewRootDepth + NewRootLatency;
  unsigned OldCycleCount =
      RootDepth + RootLatency + (SlackIsAccurate ? RootSlack : 0);
  LLVM_DEBUG(dbgs() << "\tNewRootLatency: " << NewRootLatency
                    << "\tRootLatency: " << RootLatency << "\n\tRootSlack: "
                    << RootSlack << " SlackIsAccurate=" << SlackIsAccurate
                    << "\n\tNewRootDepth + NewRootLatency = " << NewCycleCount
                    << "\n\tRootDepth + RootLatency + RootSlack = "
                    << OldCycleCount);
  LLVM_DEBUG(NewCycleCount <= OldCycleCount
                 ? dbgs() << "\n\t  It IMPROVES PathLen because"
                 : dbgs() << "\n\t  It DOES NOT improve PathLen because");
  LLVM_DEBUG(dbgs() << "\n\tNewCycleCount = " << NewCycleCount
                    << ", OldCycleCount = " << OldCycleCount << "\n");

  return NewCycleCount <= OldCycleCount;
}
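// Worked example (hypothetical cycle counts): with NewRootDepth = 6,
// NewRootLatency = 4, RootDepth = 5, RootLatency = 4 and RootSlack = 2, the
// comparison is 6 + 4 = 10 against 5 + 4 + 2 = 11, so the transform is
// accepted even though the raw depth got worse; with SlackIsAccurate == false
// the slack is dropped and the same numbers (10 vs. 9) would reject it.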
/// Helper to map instructions to their scheduling classes.
void MachineCombiner::instr2instrSC(
    SmallVectorImpl<MachineInstr *> &Instrs,
    SmallVectorImpl<const MCSchedClassDesc *> &InstrsSC) {
  for (auto *InstrPtr : Instrs) {
    unsigned Opc = InstrPtr->getOpcode();
    unsigned Idx = TII->get(Opc).getSchedClass();
    const MCSchedClassDesc *SC = SchedModel.getSchedClassDesc(Idx);
    InstrsSC.push_back(SC);
  }
}
/// Check that the new instruction sequence does not lengthen the trace's
/// resource length by more than the target allows.
bool MachineCombiner::preservesResourceLen(
    MachineBasicBlock *MBB, MachineTraceMetrics::Trace BlockTrace,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs) {
  if (!TSchedModel.hasInstrSchedModel())
    return true;

  // Compute the current resource length of the trace through MBB.
  SmallVector<const MachineBasicBlock *, 1> MBBarr;
  MBBarr.push_back(MBB);
  unsigned ResLenBeforeCombine = BlockTrace.getResourceLength(MBBarr);

  // Work with scheduling classes rather than instructions.
  SmallVector<const MCSchedClassDesc *, 16> InsInstrsSC;
  SmallVector<const MCSchedClassDesc *, 16> DelInstrsSC;
  instr2instrSC(InsInstrs, InsInstrsSC);
  instr2instrSC(DelInstrs, DelInstrsSC);

  // Compute the resource length after the replacement.
  unsigned ResLenAfterCombine =
      BlockTrace.getResourceLength(MBBarr, InsInstrsSC, DelInstrsSC);

  LLVM_DEBUG(dbgs() << "\t\tResource length before replacement: "
                    << ResLenBeforeCombine
                    << " and after: " << ResLenAfterCombine << "\n");
  LLVM_DEBUG(
      ResLenAfterCombine <=
              ResLenBeforeCombine + TII->getExtendResourceLenLimit()
          ? dbgs() << "\t\t  As result it IMPROVES/PRESERVES Resource Length\n"
          : dbgs() << "\t\t  As result it DOES NOT improve/preserve Resource "
                      "Length\n");

  return ResLenAfterCombine <=
         ResLenBeforeCombine + TII->getExtendResourceLenLimit();
}
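// In other words, the replacement may keep the resource length (the
// issue-bandwidth bound of the trace) unchanged, or extend it by up to
// TII->getExtendResourceLenLimit() cycles, which is typically 0 unless a
// target overrides that hook.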
/// Inserts InsInstrs and deletes DelInstrs. Incrementally updates the
/// instruction depths if requested.
static void insertDeleteInstructions(
    MachineBasicBlock *MBB, MachineInstr &MI,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    MachineTraceMetrics::Ensemble *TraceEnsemble,
    SparseSet<LiveRegUnit> &RegUnits, const TargetInstrInfo *TII,
    unsigned Pattern, bool IncrementalUpdate) {
  for (auto *InstrPtr : InsInstrs)
    MBB->insert((MachineBasicBlock::iterator)&MI, InstrPtr);

  for (auto *InstrPtr : DelInstrs) {
    InstrPtr->eraseFromParent();
    // Erase all LiveRegs defined by the removed instruction.
    for (auto *I = RegUnits.begin(); I != RegUnits.end();) {
      if (I->MI == InstrPtr)
        I = RegUnits.erase(I);
      else
        I++;
    }
  }

  if (IncrementalUpdate)
    for (auto *InstrPtr : InsInstrs)
      TraceEnsemble->updateDepth(MBB, *InstrPtr, RegUnits);
  else
    TraceEnsemble->invalidate(MBB);

  NumInstCombined++;
}
void MachineCombiner::verifyPatternOrder(MachineBasicBlock *MBB,
                                         MachineInstr &Root,
                                         SmallVector<unsigned, 16> &Patterns) {
  long PrevLatencyDiff = std::numeric_limits<long>::max();
  (void)PrevLatencyDiff; // Variable is used in assert only.
  for (auto P : Patterns) {
    SmallVector<MachineInstr *, 16> InsInstrs;
    SmallVector<MachineInstr *, 16> DelInstrs;
    DenseMap<Register, unsigned> InstrIdxForVirtReg;
    TII->genAlternativeCodeSequence(Root, P, InsInstrs, DelInstrs,
                                    InstrIdxForVirtReg);
    // A pattern may fail to produce an alternative sequence; skip it.
    if (InsInstrs.empty() || !TSchedModel.hasInstrSchedModelOrItineraries())
      continue;

    unsigned NewRootLatency, RootLatency;
    std::tie(NewRootLatency, RootLatency) = getLatenciesForInstrSequences(
        Root, InsInstrs, DelInstrs, TraceEnsemble->getTrace(MBB));
    long CurrentLatencyDiff = ((long)RootLatency) - ((long)NewRootLatency);
    assert(CurrentLatencyDiff <= PrevLatencyDiff &&
           "Current pattern is better than previous pattern.");
    PrevLatencyDiff = CurrentLatencyDiff;
  }
}
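// This check enforces the contract behind VerifyPatternOrder: targets are
// expected to return candidate patterns sorted so that the estimated latency
// saving (RootLatency - NewRootLatency) never increases from one pattern to
// the next, letting the combiner take the first acceptable one.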
/// Substitute a slow code sequence with a faster one by evaluating the
/// candidate patterns against the critical path and resource length of the
/// basic block's trace.
bool MachineCombiner::combineInstructions(MachineBasicBlock *MBB) {
  bool Changed = false;
  LLVM_DEBUG(dbgs() << "Combining MBB " << MBB->getName() << "\n");

  bool IncrementalUpdate = false;
  auto BlockIter = MBB->begin();
  decltype(BlockIter) LastUpdate;
  // Check if the block is in a loop.
  const MachineLoop *ML = MLI->getLoopFor(MBB);
  if (!TraceEnsemble)
    TraceEnsemble = Traces->getEnsemble(TII->getMachineCombinerTraceStrategy());

  SparseSet<LiveRegUnit> RegUnits;
  RegUnits.setUniverse(TRI->getNumRegUnits());

  bool OptForSize = llvm::shouldOptimizeForSize(MBB, PSI, MBFI);

  bool DoRegPressureReduce =
      TII->shouldReduceRegisterPressure(MBB, &RegClassInfo);

  while (BlockIter != MBB->end()) {
    auto &MI = *BlockIter++;
    SmallVector<unsigned, 16> Patterns;

    // Ask the target for candidate patterns rooted at MI.
    if (!TII->getMachineCombinerPatterns(MI, Patterns, DoRegPressureReduce))
      continue;

    if (VerifyPatternOrder)
      verifyPatternOrder(MBB, MI, Patterns);

    for (const auto P : Patterns) {
      SmallVector<MachineInstr *, 16> InsInstrs;
      SmallVector<MachineInstr *, 16> DelInstrs;
      DenseMap<Register, unsigned> InstrIdxForVirtReg;
      TII->genAlternativeCodeSequence(MI, P, InsInstrs, DelInstrs,
                                      InstrIdxForVirtReg);
      // Found a pattern, but no alternative sequence was generated.
      if (InsInstrs.empty())
        continue;

      LLVM_DEBUG(if (dump_intrs) {
        dbgs() << "\tFor the Pattern (" << (int)P
               << ") these instructions could be removed\n";
        for (auto const *InstrPtr : DelInstrs)
          InstrPtr->print(dbgs(), /*IsStandalone*/ false, /*SkipOpers*/ false,
                          /*SkipDebugLoc*/ false, /*AddNewLine*/ true, TII);
        dbgs() << "\tThese instructions could replace the removed ones\n";
        for (auto const *InstrPtr : InsInstrs)
          InstrPtr->print(dbgs(), /*IsStandalone*/ false, /*SkipOpers*/ false,
                          /*SkipDebugLoc*/ false, /*AddNewLine*/ true, TII);
      });

      if (IncrementalUpdate && LastUpdate != BlockIter) {
        // Update depths since the last incremental update.
        TraceEnsemble->updateDepths(LastUpdate, BlockIter, RegUnits);
        LastUpdate = BlockIter;
      }

      if (DoRegPressureReduce &&
          getCombinerObjective(P) ==
              CombinerObjective::MustReduceRegisterPressure) {
        if (MBB->size() > inc_threshold) {
          // Use incremental depth updates for basic blocks above threshold.
          IncrementalUpdate = true;
          LastUpdate = BlockIter;
        }
        if (reduceRegisterPressure(MI, MBB, InsInstrs, DelInstrs, P)) {
          // Replace DelInstrs with InsInstrs.
          insertDeleteInstructions(MBB, MI, InsInstrs, DelInstrs, TraceEnsemble,
                                   RegUnits, TII, P, IncrementalUpdate);
          Changed |= true;

          // Go back to the previous instruction as it may expose further
          // combining opportunities.
          BlockIter--;
          break;
        }
      }

      if (ML && TII->isThroughputPattern(P)) {
        LLVM_DEBUG(dbgs() << "\t Replacing due to throughput pattern in loop\n");
        insertDeleteInstructions(MBB, MI, InsInstrs, DelInstrs, TraceEnsemble,
                                 RegUnits, TII, P, IncrementalUpdate);
        // Eagerly stop after the first pattern fires.
        Changed = true;
        break;
      } else if (OptForSize && InsInstrs.size() < DelInstrs.size()) {
        LLVM_DEBUG(dbgs() << "\t Replacing due to OptForSize ("
                          << InsInstrs.size() << " < "
                          << DelInstrs.size() << ")\n");
        insertDeleteInstructions(MBB, MI, InsInstrs, DelInstrs, TraceEnsemble,
                                 RegUnits, TII, P, IncrementalUpdate);
        // Eagerly stop after the first pattern fires.
        Changed = true;
        break;
      } else {
        // Otherwise accept the pattern only if it improves the critical path
        // and does not over-extend the resource length.
        MachineTraceMetrics::Trace BlockTrace = TraceEnsemble->getTrace(MBB);
        if (improvesCriticalPathLen(MBB, &MI, BlockTrace, InsInstrs, DelInstrs,
                                    InstrIdxForVirtReg, P,
                                    !IncrementalUpdate) &&
            preservesResourceLen(MBB, BlockTrace, InsInstrs, DelInstrs)) {
          if (MBB->size() > inc_threshold) {
            // Use incremental depth updates for basic blocks above threshold.
            IncrementalUpdate = true;
            LastUpdate = BlockIter;
          }
          insertDeleteInstructions(MBB, MI, InsInstrs, DelInstrs, TraceEnsemble,
                                   RegUnits, TII, P, IncrementalUpdate);
          // Eagerly stop after the first pattern fires.
          Changed = true;
          break;
        }
        // Clean up the instructions of the rejected alternative code
        // sequence; there is no use for them.
        MachineFunction *MF = MBB->getParent();
        for (auto *InstrPtr : InsInstrs)
          MF->deleteMachineInstr(InstrPtr);
      }
      InstrIdxForVirtReg.clear();
    }
  }

  if (Changed && IncrementalUpdate)
    Traces->verifyAnalysis();
  return Changed;
}
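// Decision cascade per candidate pattern, as implemented above: (1) patterns
// whose objective is MustReduceRegisterPressure are judged by
// reduceRegisterPressure() alone; (2) throughput patterns inside loops and,
// under OptForSize, strictly smaller sequences are accepted unconditionally;
// (3) everything else must both improve the critical path and preserve the
// trace's resource length before the substitution is committed.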
bool MachineCombiner::runOnMachineFunction(MachineFunction &MF) {
  STI = &MF.getSubtarget();
  TII = STI->getInstrInfo();
  TRI = STI->getRegisterInfo();
  SchedModel = STI->getSchedModel();
  TSchedModel.init(STI);
  MRI = &MF.getRegInfo();
  MLI = &getAnalysis<MachineLoopInfoWrapperPass>().getLI();
  Traces = &getAnalysis<MachineTraceMetricsWrapperPass>().getMTM();
  PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
  MBFI = (PSI && PSI->hasProfileSummary())
             ? &getAnalysis<LazyMachineBlockFrequencyInfoPass>().getBFI()
             : nullptr;
  TraceEnsemble = nullptr;

  if (!TII->useMachineCombiner()) {
    LLVM_DEBUG(
        dbgs()
        << "  Skipping pass: Target does not support machine combiner\n");
    return false;
  }

  bool Changed = false;
  // Try to combine instructions block by block.
  for (auto &MBB : MF)
    Changed |= combineInstructions(&MBB);

  return Changed;
}